@tanstack/electric-db-collection 0.2.28 → 0.2.29

@@ -39,21 +39,29 @@ function createLoadSubsetDedupe({
   write,
   commit,
   collectionId,
-  encodeColumnName
+  encodeColumnName,
+  signal
 }) {
   if (syncMode === `eager`) {
     return null;
   }
   const compileOptions = encodeColumnName ? { encodeColumnName } : void 0;
+  const logPrefix = collectionId ? `[${collectionId}] ` : ``;
+  function handleSnapshotError(error, operation) {
+    if (signal.aborted) {
+      debug(`${logPrefix}Ignoring ${operation} error during cleanup: %o`, error);
+      return true;
+    }
+    debug(`${logPrefix}Error in ${operation}: %o`, error);
+    return false;
+  }
   const loadSubset = async (opts) => {
     if (isBufferingInitialSync()) {
       const snapshotParams = sqlCompiler.compileSQL(opts, compileOptions);
       try {
         const { data: rows } = await stream.fetchSnapshot(snapshotParams);
         if (!isBufferingInitialSync()) {
-          debug(
-            `${collectionId ? `[${collectionId}] ` : ``}Ignoring snapshot - sync completed while fetching`
-          );
+          debug(`${logPrefix}Ignoring snapshot - sync completed while fetching`);
           return;
         }
         if (rows.length > 0) {
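The new `handleSnapshotError` helper captures the pattern this release leans on: once the collection's abort signal has fired, failures from in-flight snapshot calls are expected fallout of cleanup, so they are logged and swallowed rather than rethrown. A minimal standalone sketch of the same idea, assuming a plain console logger (`fetchJson` and its URL are illustrative, not part of the package):

```typescript
// Abort-aware error handling: swallow failures caused by cancellation,
// rethrow everything else. `fetchJson` is a hypothetical stand-in.
async function fetchJson(url: string, signal: AbortSignal): Promise<unknown> {
  try {
    const res = await fetch(url, { signal });
    return await res.json();
  } catch (error) {
    if (signal.aborted) {
      // The caller tore the session down; this rejection is expected noise.
      console.debug(`Ignoring fetch error during cleanup:`, error);
      return undefined;
    }
    console.debug(`Error in fetch:`, error);
    throw error; // a real failure the caller should see
  }
}
```

Returning a sentinel (here `undefined`) instead of rethrowing mirrors how `loadSubset` now returns early when `handleSnapshotError` reports an aborted signal.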
@@ -62,57 +70,57 @@ function createLoadSubsetDedupe({
             write({
               type: `insert`,
               value: row.value,
-              metadata: {
-                ...row.headers
-              }
+              metadata: { ...row.headers }
             });
           }
           commit();
-          debug(
-            `${collectionId ? `[${collectionId}] ` : ``}Applied snapshot with ${rows.length} rows`
-          );
+          debug(`${logPrefix}Applied snapshot with ${rows.length} rows`);
         }
       } catch (error) {
-        debug(
-          `${collectionId ? `[${collectionId}] ` : ``}Error fetching snapshot: %o`,
-          error
-        );
+        if (handleSnapshotError(error, `fetchSnapshot`)) {
+          return;
+        }
        throw error;
      }
-    } else if (syncMode === `progressive`) {
      return;
-    } else {
-      const { cursor, where, orderBy, limit } = opts;
+    }
+    if (syncMode === `progressive`) {
+      return;
+    }
+    const { cursor, where, orderBy, limit } = opts;
+    try {
      if (cursor) {
-        const promises = [];
        const whereCurrentOpts = {
          where: where ? db.and(where, cursor.whereCurrent) : cursor.whereCurrent,
          orderBy
-          // No limit - get all ties
        };
        const whereCurrentParams = sqlCompiler.compileSQL(
          whereCurrentOpts,
          compileOptions
        );
-        promises.push(stream.requestSnapshot(whereCurrentParams));
-        debug(
-          `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereCurrent snapshot (all ties)`
-        );
        const whereFromOpts = {
          where: where ? db.and(where, cursor.whereFrom) : cursor.whereFrom,
          orderBy,
          limit
        };
        const whereFromParams = sqlCompiler.compileSQL(whereFromOpts, compileOptions);
-        promises.push(stream.requestSnapshot(whereFromParams));
+        debug(`${logPrefix}Requesting cursor.whereCurrent snapshot (all ties)`);
        debug(
-          `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereFrom snapshot (with limit ${limit})`
+          `${logPrefix}Requesting cursor.whereFrom snapshot (with limit ${limit})`
        );
-        await Promise.all(promises);
+        await Promise.all([
+          stream.requestSnapshot(whereCurrentParams),
+          stream.requestSnapshot(whereFromParams)
+        ]);
      } else {
        const snapshotParams = sqlCompiler.compileSQL(opts, compileOptions);
        await stream.requestSnapshot(snapshotParams);
      }
+    } catch (error) {
+      if (handleSnapshotError(error, `requestSnapshot`)) {
+        return;
+      }
+      throw error;
+    }
    }
  };
  return new db.DeduplicatedLoadSubset({ loadSubset });
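The cursor branch still issues two snapshot requests per page: an unbounded one for rows tied with the cursor boundary (`whereCurrent`) and a limited one for rows past it (`whereFrom`). The change inlines both into `Promise.all` instead of accumulating a `promises` array, and routes either rejection through the new `try`/`catch`. A sketch of that shape, with a hypothetical `requestSnapshot` standing in for `stream.requestSnapshot`:

```typescript
// Two parallel snapshot requests per cursor page, awaited together so a
// rejection from either surfaces through one shared catch in the caller.
// `requestSnapshot` is a stand-in, not the ShapeStream API.
async function requestSnapshot(params: { where: string; limit?: number }): Promise<void> {
  console.log(`requesting snapshot`, params); // placeholder for a network call
}

async function loadCursorPage(whereCurrent: string, whereFrom: string, limit: number) {
  await Promise.all([
    requestSnapshot({ where: whereCurrent }), // all boundary ties, no limit
    requestSnapshot({ where: whereFrom, limit }), // next rows, capped at the page size
  ]);
}
```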
@@ -590,7 +598,9 @@ You can provide an 'onError' handler on the shapeOptions to handle this error, a
       collectionId,
       // Pass the columnMapper's encode function to transform column names
       // (e.g., camelCase to snake_case) when compiling SQL for subset queries
-      encodeColumnName: shapeOptions.columnMapper?.encode
+      encodeColumnName: shapeOptions.columnMapper?.encode,
+      // Pass abort signal so requestSnapshot errors can be ignored during cleanup
+      signal: abortController.signal
     });
     unsubscribeStream = stream.subscribe((messages) => {
       let commitPoint = null;
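The only caller change is threading `abortController.signal` into `createLoadSubsetDedupe`, so the subset loader checks the same signal that the sync `cleanup()` path aborts. A sketch of that wiring under illustrative names (the URL and `createSession` are not package exports):

```typescript
// One AbortController shared between the stream and the subset loader lets
// the loader classify failures: aborted means cleanup, anything else is real.
function createSession() {
  const abortController = new AbortController();

  async function loadSubset(): Promise<void> {
    try {
      await fetch(`https://example.invalid/snapshot`, { signal: abortController.signal });
    } catch (error) {
      if (abortController.signal.aborted) return; // expected during cleanup
      throw error;
    }
  }

  return {
    loadSubset,
    cleanup: () => abortController.abort(), // in-flight loads reject, then go quiet
  };
}
```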
@@ -1 +1 @@
[sourcemap diff omitted: the single-line electric.cjs.map entry ({"version":3,"file":"electric.cjs","sources":["../../src/electric.ts"],…}) was regenerated to match the code changes above; the extracted blob was truncated and is not human-readable]
K;AAAA,UAAA,CACN;AAAA,QACH;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAMA,QAAM,kBAAkB,MAA+B;AAErD,UAAM,SAAS,eAAe,SAAS;AAEvC,WAAO;AAAA,MACL,UAAU,aAAa,QAAQ,QAC3B,CAAC,QAAQ,aAAa,OAAO,KAAK,IAClC;AAAA,IAAA;AAAA,EAER;AAEA,MAAI;AAEJ,SAAO;AAAA,IACL,MAAM,CAAC,WAAiD;AACtD,YAAM,EAAE,OAAO,OAAO,QAAQ,WAAW,UAAU,eAAe;AAGlE,UAAI,uBAA6C;AACjD,YAAM,mBAAmB,CAAC,gBAAyB;AAEjD,YACE,eACA,aAAa,iBACb,WAAW,oBACX;AAEA,iCAAuB,UAAU,mBAAA;AACjC,+BAAqB,KAAK,MAAM;AAC9B,sBAAA;AAAA,UACF,CAAC;AAAA,QACH,OAAO;AAEL,oBAAA;AAAA,QACF;AAAA,MACF;AAGA,YAAM,kBAAkB,IAAI,gBAAA;AAE5B,UAAI,aAAa,QAAQ;AACvB,qBAAa,OAAO;AAAA,UAClB;AAAA,UACA,MAAM;AACJ,4BAAgB,MAAA;AAAA,UAClB;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UAAA;AAAA,QACR;AAEF,YAAI,aAAa,OAAO,SAAS;AAC/B,0BAAgB,MAAA;AAAA,QAClB;AAAA,MACF;AAGA,sBAAgB,OAAO,iBAAiB,SAAS,MAAM;AACrD,uBAAe,SAAS,CAAC,YAAY;AACnC,kBAAQ,QAAQ,CAAC,UAAU;AACzB,yBAAa,MAAM,SAAS;AAC5B,kBAAM,OAAO,IAAIM,OAAAA,oBAAoB;AAAA,UACvC,CAAC;AACD,qCAAW,IAAA;AAAA,QACb,CAAC;AAAA,MACH,CAAC;AAED,YAAM,SAAS,IAAIC,mBAAY;AAAA,QAC7B,GAAG;AAAA;AAAA,QAEH,KAAK,aAAa,cAAc,iBAAiB;AAAA;AAAA;AAAA,QAGjD,QACE,aAAa,WAAW,aAAa,cAAc,QAAQ;AAAA,QAC7D,QAAQ,gBAAgB;AAAA,QACxB,SAAS,CAAC,gBAAgB;AAMxB,oBAAA;AAEA,cAAI,aAAa,SAAS;AACxB,mBAAO,aAAa,QAAQ,WAAW;AAAA,UACzC,OAAO;AACL,oBAAQ;AAAA,cACN,+CAA+C,WAAW,EAAE;AAAA;AAAA;AAAA,cAG5D;AAAA,YAAA;AAAA,UAEJ;AAEA;AAAA,QACF;AAAA,MAAA,CACD;AACD,UAAI,qBAAqB;AACzB,YAAM,+BAAe,IAAA;AACrB,YAAM,eAAwC,CAAA;AAC9C,UAAI,sBAAsB;AAI1B,YAAM,yBAAyB,MAC7B,aAAa,iBAAiB,CAAC;AACjC,YAAM,mBAAsC,CAAA;AAM5C,YAAM,iCAAiB,IAAA;AAKvB,YAAM,uBAAuB,CAAC,kBAA8B;AAC1D,YAAI,CAACC,OAAAA,gBAAgB,aAAa,GAAG;AACnC;AAAA,QACF;AAGA,cAAM,OAAO,cAAc,QAAQ;AACnC,cAAM,cAAc,cAAc,QAAQ;AAC1C,cAAM,UAAU,QAAQ;AAExB,cAAM,QAAQ,WAAW,eAAe,cAAc,KAAK;AAC3D,cAAM,YAAY,cAAc,QAAQ;AAMxC,cAAM,WAAW,cAAc;AAC/B,cAAM,oBACJ,cAAc,YAAY,WAAW,IAAI,KAAK;AAEhD,YAAI,UAAU;AACZ,qBAAW,OAAO,KAAK;AAAA,QACzB,OAAO;AACL,qBAAW,IAAI,KAAK;AAAA,QACtB;AAEA,YAAI,UAAU;AACZ,0BAAgB,KAAK;AAAA,QACvB,WAAW,SAAS;AAClB,sCAA4B,MAAM,aAAa,KAAK;AAAA,QACtD;AAEA,cAAM;AAAA,UACJ,MAAM,oBAAoB,WAAW;AAAA,UACrC,OAAO,cAAc;AAAA;AAAA,UAErB,UAAU;AAAA,YACR,GAAG,cAAc;AAAA,UAAA;AAAA,QACnB,CACD;AAAA,MACH;AAKA,YAAM,mBAAmB,uBAAuB;AAAA,QAC9C;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA;AAAA;AAAA,QAGA,kBAAkB,aAAa,cAAc;AAAA,MAAA,CAC9C;AAED,0BAAoB,OAAO,UAAU,CAAC,aAAgC;AAEpE,YAAI,cAAkD;AAMtD,uBAAe,SAAS,MAAM,KAAK;AAEnC,mBAAW,WAAW,UAAU;AAE9B,cAAIA,OAAAA,gBAAgB,OAAO,KAAKC,SAAAA,iBAAiB,OAAO,GAAG;AACzD,iCAAqB,SAAS,CAAC,kBAAkB;AAC/C,oBAAM,YAAY,CAAC,GAAG,eAAe,OAAO;AAE5C,kBAAI,UAAU,SAAS,oBAAoB;AACzC,0BAAU,OAAO,GAAG,UAAU,SAAS,kBAAkB;AAAA,cAC3D;AACA,qBAAO;AAAA,YACT,CAAC;AAAA,UACH;AAMA,cACE,SAAS,OAAO,MACf,CAAC,uBAAA,KAA4B,qBAC9B;AACA,oBAAQ,QAAQ,OAAO,QAAQ,CAAC,SAAS,SAAS,IAAI,IAAI,CAAC;AAAA,UAC7D;AAIA,gBAAM,kBAAiC,CAAA;AACvC,yBAAe,MAAM,QAAQ,CAAC,OAAO,YAAY;AAC/C,gBAAI,CAAC,MAAM,SAAS;AAClB,kBAAI;AACF,sBAAM,QAAQ,OAAO;AAAA,cACvB,SAAS,KAAK;AAEZ,6BAAa,MAAM,SAAS;AAC5B,sBAAM;AAAA,kBACJ,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC;AAAA,gBAAA;AAEpD,gCAAgB,KAAK,OAAO;AAC5B,sBAAM,qBAAqB,GAAG;AAAA,cAChC;AAAA,YACF;AAAA,UACF,CAAC;AAGD,+BAAqB,eAAe;AAEpC,cAAID,OAAAA,gBAAgB,OAAO,GAAG;AAE5B,kBAAM,SAAS,QAAQ,QAAQ;AAC/B,gBAAI,UAAU,OAAO,WAAW,UAAU;AAExC,6BAAe,SAAS,MAAM,MAAM;AAAA,YACtC;AAKA,gBAAI,uBAAA,KAA4B,CAAC,oBAAoB;AACnD,+BAAiB,KAAK,OAAO;AAAA,YAC/B,OAAO;AAEL,kBAAI,CAAC,oBAAoB;AACvB,sBAAA;AACA,qCAAqB;AAAA,cACvB;AAEA,mCAAqB,OAAO;AAAA,YAC9B;AAAA,UACF,WAAW,qBAAqB,OAAO,GAAG;AAKxC,gBAAI,CAAC,uBAAA,KAA4B,oBAAoB;AACnD,2BAAa,KAAK,qBAAqB,OAAO,CAAC;AAAA,YACjD;AAAA,UACF,WAAW,kBAAkB,OAAO,GAAG;AAErC,0BAAc;AAAA,UAChB,WAAW,mBAAmB,OAAO,GAAG;AAEtC,gBAAI,gBAAgB,cAAc;AAChC,4BAAc;AAAA,YAChB;AAAA,UACF,WAAWC,
0BAAiB,OAAO,GAAG;AAIpC,gBAAI,uBAAA,KAA4B,CAAC,oBAAoB;AACnD,+BAAiB,KAAK,OAAO;AAAA,YAC/B,OAAO;AAEL,mCAAqB;AAAA,gBACnB,QAAQ,QAAQ;AAAA,gBAChB;AAAA,gBACA;AAAA,gBACA;AAAA,cAAA;AAAA,YAEJ;AAAA,UACF,WAAW,qBAAqB,OAAO,GAAG;AACxC;AAAA,cACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,YAAA;AAI7C,gBAAI,CAAC,oBAAoB;AACvB,oBAAA;AACA,mCAAqB;AAAA,YACvB;AAEA,qBAAA;AAGA,kCAAA;AAGA,uBAAW,MAAA;AAIX,8BAAkB,MAAA;AAGlB,0BAAc;AACd,kCAAsB;AACtB,6BAAiB,SAAS;AAAA,UAC5B;AAAA,QACF;AAEA,YAAI,gBAAgB,MAAM;AAIxB,cACE,uBAAA,KACA,gBAAgB,gBAChB,CAAC,oBACD;AACA;AAAA,cACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE,iDAAiD,iBAAiB,MAAM;AAAA,YAAA;AAIrH,kBAAA;AAGA,qBAAA;AAGA,kCAAA;AAGA,uBAAW,MAAA;AAGX,uBAAW,eAAe,kBAAkB;AAC1C,kBAAID,OAAAA,gBAAgB,WAAW,GAAG;AAChC,qCAAqB,WAAW;AAGhC,oBAAI,SAAS,WAAW,GAAG;AACzB,8BAAY,QAAQ,OAAO;AAAA,oBAAQ,CAAC,SAClC,SAAS,IAAI,IAAI;AAAA,kBAAA;AAAA,gBAErB;AAAA,cACF,WAAW,qBAAqB,WAAW,GAAG;AAE5C,6BAAa,KAAK,qBAAqB,WAAW,CAAC;AAAA,cACrD,WAAWC,0BAAiB,WAAW,GAAG;AAExC;AAAA,kBACE,YAAY,QAAQ;AAAA,kBACpB;AAAA,kBACA;AAAA,kBACA;AAAA,gBAAA;AAAA,cAEJ;AAAA,YACF;AAGA,mBAAA;AAIA,6BAAiB,SAAS;AAE1B;AAAA,cACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,YAAA;AAAA,UAE/C,OAAO;AAGL,gBAAI,oBAAoB;AACtB,qBAAA;AACA,mCAAqB;AAAA,YACvB;AAAA,UACF;AACA,2BAAiB,wBAAwB;AAGzC,cAAI,gBAAgB,cAAc;AAChC,kCAAsB;AAAA,UACxB;AAGA,oBAAU,SAAS,CAAC,iBAAiB;AACnC,kBAAM,aAAa,IAAI,IAAU,YAAY;AAC7C,gBAAI,SAAS,OAAO,GAAG;AACrB;AAAA,gBACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,gBAC3C,MAAM,KAAK,QAAQ;AAAA,cAAA;AAAA,YAEvB;AACA,qBAAS,QAAQ,CAAC,SAAS,WAAW,IAAI,IAAI,CAAC;AAC/C,qBAAS,MAAA;AACT,mBAAO;AAAA,UACT,CAAC;AAGD,wBAAc,SAAS,CAAC,qBAAqB;AAC3C,kBAAM,OAAO,CAAC,GAAG,kBAAkB,GAAG,YAAY;AAClD,yBAAa;AAAA,cAAQ,CAAC,aACpB;AAAA,gBACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,gBAC3C;AAAA,cAAA;AAAA,YACF;AAEF,yBAAa,SAAS;AACtB,mBAAO;AAAA,UACT,CAAC;AAKD,yBAAe,SAAS,MAAM,IAAI;AAElC,uCAAA;AAAA,QACF;AAAA,MACF,CAAC;AAID,aAAO;AAAA,QACL,YAAY,kBAAkB;AAAA,QAC9B,SAAS,MAAM;AAEb,4BAAA;AAEA,0BAAgB,MAAA;AAEhB,4BAAkB,MAAA;AAAA,QACpB;AAAA,MAAA;AAAA,IAEJ;AAAA;AAAA,IAEA;AAAA,EAAA;AAEJ;;;;;;;;;;;"}
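The added source map that follows embeds the complete TypeScript source of `src/electric.ts` in its `sourcesContent` field, which is the most readable record of what this version ships. Two behaviors documented there stand out: `createLoadSubsetDedupe` now receives an abort `signal` and routes snapshot failures through a `handleSnapshotError` helper, so errors raised after the stream has been aborted during cleanup are logged and swallowed rather than rethrown; and when `loadSubset` is called with a cursor, it issues two snapshot requests, one for `cursor.whereFrom` (rows past the cursor, with the limit) and one for `cursor.whereCurrent` (rows equal to the cursor, without a limit, so ties are never truncated), awaiting both with `Promise.all` under the same error handling.

A minimal sketch of the abort-aware error handling pattern, written against the standard `fetch`/`AbortSignal` APIs rather than the package's `ShapeStream`; the names `makeAbortAwareHandler` and `loadRows` are illustrative, not part of the package:

  // Returns a helper that reports whether an error should be swallowed:
  // true when the consumer has already aborted (cleanup, not a failure),
  // false when the error is genuine and the caller should rethrow.
  function makeAbortAwareHandler(signal: AbortSignal) {
    return (error: unknown, operation: string): boolean => {
      if (signal.aborted) {
        console.debug(`Ignoring ${operation} error during cleanup:`, error)
        return true
      }
      console.debug(`Error in ${operation}:`, error)
      return false
    }
  }

  async function loadRows(
    url: string,
    signal: AbortSignal,
  ): Promise<unknown | undefined> {
    const handleError = makeAbortAwareHandler(signal)
    try {
      const response = await fetch(url, { signal })
      return await response.json()
    } catch (error) {
      // Aborting rejects the in-flight fetch; treat that as a quiet no-op.
      if (handleError(error, `fetch`)) return undefined
      throw error
    }
  }

The distinction matters because tearing down a live query necessarily rejects any in-flight snapshot request; surfacing those rejections would turn every routine cleanup into a spurious error.

The paired snapshot requests implement keyset pagination with tie-breaking. A self-contained sketch of the idea over an in-memory array; the `Row` shape and its field names are assumptions for illustration:

  type Row = { createdAt: number; id: number }

  // Union of the two halves described in the source: rows strictly past
  // the cursor on the ordering key, bounded by the limit (whereFrom),
  // plus every row tied with the cursor, unbounded (whereCurrent).
  function pageAfterCursor(rows: Array<Row>, cursor: Row, limit: number) {
    const ordered = [...rows].sort(
      (a, b) => a.createdAt - b.createdAt || a.id - b.id,
    )
    const ties = ordered.filter(
      (r) => r.createdAt === cursor.createdAt && r.id !== cursor.id,
    )
    const past = ordered
      .filter((r) => r.createdAt > cursor.createdAt)
      .slice(0, limit)
    return [...ties, ...past]
  }

Fetching the ties without a limit is what keeps the pagination correct when many rows share the same ordering value: a limited greater-than query on its own could silently drop rows that sort equal to the cursor.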
1
+ {"version":3,"file":"electric.cjs","sources":["../../src/electric.ts"],"sourcesContent":["import {\n ShapeStream,\n isChangeMessage,\n isControlMessage,\n isVisibleInSnapshot,\n} from '@electric-sql/client'\nimport { Store } from '@tanstack/store'\nimport DebugModule from 'debug'\nimport { DeduplicatedLoadSubset, and } from '@tanstack/db'\nimport {\n ExpectedNumberInAwaitTxIdError,\n StreamAbortedError,\n TimeoutWaitingForMatchError,\n TimeoutWaitingForTxIdError,\n} from './errors'\nimport { compileSQL } from './sql-compiler'\nimport {\n addTagToIndex,\n findRowsMatchingPattern,\n getTagLength,\n isMoveOutMessage,\n removeTagFromIndex,\n tagMatchesPattern,\n} from './tag-index'\nimport type { ColumnEncoder } from './sql-compiler'\nimport type {\n MoveOutPattern,\n MoveTag,\n ParsedMoveTag,\n RowId,\n TagIndex,\n} from './tag-index'\nimport type {\n BaseCollectionConfig,\n ChangeMessageOrDeleteKeyMessage,\n CollectionConfig,\n DeleteMutationFnParams,\n InsertMutationFnParams,\n LoadSubsetOptions,\n SyncConfig,\n SyncMode,\n UpdateMutationFnParams,\n UtilsRecord,\n} from '@tanstack/db'\nimport type { StandardSchemaV1 } from '@standard-schema/spec'\nimport type {\n ControlMessage,\n GetExtensions,\n Message,\n PostgresSnapshot,\n Row,\n ShapeStreamOptions,\n} from '@electric-sql/client'\n\n// Re-export for user convenience in custom match functions\nexport { isChangeMessage, isControlMessage } from '@electric-sql/client'\n\nconst debug = DebugModule.debug(`ts/db:electric`)\n\n/**\n * Symbol for internal test hooks (hidden from public API)\n */\nexport const ELECTRIC_TEST_HOOKS = Symbol(`electricTestHooks`)\n\n/**\n * Internal test hooks interface (for testing only)\n */\nexport interface ElectricTestHooks {\n /**\n * Called before marking collection ready after first up-to-date in progressive mode\n * Allows tests to pause and validate snapshot phase before atomic swap completes\n */\n beforeMarkingReady?: () => Promise<void>\n}\n\n/**\n * Type representing a transaction ID in ElectricSQL\n */\nexport type Txid = number\n\n/**\n * Custom match function type - receives stream messages and returns boolean\n * indicating if the mutation has been synchronized\n */\nexport type MatchFunction<T extends Row<unknown>> = (\n message: Message<T>,\n) => boolean\n\n/**\n * Matching strategies for Electric synchronization\n * Handlers can return:\n * - Txid strategy: { txid: number | number[], timeout?: number } (recommended)\n * - Void (no return value) - mutation completes without waiting\n *\n * The optional timeout property specifies how long to wait for the txid(s) in milliseconds.\n * If not specified, defaults to 5000ms.\n */\nexport type MatchingStrategy = {\n txid: Txid | Array<Txid>\n timeout?: number\n} | void\n\n/**\n * Type representing a snapshot end message\n */\ntype SnapshotEndMessage = ControlMessage & {\n headers: { control: `snapshot-end` }\n}\n// The `InferSchemaOutput` and `ResolveType` are copied from the `@tanstack/db` package\n// but we modified `InferSchemaOutput` slightly to restrict the schema output to `Row<unknown>`\n// This is needed in order for `GetExtensions` to be able to infer the parser extensions type from the schema\ntype InferSchemaOutput<T> = T extends StandardSchemaV1\n ? StandardSchemaV1.InferOutput<T> extends Row<unknown>\n ? 
StandardSchemaV1.InferOutput<T>\n : Record<string, unknown>\n : Record<string, unknown>\n\n/**\n * The mode of sync to use for the collection.\n * @default `eager`\n * @description\n * - `eager`:\n * - syncs all data immediately on preload\n * - collection will be marked as ready once the sync is complete\n * - there is no incremental sync\n * - `on-demand`:\n * - syncs data in incremental snapshots when the collection is queried\n * - collection will be marked as ready immediately after the first snapshot is synced\n * - `progressive`:\n * - syncs all data for the collection in the background\n * - uses incremental snapshots during the initial sync to provide a fast path to the data required for queries\n * - collection will be marked as ready once the full sync is complete\n */\nexport type ElectricSyncMode = SyncMode | `progressive`\n\n/**\n * Configuration interface for Electric collection options\n * @template T - The type of items in the collection\n * @template TSchema - The schema type for validation\n */\nexport interface ElectricCollectionConfig<\n T extends Row<unknown> = Row<unknown>,\n TSchema extends StandardSchemaV1 = never,\n> extends Omit<\n BaseCollectionConfig<\n T,\n string | number,\n TSchema,\n ElectricCollectionUtils<T>,\n any\n >,\n `onInsert` | `onUpdate` | `onDelete` | `syncMode`\n> {\n /**\n * Configuration options for the ElectricSQL ShapeStream\n */\n shapeOptions: ShapeStreamOptions<GetExtensions<T>>\n syncMode?: ElectricSyncMode\n\n /**\n * Internal test hooks (for testing only)\n * Hidden via Symbol to prevent accidental usage in production\n */\n [ELECTRIC_TEST_HOOKS]?: ElectricTestHooks\n\n /**\n * Optional asynchronous handler function called before an insert operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? } or void\n * @example\n * // Basic Electric insert handler with txid (recommended)\n * onInsert: async ({ transaction }) => {\n * const newItem = transaction.mutations[0].modified\n * const result = await api.todos.create({\n * data: newItem\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Insert handler with custom timeout\n * onInsert: async ({ transaction }) => {\n * const newItem = transaction.mutations[0].modified\n * const result = await api.todos.create({\n * data: newItem\n * })\n * return { txid: result.txid, timeout: 10000 } // Wait up to 10 seconds\n * }\n *\n * @example\n * // Insert handler with multiple items - return array of txids\n * onInsert: async ({ transaction }) => {\n * const items = transaction.mutations.map(m => m.modified)\n * const results = await Promise.all(\n * items.map(item => api.todos.create({ data: item }))\n * )\n * return { txid: results.map(r => r.txid) }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onInsert: async ({ transaction, collection }) => {\n * const newItem = transaction.mutations[0].modified\n * await api.todos.create({ data: newItem })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'insert' &&\n * message.value.name === newItem.name\n * )\n * }\n */\n onInsert?: (\n params: InsertMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n\n /**\n * Optional asynchronous handler function called before an update operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? 
} or void\n * @example\n * // Basic Electric update handler with txid (recommended)\n * onUpdate: async ({ transaction }) => {\n * const { original, changes } = transaction.mutations[0]\n * const result = await api.todos.update({\n * where: { id: original.id },\n * data: changes\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onUpdate: async ({ transaction, collection }) => {\n * const { original, changes } = transaction.mutations[0]\n * await api.todos.update({ where: { id: original.id }, data: changes })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'update' &&\n * message.value.id === original.id\n * )\n * }\n */\n onUpdate?: (\n params: UpdateMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n\n /**\n * Optional asynchronous handler function called before a delete operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? } or void\n * @example\n * // Basic Electric delete handler with txid (recommended)\n * onDelete: async ({ transaction }) => {\n * const mutation = transaction.mutations[0]\n * const result = await api.todos.delete({\n * id: mutation.original.id\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onDelete: async ({ transaction, collection }) => {\n * const mutation = transaction.mutations[0]\n * await api.todos.delete({ id: mutation.original.id })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'delete' &&\n * message.value.id === mutation.original.id\n * )\n * }\n */\n onDelete?: (\n params: DeleteMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n}\n\nfunction isUpToDateMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { up_to_date: true } {\n return isControlMessage(message) && message.headers.control === `up-to-date`\n}\n\nfunction isMustRefetchMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { headers: { control: `must-refetch` } } {\n return isControlMessage(message) && message.headers.control === `must-refetch`\n}\n\nfunction isSnapshotEndMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is SnapshotEndMessage {\n return isControlMessage(message) && message.headers.control === `snapshot-end`\n}\n\nfunction isSubsetEndMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { headers: { control: `subset-end` } } {\n return (\n isControlMessage(message) &&\n (message.headers.control as string) === `subset-end`\n )\n}\n\nfunction parseSnapshotMessage(message: SnapshotEndMessage): PostgresSnapshot {\n return {\n xmin: message.headers.xmin,\n xmax: message.headers.xmax,\n xip_list: message.headers.xip_list,\n }\n}\n\n// Check if a message contains txids in its headers\nfunction hasTxids<T extends Row<unknown>>(\n message: Message<T>,\n): message is Message<T> & { headers: { txids?: Array<Txid> } } {\n return `txids` in message.headers && Array.isArray(message.headers.txids)\n}\n\n/**\n * Creates a deduplicated loadSubset handler for progressive/on-demand modes\n * Returns null for eager mode, or a DeduplicatedLoadSubset instance for other modes.\n * Handles fetching snapshots in 
progressive mode during buffering phase,\n * and requesting snapshots in on-demand mode.\n *\n * When cursor expressions are provided (whereFrom/whereCurrent), makes two\n * requestSnapshot calls:\n * - One for whereFrom (rows > cursor) with limit\n * - One for whereCurrent (rows = cursor, for tie-breaking) without limit\n */\nfunction createLoadSubsetDedupe<T extends Row<unknown>>({\n stream,\n syncMode,\n isBufferingInitialSync,\n begin,\n write,\n commit,\n collectionId,\n encodeColumnName,\n signal,\n}: {\n stream: ShapeStream<T>\n syncMode: ElectricSyncMode\n isBufferingInitialSync: () => boolean\n begin: () => void\n write: (mutation: {\n type: `insert` | `update` | `delete`\n value: T\n metadata: Record<string, unknown>\n }) => void\n commit: () => void\n collectionId?: string\n /**\n * Optional function to encode column names (e.g., camelCase to snake_case).\n * This is typically the `encode` function from shapeOptions.columnMapper.\n */\n encodeColumnName?: ColumnEncoder\n /**\n * Abort signal to check if the stream has been aborted during cleanup.\n * When aborted, errors from requestSnapshot are silently ignored.\n */\n signal: AbortSignal\n}): DeduplicatedLoadSubset | null {\n if (syncMode === `eager`) {\n return null\n }\n\n const compileOptions = encodeColumnName ? { encodeColumnName } : undefined\n const logPrefix = collectionId ? `[${collectionId}] ` : ``\n\n /**\n * Handles errors from snapshot operations. Returns true if the error was\n * handled (signal aborted during cleanup), false if it should be re-thrown.\n */\n function handleSnapshotError(error: unknown, operation: string): boolean {\n if (signal.aborted) {\n debug(`${logPrefix}Ignoring ${operation} error during cleanup: %o`, error)\n return true\n }\n debug(`${logPrefix}Error in ${operation}: %o`, error)\n return false\n }\n\n const loadSubset = async (opts: LoadSubsetOptions) => {\n if (isBufferingInitialSync()) {\n const snapshotParams = compileSQL<T>(opts, compileOptions)\n try {\n const { data: rows } = await stream.fetchSnapshot(snapshotParams)\n\n if (!isBufferingInitialSync()) {\n debug(`${logPrefix}Ignoring snapshot - sync completed while fetching`)\n return\n }\n\n if (rows.length > 0) {\n begin()\n for (const row of rows) {\n write({\n type: `insert`,\n value: row.value,\n metadata: { ...row.headers },\n })\n }\n commit()\n debug(`${logPrefix}Applied snapshot with ${rows.length} rows`)\n }\n } catch (error) {\n if (handleSnapshotError(error, `fetchSnapshot`)) {\n return\n }\n throw error\n }\n return\n }\n\n if (syncMode === `progressive`) {\n return\n }\n\n const { cursor, where, orderBy, limit } = opts\n\n try {\n if (cursor) {\n const whereCurrentOpts: LoadSubsetOptions = {\n where: where ? and(where, cursor.whereCurrent) : cursor.whereCurrent,\n orderBy,\n }\n const whereCurrentParams = compileSQL<T>(\n whereCurrentOpts,\n compileOptions,\n )\n\n const whereFromOpts: LoadSubsetOptions = {\n where: where ? 
and(where, cursor.whereFrom) : cursor.whereFrom,\n orderBy,\n limit,\n }\n const whereFromParams = compileSQL<T>(whereFromOpts, compileOptions)\n\n debug(`${logPrefix}Requesting cursor.whereCurrent snapshot (all ties)`)\n debug(\n `${logPrefix}Requesting cursor.whereFrom snapshot (with limit ${limit})`,\n )\n\n await Promise.all([\n stream.requestSnapshot(whereCurrentParams),\n stream.requestSnapshot(whereFromParams),\n ])\n } else {\n const snapshotParams = compileSQL<T>(opts, compileOptions)\n await stream.requestSnapshot(snapshotParams)\n }\n } catch (error) {\n if (handleSnapshotError(error, `requestSnapshot`)) {\n return\n }\n throw error\n }\n }\n\n return new DeduplicatedLoadSubset({ loadSubset })\n}\n\n/**\n * Type for the awaitTxId utility function\n */\nexport type AwaitTxIdFn = (txId: Txid, timeout?: number) => Promise<boolean>\n\n/**\n * Type for the awaitMatch utility function\n */\nexport type AwaitMatchFn<T extends Row<unknown>> = (\n matchFn: MatchFunction<T>,\n timeout?: number,\n) => Promise<boolean>\n\n/**\n * Electric collection utilities type\n */\nexport interface ElectricCollectionUtils<\n T extends Row<unknown> = Row<unknown>,\n> extends UtilsRecord {\n awaitTxId: AwaitTxIdFn\n awaitMatch: AwaitMatchFn<T>\n}\n\n/**\n * Creates Electric collection options for use with a standard Collection\n *\n * @template T - The explicit type of items in the collection (highest priority)\n * @template TSchema - The schema type for validation and type inference (second priority)\n * @template TFallback - The fallback type if no explicit or schema type is provided\n * @param config - Configuration options for the Electric collection\n * @returns Collection options with utilities\n */\n\n// Overload for when schema is provided\nexport function electricCollectionOptions<T extends StandardSchemaV1>(\n config: ElectricCollectionConfig<InferSchemaOutput<T>, T> & {\n schema: T\n },\n): Omit<CollectionConfig<InferSchemaOutput<T>, string | number, T>, `utils`> & {\n id?: string\n utils: ElectricCollectionUtils<InferSchemaOutput<T>>\n schema: T\n}\n\n// Overload for when no schema is provided\nexport function electricCollectionOptions<T extends Row<unknown>>(\n config: ElectricCollectionConfig<T> & {\n schema?: never // prohibit schema\n },\n): Omit<CollectionConfig<T, string | number>, `utils`> & {\n id?: string\n utils: ElectricCollectionUtils<T>\n schema?: never // no schema in the result\n}\n\nexport function electricCollectionOptions<T extends Row<unknown>>(\n config: ElectricCollectionConfig<T, any>,\n): Omit<\n CollectionConfig<T, string | number, any, ElectricCollectionUtils<T>>,\n `utils`\n> & {\n id?: string\n utils: ElectricCollectionUtils<T>\n schema?: any\n} {\n const seenTxids = new Store<Set<Txid>>(new Set([]))\n const seenSnapshots = new Store<Array<PostgresSnapshot>>([])\n const internalSyncMode = config.syncMode ?? `eager`\n const finalSyncMode =\n internalSyncMode === `progressive` ? 
`on-demand` : internalSyncMode\n const pendingMatches = new Store<\n Map<\n string,\n {\n matchFn: (message: Message<any>) => boolean\n resolve: (value: boolean) => void\n reject: (error: Error) => void\n timeoutId: ReturnType<typeof setTimeout>\n matched: boolean\n }\n >\n >(new Map())\n\n // Buffer messages since last up-to-date to handle race conditions\n const currentBatchMessages = new Store<Array<Message<any>>>([])\n\n // Track whether the current batch has been committed (up-to-date received)\n // This allows awaitMatch to resolve immediately for messages from committed batches\n const batchCommitted = new Store<boolean>(false)\n\n /**\n * Helper function to remove multiple matches from the pendingMatches store\n */\n const removePendingMatches = (matchIds: Array<string>) => {\n if (matchIds.length > 0) {\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n matchIds.forEach((id) => newMatches.delete(id))\n return newMatches\n })\n }\n }\n\n /**\n * Helper function to resolve and cleanup matched pending matches\n */\n const resolveMatchedPendingMatches = () => {\n const matchesToResolve: Array<string> = []\n pendingMatches.state.forEach((match, matchId) => {\n if (match.matched) {\n clearTimeout(match.timeoutId)\n match.resolve(true)\n matchesToResolve.push(matchId)\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch resolved on up-to-date for match %s`,\n matchId,\n )\n }\n })\n removePendingMatches(matchesToResolve)\n }\n const sync = createElectricSync<T>(config.shapeOptions, {\n seenTxids,\n seenSnapshots,\n syncMode: internalSyncMode,\n pendingMatches,\n currentBatchMessages,\n batchCommitted,\n removePendingMatches,\n resolveMatchedPendingMatches,\n collectionId: config.id,\n testHooks: config[ELECTRIC_TEST_HOOKS],\n })\n\n /**\n * Wait for a specific transaction ID to be synced\n * @param txId The transaction ID to wait for as a number\n * @param timeout Optional timeout in milliseconds (defaults to 5000ms)\n * @returns Promise that resolves when the txId is synced\n */\n const awaitTxId: AwaitTxIdFn = async (\n txId: Txid,\n timeout: number = 5000,\n ): Promise<boolean> => {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitTxId called with txid %d`,\n txId,\n )\n if (typeof txId !== `number`) {\n throw new ExpectedNumberInAwaitTxIdError(typeof txId, config.id)\n }\n\n // First check if the txid is in the seenTxids store\n const hasTxid = seenTxids.state.has(txId)\n if (hasTxid) return true\n\n // Then check if the txid is in any of the seen snapshots\n const hasSnapshot = seenSnapshots.state.some((snapshot) =>\n isVisibleInSnapshot(txId, snapshot),\n )\n if (hasSnapshot) return true\n\n return new Promise((resolve, reject) => {\n const timeoutId = setTimeout(() => {\n unsubscribeSeenTxids()\n unsubscribeSeenSnapshots()\n reject(new TimeoutWaitingForTxIdError(txId, config.id))\n }, timeout)\n\n const unsubscribeSeenTxids = seenTxids.subscribe(() => {\n if (seenTxids.state.has(txId)) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitTxId found match for txid %o`,\n txId,\n )\n clearTimeout(timeoutId)\n unsubscribeSeenTxids()\n unsubscribeSeenSnapshots()\n resolve(true)\n }\n })\n\n const unsubscribeSeenSnapshots = seenSnapshots.subscribe(() => {\n const visibleSnapshot = seenSnapshots.state.find((snapshot) =>\n isVisibleInSnapshot(txId, snapshot),\n )\n if (visibleSnapshot) {\n debug(\n `${config.id ? 
`[${config.id}] ` : ``}awaitTxId found match for txid %o in snapshot %o`,\n txId,\n visibleSnapshot,\n )\n clearTimeout(timeoutId)\n unsubscribeSeenSnapshots()\n unsubscribeSeenTxids()\n resolve(true)\n }\n })\n })\n }\n\n /**\n * Wait for a custom match function to find a matching message\n * @param matchFn Function that returns true when a message matches\n * @param timeout Optional timeout in milliseconds (defaults to 5000ms)\n * @returns Promise that resolves when a matching message is found\n */\n const awaitMatch: AwaitMatchFn<any> = async (\n matchFn: MatchFunction<any>,\n timeout: number = 3000,\n ): Promise<boolean> => {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch called with custom function`,\n )\n\n return new Promise((resolve, reject) => {\n const matchId = Math.random().toString(36)\n\n const cleanupMatch = () => {\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.delete(matchId)\n return newMatches\n })\n }\n\n const onTimeout = () => {\n cleanupMatch()\n reject(new TimeoutWaitingForMatchError(config.id))\n }\n\n const timeoutId = setTimeout(onTimeout, timeout)\n\n // We need access to the stream messages to check against the match function\n // This will be handled by the sync configuration\n const checkMatch = (message: Message<any>) => {\n if (matchFn(message)) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch found matching message, waiting for up-to-date`,\n )\n // Mark as matched but don't resolve yet - wait for up-to-date\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n const existing = newMatches.get(matchId)\n if (existing) {\n newMatches.set(matchId, { ...existing, matched: true })\n }\n return newMatches\n })\n return true\n }\n return false\n }\n\n // Check against current batch messages first to handle race conditions\n for (const message of currentBatchMessages.state) {\n if (matchFn(message)) {\n // If batch is committed (up-to-date already received), resolve immediately\n // just like awaitTxId does when it finds a txid in seenTxids\n if (batchCommitted.state) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch found immediate match in committed batch, resolving immediately`,\n )\n clearTimeout(timeoutId)\n resolve(true)\n return\n }\n\n // If batch is not yet committed, register match and wait for up-to-date\n debug(\n `${config.id ? 
`[${config.id}] ` : ``}awaitMatch found immediate match in current batch, waiting for up-to-date`,\n )\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.set(matchId, {\n matchFn: checkMatch,\n resolve,\n reject,\n timeoutId,\n matched: true, // Already matched, will resolve on up-to-date\n })\n return newMatches\n })\n return\n }\n }\n\n // Store the match function for the sync process to use\n // We'll add this to a pending matches store\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.set(matchId, {\n matchFn: checkMatch,\n resolve,\n reject,\n timeoutId,\n matched: false,\n })\n return newMatches\n })\n })\n }\n\n /**\n * Process matching strategy and wait for synchronization\n */\n const processMatchingStrategy = async (\n result: MatchingStrategy,\n ): Promise<void> => {\n // Only wait if result contains txid\n if (result && `txid` in result) {\n const timeout = result.timeout\n // Handle both single txid and array of txids\n if (Array.isArray(result.txid)) {\n await Promise.all(result.txid.map((txid) => awaitTxId(txid, timeout)))\n } else {\n await awaitTxId(result.txid, timeout)\n }\n }\n // If result is void/undefined, don't wait - mutation completes immediately\n }\n\n // Create wrapper handlers for direct persistence operations that handle different matching strategies\n const wrappedOnInsert = config.onInsert\n ? async (\n params: InsertMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onInsert!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n const wrappedOnUpdate = config.onUpdate\n ? async (\n params: UpdateMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onUpdate!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n const wrappedOnDelete = config.onDelete\n ? 
async (\n params: DeleteMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onDelete!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n // Extract standard Collection config properties\n const {\n shapeOptions: _shapeOptions,\n onInsert: _onInsert,\n onUpdate: _onUpdate,\n onDelete: _onDelete,\n ...restConfig\n } = config\n\n return {\n ...restConfig,\n syncMode: finalSyncMode,\n sync,\n onInsert: wrappedOnInsert,\n onUpdate: wrappedOnUpdate,\n onDelete: wrappedOnDelete,\n utils: {\n awaitTxId,\n awaitMatch,\n },\n }\n}\n\n/**\n * Internal function to create ElectricSQL sync configuration\n */\nfunction createElectricSync<T extends Row<unknown>>(\n shapeOptions: ShapeStreamOptions<GetExtensions<T>>,\n options: {\n syncMode: ElectricSyncMode\n seenTxids: Store<Set<Txid>>\n seenSnapshots: Store<Array<PostgresSnapshot>>\n pendingMatches: Store<\n Map<\n string,\n {\n matchFn: (message: Message<T>) => boolean\n resolve: (value: boolean) => void\n reject: (error: Error) => void\n timeoutId: ReturnType<typeof setTimeout>\n matched: boolean\n }\n >\n >\n currentBatchMessages: Store<Array<Message<T>>>\n batchCommitted: Store<boolean>\n removePendingMatches: (matchIds: Array<string>) => void\n resolveMatchedPendingMatches: () => void\n collectionId?: string\n testHooks?: ElectricTestHooks\n },\n): SyncConfig<T> {\n const {\n seenTxids,\n seenSnapshots,\n syncMode,\n pendingMatches,\n currentBatchMessages,\n batchCommitted,\n removePendingMatches,\n resolveMatchedPendingMatches,\n collectionId,\n testHooks,\n } = options\n const MAX_BATCH_MESSAGES = 1000 // Safety limit for message buffer\n\n // Store for the relation schema information\n const relationSchema = new Store<string | undefined>(undefined)\n\n const tagCache = new Map<MoveTag, ParsedMoveTag>()\n\n // Parses a tag string into a MoveTag.\n // It memoizes the result parsed tag such that future calls\n // for the same tag string return the same MoveTag array.\n const parseTag = (tag: MoveTag): ParsedMoveTag => {\n const cachedTag = tagCache.get(tag)\n if (cachedTag) {\n return cachedTag\n }\n\n const parsedTag = tag.split(`|`)\n tagCache.set(tag, parsedTag)\n return parsedTag\n }\n\n // Tag tracking state\n const rowTagSets = new Map<RowId, Set<MoveTag>>()\n const tagIndex: TagIndex = []\n let tagLength: number | undefined = undefined\n\n /**\n * Initialize the tag index with the correct length\n */\n const initializeTagIndex = (length: number): void => {\n if (tagIndex.length < length) {\n // Extend the index array to the required length\n for (let i = tagIndex.length; i < length; i++) {\n tagIndex[i] = new Map()\n }\n }\n }\n\n /**\n * Add tags to a row and update the tag index\n */\n const addTagsToRow = (\n tags: Array<MoveTag>,\n rowId: RowId,\n rowTagSet: Set<MoveTag>,\n ): void => {\n for (const tag of tags) {\n const parsedTag = parseTag(tag)\n\n // Infer tag length from first tag\n if (tagLength === undefined) {\n tagLength = getTagLength(parsedTag)\n initializeTagIndex(tagLength)\n }\n\n // Validate tag length matches\n const currentTagLength = getTagLength(parsedTag)\n if (currentTagLength !== tagLength) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Tag length mismatch: expected ${tagLength}, got ${currentTagLength}`,\n )\n continue\n }\n\n rowTagSet.add(tag)\n addTagToIndex(parsedTag, rowId, tagIndex, tagLength)\n }\n }\n\n /**\n * Remove tags from a row and update the tag index\n */\n const removeTagsFromRow = (\n removedTags: Array<MoveTag>,\n rowId: RowId,\n rowTagSet: Set<MoveTag>,\n ): void => {\n if (tagLength === undefined) {\n return\n }\n\n for (const tag of removedTags) {\n const parsedTag = parseTag(tag)\n rowTagSet.delete(tag)\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength)\n // We aggresively evict the tag from the cache\n // if this tag is shared with another row\n // and is not removed from that other row\n // then next time we encounter the tag it will be parsed again\n tagCache.delete(tag)\n }\n }\n\n /**\n * Process tags for a change message (add and remove tags)\n */\n const processTagsForChangeMessage = (\n tags: Array<MoveTag> | undefined,\n removedTags: Array<MoveTag> | undefined,\n rowId: RowId,\n ): Set<MoveTag> => {\n // Initialize tag set for this row if it doesn't exist (needed for checking deletion)\n if (!rowTagSets.has(rowId)) {\n rowTagSets.set(rowId, new Set())\n }\n const rowTagSet = rowTagSets.get(rowId)!\n\n // Add new tags\n if (tags) {\n addTagsToRow(tags, rowId, rowTagSet)\n }\n\n // Remove tags\n if (removedTags) {\n removeTagsFromRow(removedTags, rowId, rowTagSet)\n }\n\n return rowTagSet\n }\n\n /**\n * Clear all tag tracking state (used when truncating)\n */\n const clearTagTrackingState = (): void => {\n rowTagSets.clear()\n tagIndex.length = 0\n tagLength = undefined\n }\n\n /**\n * Remove all tags for a row from both the tag set and the index\n * Used when a row is deleted\n */\n const clearTagsForRow = (rowId: RowId): void => {\n if (tagLength === undefined) {\n return\n }\n\n const rowTagSet = rowTagSets.get(rowId)\n if (!rowTagSet) {\n return\n }\n\n // Remove each tag from the index\n for (const tag of rowTagSet) {\n const parsedTag = parseTag(tag)\n const currentTagLength = getTagLength(parsedTag)\n if (currentTagLength === tagLength) {\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength)\n }\n tagCache.delete(tag)\n }\n\n // Remove the row from the tag sets map\n rowTagSets.delete(rowId)\n }\n\n /**\n * Remove matching tags from a row based on a pattern\n * Returns true if the row's tag set is now empty\n */\n const removeMatchingTagsFromRow = (\n rowId: RowId,\n pattern: MoveOutPattern,\n ): boolean => {\n const rowTagSet = rowTagSets.get(rowId)\n if (!rowTagSet) {\n return false\n }\n\n // Find tags that match this pattern and remove them\n for (const tag of rowTagSet) {\n const parsedTag = parseTag(tag)\n if (tagMatchesPattern(parsedTag, pattern)) {\n rowTagSet.delete(tag)\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength!)\n }\n }\n\n // Check if row's tag set is now empty\n if (rowTagSet.size === 0) {\n rowTagSets.delete(rowId)\n return true\n }\n\n return false\n }\n\n /**\n * Process move-out event: remove matching tags from rows and delete rows with empty tag sets\n */\n const processMoveOutEvent = (\n patterns: Array<MoveOutPattern>,\n begin: () => void,\n write: (message: ChangeMessageOrDeleteKeyMessage<T>) => void,\n transactionStarted: boolean,\n ): boolean => {\n if (tagLength === undefined) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Received move-out message but no tag length set yet, ignoring`,\n )\n return transactionStarted\n }\n\n let txStarted = transactionStarted\n\n // Process all patterns and collect rows to delete\n for (const pattern of patterns) {\n // Find all rows that match this pattern\n const affectedRowIds = findRowsMatchingPattern(pattern, tagIndex)\n\n for (const rowId of affectedRowIds) {\n if (removeMatchingTagsFromRow(rowId, pattern)) {\n // Delete rows with empty tag sets\n if (!txStarted) {\n begin()\n txStarted = true\n }\n\n write({\n type: `delete`,\n key: rowId,\n })\n }\n }\n }\n\n return txStarted\n }\n\n /**\n * Get the sync metadata for insert operations\n * @returns Record containing relation information\n */\n const getSyncMetadata = (): Record<string, unknown> => {\n // Use the stored schema if available, otherwise default to 'public'\n const schema = relationSchema.state || `public`\n\n return {\n relation: shapeOptions.params?.table\n ? [schema, shapeOptions.params.table]\n : undefined,\n }\n }\n\n let unsubscribeStream: () => void\n\n return {\n sync: (params: Parameters<SyncConfig<T>[`sync`]>[0]) => {\n const { begin, write, commit, markReady, truncate, collection } = params\n\n // Wrap markReady to wait for test hook in progressive mode\n let progressiveReadyGate: Promise<void> | null = null\n const wrappedMarkReady = (isBuffering: boolean) => {\n // Only create gate if we're in buffering phase (first up-to-date)\n if (\n isBuffering &&\n syncMode === `progressive` &&\n testHooks?.beforeMarkingReady\n ) {\n // Create a new gate promise for this sync cycle\n progressiveReadyGate = testHooks.beforeMarkingReady()\n progressiveReadyGate.then(() => {\n markReady()\n })\n } else {\n // No hook, not buffering, or already past first up-to-date\n markReady()\n }\n }\n\n // Abort controller for the stream - wraps the signal if provided\n const abortController = new AbortController()\n\n if (shapeOptions.signal) {\n shapeOptions.signal.addEventListener(\n `abort`,\n () => {\n abortController.abort()\n },\n {\n once: true,\n },\n )\n if (shapeOptions.signal.aborted) {\n abortController.abort()\n }\n }\n\n // Cleanup pending matches on abort\n abortController.signal.addEventListener(`abort`, () => {\n pendingMatches.setState((current) => {\n current.forEach((match) => {\n clearTimeout(match.timeoutId)\n match.reject(new StreamAbortedError())\n })\n return new Map() // Clear all pending matches\n })\n })\n\n const stream = new ShapeStream({\n ...shapeOptions,\n // In on-demand mode, we only want to sync changes, so we set the log to `changes_only`\n log: syncMode === `on-demand` ? `changes_only` : undefined,\n // In on-demand mode, we only need the changes from the point of time the collection was created\n // so we default to `now` when there is no saved offset.\n offset:\n shapeOptions.offset ?? (syncMode === `on-demand` ? 
`now` : undefined),\n signal: abortController.signal,\n onError: (errorParams) => {\n // Just immediately mark ready if there's an error to avoid blocking\n // apps waiting for `.preload()` to finish.\n // Note that Electric sends a 409 error on a `must-refetch` message, but the\n // ShapeStream handled this and it will not reach this handler, therefor\n // this markReady will not be triggers by a `must-refetch`.\n markReady()\n\n if (shapeOptions.onError) {\n return shapeOptions.onError(errorParams)\n } else {\n console.error(\n `An error occurred while syncing collection: ${collection.id}, \\n` +\n `it has been marked as ready to avoid blocking apps waiting for '.preload()' to finish. \\n` +\n `You can provide an 'onError' handler on the shapeOptions to handle this error, and this message will not be logged.`,\n errorParams,\n )\n }\n\n return\n },\n })\n let transactionStarted = false\n const newTxids = new Set<Txid>()\n const newSnapshots: Array<PostgresSnapshot> = []\n let hasReceivedUpToDate = false // Track if we've completed initial sync in progressive mode\n\n // Progressive mode state\n // Helper to determine if we're buffering the initial sync\n const isBufferingInitialSync = () =>\n syncMode === `progressive` && !hasReceivedUpToDate\n const bufferedMessages: Array<Message<T>> = [] // Buffer change messages during initial sync\n\n // Track keys that have been synced to handle overlapping subset queries.\n // When multiple subset queries return the same row, the server sends `insert`\n // for each response. We convert subsequent inserts to updates to avoid\n // duplicate key errors when the row's data has changed between requests.\n const syncedKeys = new Set<string | number>()\n\n /**\n * Process a change message: handle tags and write the mutation\n */\n const processChangeMessage = (changeMessage: Message<T>) => {\n if (!isChangeMessage(changeMessage)) {\n return\n }\n\n // Process tags if present\n const tags = changeMessage.headers.tags\n const removedTags = changeMessage.headers.removed_tags\n const hasTags = tags || removedTags\n\n const rowId = collection.getKeyFromItem(changeMessage.value)\n const operation = changeMessage.headers.operation\n\n // Track synced keys and handle overlapping subset queries.\n // When multiple subset queries return the same row, the server sends\n // `insert` for each response. We convert subsequent inserts to updates\n // to avoid duplicate key errors when the row's data has changed.\n const isDelete = operation === `delete`\n const isDuplicateInsert =\n operation === `insert` && syncedKeys.has(rowId)\n\n if (isDelete) {\n syncedKeys.delete(rowId)\n } else {\n syncedKeys.add(rowId)\n }\n\n if (isDelete) {\n clearTagsForRow(rowId)\n } else if (hasTags) {\n processTagsForChangeMessage(tags, removedTags, rowId)\n }\n\n write({\n type: isDuplicateInsert ? 
`update` : operation,\n value: changeMessage.value,\n // Include the primary key and relation info in the metadata\n metadata: {\n ...changeMessage.headers,\n },\n })\n }\n\n // Create deduplicated loadSubset wrapper for non-eager modes\n // This prevents redundant snapshot requests when multiple concurrent\n // live queries request overlapping or subset predicates\n const loadSubsetDedupe = createLoadSubsetDedupe({\n stream,\n syncMode,\n isBufferingInitialSync,\n begin,\n write,\n commit,\n collectionId,\n // Pass the columnMapper's encode function to transform column names\n // (e.g., camelCase to snake_case) when compiling SQL for subset queries\n encodeColumnName: shapeOptions.columnMapper?.encode,\n // Pass abort signal so requestSnapshot errors can be ignored during cleanup\n signal: abortController.signal,\n })\n\n unsubscribeStream = stream.subscribe((messages: Array<Message<T>>) => {\n // Track commit point type - up-to-date takes precedence as it also triggers progressive mode atomic swap\n let commitPoint: `up-to-date` | `subset-end` | null = null\n\n // Don't clear the buffer between batches - this preserves messages for awaitMatch\n // to find even if multiple batches arrive before awaitMatch is called.\n // The buffer is naturally limited by MAX_BATCH_MESSAGES (oldest messages are dropped).\n // Reset batchCommitted since we're starting a new batch\n batchCommitted.setState(() => false)\n\n for (const message of messages) {\n // Add message to current batch buffer (for race condition handling)\n if (isChangeMessage(message) || isMoveOutMessage(message)) {\n currentBatchMessages.setState((currentBuffer) => {\n const newBuffer = [...currentBuffer, message]\n // Limit buffer size for safety\n if (newBuffer.length > MAX_BATCH_MESSAGES) {\n newBuffer.splice(0, newBuffer.length - MAX_BATCH_MESSAGES)\n }\n return newBuffer\n })\n }\n\n // Check for txids in the message and add them to our store\n // Skip during buffered initial sync in progressive mode (txids will be extracted during atomic swap)\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), track txids\n // to avoid losing them when messages are written to the existing transaction.\n if (\n hasTxids(message) &&\n (!isBufferingInitialSync() || transactionStarted)\n ) {\n message.headers.txids?.forEach((txid) => newTxids.add(txid))\n }\n\n // Check pending matches against this message\n // Note: matchFn will mark matches internally, we don't resolve here\n const matchesToRemove: Array<string> = []\n pendingMatches.state.forEach((match, matchId) => {\n if (!match.matched) {\n try {\n match.matchFn(message)\n } catch (err) {\n // If matchFn throws, clean up and reject the promise\n clearTimeout(match.timeoutId)\n match.reject(\n err instanceof Error ? err : new Error(String(err)),\n )\n matchesToRemove.push(matchId)\n debug(`matchFn error: %o`, err)\n }\n }\n })\n\n // Remove matches that errored\n removePendingMatches(matchesToRemove)\n\n if (isChangeMessage(message)) {\n // Check if the message contains schema information\n const schema = message.headers.schema\n if (schema && typeof schema === `string`) {\n // Store the schema for future use if it's a valid string\n relationSchema.setState(() => schema)\n }\n\n // In buffered initial sync of progressive mode, buffer messages instead of writing\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), write\n // directly to it instead of buffering. 
This prevents orphan transactions.\n if (isBufferingInitialSync() && !transactionStarted) {\n bufferedMessages.push(message)\n } else {\n // Normal processing: write changes immediately\n if (!transactionStarted) {\n begin()\n transactionStarted = true\n }\n\n processChangeMessage(message)\n }\n } else if (isSnapshotEndMessage(message)) {\n // Track postgres snapshot metadata for resolving awaiting mutations\n // Skip during buffered initial sync (will be extracted during atomic swap)\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), track snapshots\n // to avoid losing them when messages are written to the existing transaction.\n if (!isBufferingInitialSync() || transactionStarted) {\n newSnapshots.push(parseSnapshotMessage(message))\n }\n } else if (isUpToDateMessage(message)) {\n // up-to-date takes precedence - also triggers progressive mode atomic swap\n commitPoint = `up-to-date`\n } else if (isSubsetEndMessage(message)) {\n // subset-end triggers commit but not progressive mode atomic swap\n if (commitPoint !== `up-to-date`) {\n commitPoint = `subset-end`\n }\n } else if (isMoveOutMessage(message)) {\n // Handle move-out event: buffer if buffering, otherwise process immediately\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), process\n // immediately to avoid orphan transactions.\n if (isBufferingInitialSync() && !transactionStarted) {\n bufferedMessages.push(message)\n } else {\n // Normal processing: process move-out immediately\n transactionStarted = processMoveOutEvent(\n message.headers.patterns,\n begin,\n write,\n transactionStarted,\n )\n }\n } else if (isMustRefetchMessage(message)) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Received must-refetch message, starting transaction with truncate`,\n )\n\n // Start a transaction and truncate the collection\n if (!transactionStarted) {\n begin()\n transactionStarted = true\n }\n\n truncate()\n\n // Clear tag tracking state\n clearTagTrackingState()\n\n // Clear synced keys tracking since we're starting fresh\n syncedKeys.clear()\n\n // Reset the loadSubset deduplication state since we're starting fresh\n // This ensures that previously loaded predicates don't prevent refetching after truncate\n loadSubsetDedupe?.reset()\n\n // Reset flags so we continue accumulating changes until next up-to-date\n commitPoint = null\n hasReceivedUpToDate = false // Reset for progressive mode (isBufferingInitialSync will reflect this)\n bufferedMessages.length = 0 // Clear buffered messages\n }\n }\n\n if (commitPoint !== null) {\n // PROGRESSIVE MODE: Atomic swap on first up-to-date (not subset-end)\n // EXCEPTION: Skip atomic swap if a transaction is already started (e.g., from must-refetch).\n // In that case, do a normal commit to properly close the existing transaction.\n if (\n isBufferingInitialSync() &&\n commitPoint === `up-to-date` &&\n !transactionStarted\n ) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Progressive mode: Performing atomic swap with ${bufferedMessages.length} buffered messages`,\n )\n\n // Start atomic swap transaction\n begin()\n\n // Truncate to clear all snapshot data\n truncate()\n\n // Clear tag tracking state for atomic swap\n clearTagTrackingState()\n\n // Clear synced keys tracking for atomic swap\n syncedKeys.clear()\n\n // Apply all buffered change messages and extract txids/snapshots\n for (const bufferedMsg of bufferedMessages) {\n if (isChangeMessage(bufferedMsg)) {\n processChangeMessage(bufferedMsg)\n\n // Extract txids from buffered messages (will be committed to store after transaction)\n if (hasTxids(bufferedMsg)) {\n bufferedMsg.headers.txids?.forEach((txid) =>\n newTxids.add(txid),\n )\n }\n } else if (isSnapshotEndMessage(bufferedMsg)) {\n // Extract snapshots from buffered messages (will be committed to store after transaction)\n newSnapshots.push(parseSnapshotMessage(bufferedMsg))\n } else if (isMoveOutMessage(bufferedMsg)) {\n // Process buffered move-out messages during atomic swap\n processMoveOutEvent(\n bufferedMsg.headers.patterns,\n begin,\n write,\n transactionStarted,\n )\n }\n }\n\n // Commit the atomic swap\n commit()\n\n // Exit buffering phase by marking that we've received up-to-date\n // isBufferingInitialSync() will now return false\n bufferedMessages.length = 0\n\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Progressive mode: Atomic swap complete, now in normal sync mode`,\n )\n } else {\n // Normal mode or on-demand: commit transaction if one was started\n // Both up-to-date and subset-end trigger a commit\n if (transactionStarted) {\n commit()\n transactionStarted = false\n }\n }\n wrappedMarkReady(isBufferingInitialSync())\n\n // Track that we've received the first up-to-date for progressive mode\n if (commitPoint === `up-to-date`) {\n hasReceivedUpToDate = true\n }\n\n // Always commit txids when we receive up-to-date, regardless of transaction state\n seenTxids.setState((currentTxids) => {\n const clonedSeen = new Set<Txid>(currentTxids)\n if (newTxids.size > 0) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}new txids synced from pg %O`,\n Array.from(newTxids),\n )\n }\n newTxids.forEach((txid) => clonedSeen.add(txid))\n newTxids.clear()\n return clonedSeen\n })\n\n // Always commit snapshots when we receive up-to-date, regardless of transaction state\n seenSnapshots.setState((currentSnapshots) => {\n const seen = [...currentSnapshots, ...newSnapshots]\n newSnapshots.forEach((snapshot) =>\n debug(\n `${collectionId ? 
… (remainder of the electric.js source map omitted: embedded source text, names, and minified mappings)
@@ -38,21 +38,29 @@ function createLoadSubsetDedupe({
38
38
  write,
39
39
  commit,
40
40
  collectionId,
41
- encodeColumnName
41
+ encodeColumnName,
42
+ signal
42
43
  }) {
43
44
  if (syncMode === `eager`) {
44
45
  return null;
45
46
  }
46
47
  const compileOptions = encodeColumnName ? { encodeColumnName } : void 0;
48
+ const logPrefix = collectionId ? `[${collectionId}] ` : ``;
49
+ function handleSnapshotError(error, operation) {
50
+ if (signal.aborted) {
51
+ debug(`${logPrefix}Ignoring ${operation} error during cleanup: %o`, error);
52
+ return true;
53
+ }
54
+ debug(`${logPrefix}Error in ${operation}: %o`, error);
55
+ return false;
56
+ }
47
57
  const loadSubset = async (opts) => {
48
58
  if (isBufferingInitialSync()) {
49
59
  const snapshotParams = compileSQL(opts, compileOptions);
50
60
  try {
51
61
  const { data: rows } = await stream.fetchSnapshot(snapshotParams);
52
62
  if (!isBufferingInitialSync()) {
53
- debug(
54
- `${collectionId ? `[${collectionId}] ` : ``}Ignoring snapshot - sync completed while fetching`
55
- );
63
+ debug(`${logPrefix}Ignoring snapshot - sync completed while fetching`);
56
64
  return;
57
65
  }
58
66
  if (rows.length > 0) {
@@ -61,57 +69,57 @@ function createLoadSubsetDedupe({
61
69
  write({
62
70
  type: `insert`,
63
71
  value: row.value,
64
- metadata: {
65
- ...row.headers
66
- }
72
+ metadata: { ...row.headers }
67
73
  });
68
74
  }
69
75
  commit();
70
- debug(
71
- `${collectionId ? `[${collectionId}] ` : ``}Applied snapshot with ${rows.length} rows`
72
- );
76
+ debug(`${logPrefix}Applied snapshot with ${rows.length} rows`);
73
77
  }
74
78
  } catch (error) {
75
- debug(
76
- `${collectionId ? `[${collectionId}] ` : ``}Error fetching snapshot: %o`,
77
- error
78
- );
79
+ if (handleSnapshotError(error, `fetchSnapshot`)) {
80
+ return;
81
+ }
79
82
  throw error;
80
83
  }
81
- } else if (syncMode === `progressive`) {
82
84
  return;
83
- } else {
84
- const { cursor, where, orderBy, limit } = opts;
85
+ }
86
+ if (syncMode === `progressive`) {
87
+ return;
88
+ }
89
+ const { cursor, where, orderBy, limit } = opts;
90
+ try {
85
91
  if (cursor) {
86
- const promises = [];
87
92
  const whereCurrentOpts = {
88
93
  where: where ? and(where, cursor.whereCurrent) : cursor.whereCurrent,
89
94
  orderBy
90
- // No limit - get all ties
91
95
  };
92
96
  const whereCurrentParams = compileSQL(
93
97
  whereCurrentOpts,
94
98
  compileOptions
95
99
  );
96
- promises.push(stream.requestSnapshot(whereCurrentParams));
97
- debug(
98
- `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereCurrent snapshot (all ties)`
99
- );
100
100
  const whereFromOpts = {
101
101
  where: where ? and(where, cursor.whereFrom) : cursor.whereFrom,
102
102
  orderBy,
103
103
  limit
104
104
  };
105
105
  const whereFromParams = compileSQL(whereFromOpts, compileOptions);
106
- promises.push(stream.requestSnapshot(whereFromParams));
106
+ debug(`${logPrefix}Requesting cursor.whereCurrent snapshot (all ties)`);
107
107
  debug(
108
- `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereFrom snapshot (with limit ${limit})`
108
+ `${logPrefix}Requesting cursor.whereFrom snapshot (with limit ${limit})`
109
109
  );
110
- await Promise.all(promises);
110
+ await Promise.all([
111
+ stream.requestSnapshot(whereCurrentParams),
112
+ stream.requestSnapshot(whereFromParams)
113
+ ]);
111
114
  } else {
112
115
  const snapshotParams = compileSQL(opts, compileOptions);
113
116
  await stream.requestSnapshot(snapshotParams);
114
117
  }
118
+ } catch (error) {
119
+ if (handleSnapshotError(error, `requestSnapshot`)) {
120
+ return;
121
+ }
122
+ throw error;
115
123
  }
116
124
  };
117
125
  return new DeduplicatedLoadSubset({ loadSubset });
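
Taken together, the hunks above rework `loadSubset`'s failure handling: the `else if`/`else` chain becomes early returns, both the progressive-mode `fetchSnapshot` path and the on-demand `requestSnapshot` path route errors through the new `handleSnapshotError`, and the two cursor snapshots (`whereCurrent` for boundary ties, `whereFrom` for the next page) are awaited together inside one try/catch instead of being pushed onto an intermediate `promises` array. A minimal sketch of the resulting pattern, assuming a standard `AbortSignal` and a generic logger (the real logic lives inside `createLoadSubsetDedupe`):

```ts
// Sketch: abort-aware guard around parallel snapshot requests.
// Mirrors handleSnapshotError's contract: swallow the error when the
// signal has already fired (cleanup in progress), rethrow otherwise.
async function guardedSnapshots(
  requests: Array<() => Promise<unknown>>,
  signal: AbortSignal,
  log: (message: string, error: unknown) => void,
): Promise<void> {
  try {
    // Fail fast together, like Promise.all([requestSnapshot(...), ...]).
    await Promise.all(requests.map((request) => request()))
  } catch (error) {
    if (signal.aborted) {
      // Teardown already started: an in-flight request is expected to
      // fail, so record it as noise instead of propagating.
      log(`Ignoring snapshot error during cleanup`, error)
      return
    }
    log(`Error in snapshot request`, error)
    throw error
  }
}
```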
@@ -589,7 +597,9 @@ You can provide an 'onError' handler on the shapeOptions to handle this error, a
589
597
  collectionId,
590
598
  // Pass the columnMapper's encode function to transform column names
591
599
  // (e.g., camelCase to snake_case) when compiling SQL for subset queries
592
- encodeColumnName: shapeOptions.columnMapper?.encode
600
+ encodeColumnName: shapeOptions.columnMapper?.encode,
601
+ // Pass abort signal so requestSnapshot errors can be ignored during cleanup
602
+ signal: abortController.signal
593
603
  });
594
604
  unsubscribeStream = stream.subscribe((messages) => {
595
605
  let commitPoint = null;
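
The `signal` threaded through here is the same `abortController.signal` that stops the `ShapeStream` and that the sync `cleanup()` fires, so tearing a collection down while an on-demand snapshot request is still in flight no longer rethrows out of `loadSubset`. A rough sketch of that lifecycle, with `createStream` and `createLoader` as hypothetical stand-ins for the package's internals:

```ts
// Hypothetical factories standing in for ShapeStream construction and
// createLoadSubsetDedupe; only the teardown ordering matters here.
declare function createStream(opts: { signal: AbortSignal }): {
  unsubscribe: () => void
}
declare function createLoader(opts: { signal: AbortSignal }): {
  reset: () => void
}

const abortController = new AbortController()
const stream = createStream({ signal: abortController.signal })
const loader = createLoader({ signal: abortController.signal })

const cleanup = (): void => {
  stream.unsubscribe() // stop receiving stream messages
  abortController.abort() // in-flight snapshot calls now see
  // signal.aborted === true, so their rejections are logged, not thrown
  loader.reset() // allow fresh subset loads if the collection restarts
}
```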
@@ -1 +1 @@
1
- {"version":3,"file":"electric.js","sources":["../../src/electric.ts"],"sourcesContent":["import {\n ShapeStream,\n isChangeMessage,\n isControlMessage,\n isVisibleInSnapshot,\n} from '@electric-sql/client'\nimport { Store } from '@tanstack/store'\nimport DebugModule from 'debug'\nimport { DeduplicatedLoadSubset, and } from '@tanstack/db'\nimport {\n ExpectedNumberInAwaitTxIdError,\n StreamAbortedError,\n TimeoutWaitingForMatchError,\n TimeoutWaitingForTxIdError,\n} from './errors'\nimport { compileSQL } from './sql-compiler'\nimport {\n addTagToIndex,\n findRowsMatchingPattern,\n getTagLength,\n isMoveOutMessage,\n removeTagFromIndex,\n tagMatchesPattern,\n} from './tag-index'\nimport type { ColumnEncoder } from './sql-compiler'\nimport type {\n MoveOutPattern,\n MoveTag,\n ParsedMoveTag,\n RowId,\n TagIndex,\n} from './tag-index'\nimport type {\n BaseCollectionConfig,\n ChangeMessageOrDeleteKeyMessage,\n CollectionConfig,\n DeleteMutationFnParams,\n InsertMutationFnParams,\n LoadSubsetOptions,\n SyncConfig,\n SyncMode,\n UpdateMutationFnParams,\n UtilsRecord,\n} from '@tanstack/db'\nimport type { StandardSchemaV1 } from '@standard-schema/spec'\nimport type {\n ControlMessage,\n GetExtensions,\n Message,\n PostgresSnapshot,\n Row,\n ShapeStreamOptions,\n} from '@electric-sql/client'\n\n// Re-export for user convenience in custom match functions\nexport { isChangeMessage, isControlMessage } from '@electric-sql/client'\n\nconst debug = DebugModule.debug(`ts/db:electric`)\n\n/**\n * Symbol for internal test hooks (hidden from public API)\n */\nexport const ELECTRIC_TEST_HOOKS = Symbol(`electricTestHooks`)\n\n/**\n * Internal test hooks interface (for testing only)\n */\nexport interface ElectricTestHooks {\n /**\n * Called before marking collection ready after first up-to-date in progressive mode\n * Allows tests to pause and validate snapshot phase before atomic swap completes\n */\n beforeMarkingReady?: () => Promise<void>\n}\n\n/**\n * Type representing a transaction ID in ElectricSQL\n */\nexport type Txid = number\n\n/**\n * Custom match function type - receives stream messages and returns boolean\n * indicating if the mutation has been synchronized\n */\nexport type MatchFunction<T extends Row<unknown>> = (\n message: Message<T>,\n) => boolean\n\n/**\n * Matching strategies for Electric synchronization\n * Handlers can return:\n * - Txid strategy: { txid: number | number[], timeout?: number } (recommended)\n * - Void (no return value) - mutation completes without waiting\n *\n * The optional timeout property specifies how long to wait for the txid(s) in milliseconds.\n * If not specified, defaults to 5000ms.\n */\nexport type MatchingStrategy = {\n txid: Txid | Array<Txid>\n timeout?: number\n} | void\n\n/**\n * Type representing a snapshot end message\n */\ntype SnapshotEndMessage = ControlMessage & {\n headers: { control: `snapshot-end` }\n}\n// The `InferSchemaOutput` and `ResolveType` are copied from the `@tanstack/db` package\n// but we modified `InferSchemaOutput` slightly to restrict the schema output to `Row<unknown>`\n// This is needed in order for `GetExtensions` to be able to infer the parser extensions type from the schema\ntype InferSchemaOutput<T> = T extends StandardSchemaV1\n ? StandardSchemaV1.InferOutput<T> extends Row<unknown>\n ? 
StandardSchemaV1.InferOutput<T>\n : Record<string, unknown>\n : Record<string, unknown>\n\n/**\n * The mode of sync to use for the collection.\n * @default `eager`\n * @description\n * - `eager`:\n * - syncs all data immediately on preload\n * - collection will be marked as ready once the sync is complete\n * - there is no incremental sync\n * - `on-demand`:\n * - syncs data in incremental snapshots when the collection is queried\n * - collection will be marked as ready immediately after the first snapshot is synced\n * - `progressive`:\n * - syncs all data for the collection in the background\n * - uses incremental snapshots during the initial sync to provide a fast path to the data required for queries\n * - collection will be marked as ready once the full sync is complete\n */\nexport type ElectricSyncMode = SyncMode | `progressive`\n\n/**\n * Configuration interface for Electric collection options\n * @template T - The type of items in the collection\n * @template TSchema - The schema type for validation\n */\nexport interface ElectricCollectionConfig<\n T extends Row<unknown> = Row<unknown>,\n TSchema extends StandardSchemaV1 = never,\n> extends Omit<\n BaseCollectionConfig<\n T,\n string | number,\n TSchema,\n ElectricCollectionUtils<T>,\n any\n >,\n `onInsert` | `onUpdate` | `onDelete` | `syncMode`\n> {\n /**\n * Configuration options for the ElectricSQL ShapeStream\n */\n shapeOptions: ShapeStreamOptions<GetExtensions<T>>\n syncMode?: ElectricSyncMode\n\n /**\n * Internal test hooks (for testing only)\n * Hidden via Symbol to prevent accidental usage in production\n */\n [ELECTRIC_TEST_HOOKS]?: ElectricTestHooks\n\n /**\n * Optional asynchronous handler function called before an insert operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? } or void\n * @example\n * // Basic Electric insert handler with txid (recommended)\n * onInsert: async ({ transaction }) => {\n * const newItem = transaction.mutations[0].modified\n * const result = await api.todos.create({\n * data: newItem\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Insert handler with custom timeout\n * onInsert: async ({ transaction }) => {\n * const newItem = transaction.mutations[0].modified\n * const result = await api.todos.create({\n * data: newItem\n * })\n * return { txid: result.txid, timeout: 10000 } // Wait up to 10 seconds\n * }\n *\n * @example\n * // Insert handler with multiple items - return array of txids\n * onInsert: async ({ transaction }) => {\n * const items = transaction.mutations.map(m => m.modified)\n * const results = await Promise.all(\n * items.map(item => api.todos.create({ data: item }))\n * )\n * return { txid: results.map(r => r.txid) }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onInsert: async ({ transaction, collection }) => {\n * const newItem = transaction.mutations[0].modified\n * await api.todos.create({ data: newItem })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'insert' &&\n * message.value.name === newItem.name\n * )\n * }\n */\n onInsert?: (\n params: InsertMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n\n /**\n * Optional asynchronous handler function called before an update operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? 
} or void\n * @example\n * // Basic Electric update handler with txid (recommended)\n * onUpdate: async ({ transaction }) => {\n * const { original, changes } = transaction.mutations[0]\n * const result = await api.todos.update({\n * where: { id: original.id },\n * data: changes\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onUpdate: async ({ transaction, collection }) => {\n * const { original, changes } = transaction.mutations[0]\n * await api.todos.update({ where: { id: original.id }, data: changes })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'update' &&\n * message.value.id === original.id\n * )\n * }\n */\n onUpdate?: (\n params: UpdateMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n\n /**\n * Optional asynchronous handler function called before a delete operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? } or void\n * @example\n * // Basic Electric delete handler with txid (recommended)\n * onDelete: async ({ transaction }) => {\n * const mutation = transaction.mutations[0]\n * const result = await api.todos.delete({\n * id: mutation.original.id\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onDelete: async ({ transaction, collection }) => {\n * const mutation = transaction.mutations[0]\n * await api.todos.delete({ id: mutation.original.id })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'delete' &&\n * message.value.id === mutation.original.id\n * )\n * }\n */\n onDelete?: (\n params: DeleteMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n}\n\nfunction isUpToDateMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { up_to_date: true } {\n return isControlMessage(message) && message.headers.control === `up-to-date`\n}\n\nfunction isMustRefetchMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { headers: { control: `must-refetch` } } {\n return isControlMessage(message) && message.headers.control === `must-refetch`\n}\n\nfunction isSnapshotEndMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is SnapshotEndMessage {\n return isControlMessage(message) && message.headers.control === `snapshot-end`\n}\n\nfunction isSubsetEndMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { headers: { control: `subset-end` } } {\n return (\n isControlMessage(message) &&\n (message.headers.control as string) === `subset-end`\n )\n}\n\nfunction parseSnapshotMessage(message: SnapshotEndMessage): PostgresSnapshot {\n return {\n xmin: message.headers.xmin,\n xmax: message.headers.xmax,\n xip_list: message.headers.xip_list,\n }\n}\n\n// Check if a message contains txids in its headers\nfunction hasTxids<T extends Row<unknown>>(\n message: Message<T>,\n): message is Message<T> & { headers: { txids?: Array<Txid> } } {\n return `txids` in message.headers && Array.isArray(message.headers.txids)\n}\n\n/**\n * Creates a deduplicated loadSubset handler for progressive/on-demand modes\n * Returns null for eager mode, or a DeduplicatedLoadSubset instance for other modes.\n * Handles fetching snapshots in 
progressive mode during buffering phase,\n * and requesting snapshots in on-demand mode.\n *\n * When cursor expressions are provided (whereFrom/whereCurrent), makes two\n * requestSnapshot calls:\n * - One for whereFrom (rows > cursor) with limit\n * - One for whereCurrent (rows = cursor, for tie-breaking) without limit\n */\nfunction createLoadSubsetDedupe<T extends Row<unknown>>({\n stream,\n syncMode,\n isBufferingInitialSync,\n begin,\n write,\n commit,\n collectionId,\n encodeColumnName,\n}: {\n stream: ShapeStream<T>\n syncMode: ElectricSyncMode\n isBufferingInitialSync: () => boolean\n begin: () => void\n write: (mutation: {\n type: `insert` | `update` | `delete`\n value: T\n metadata: Record<string, unknown>\n }) => void\n commit: () => void\n collectionId?: string\n /**\n * Optional function to encode column names (e.g., camelCase to snake_case).\n * This is typically the `encode` function from shapeOptions.columnMapper.\n */\n encodeColumnName?: ColumnEncoder\n}): DeduplicatedLoadSubset | null {\n // Eager mode doesn't need subset loading\n if (syncMode === `eager`) {\n return null\n }\n\n const compileOptions = encodeColumnName ? { encodeColumnName } : undefined\n\n const loadSubset = async (opts: LoadSubsetOptions) => {\n // In progressive mode, use fetchSnapshot during snapshot phase\n if (isBufferingInitialSync()) {\n // Progressive mode snapshot phase: fetch and apply immediately\n const snapshotParams = compileSQL<T>(opts, compileOptions)\n try {\n const { data: rows } = await stream.fetchSnapshot(snapshotParams)\n\n // Check again if we're still buffering - we might have received up-to-date\n // and completed the atomic swap while waiting for the snapshot\n if (!isBufferingInitialSync()) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Ignoring snapshot - sync completed while fetching`,\n )\n return\n }\n\n // Apply snapshot data in a sync transaction (only if we have data)\n if (rows.length > 0) {\n begin()\n for (const row of rows) {\n write({\n type: `insert`,\n value: row.value,\n metadata: {\n ...row.headers,\n },\n })\n }\n commit()\n\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Applied snapshot with ${rows.length} rows`,\n )\n }\n } catch (error) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Error fetching snapshot: %o`,\n error,\n )\n throw error\n }\n } else if (syncMode === `progressive`) {\n // Progressive mode after full sync complete: no need to load more\n return\n } else {\n // On-demand mode: use requestSnapshot\n // When cursor is provided, make two calls:\n // 1. whereCurrent (all ties, no limit)\n // 2. whereFrom (rows > cursor, with limit)\n const { cursor, where, orderBy, limit } = opts\n\n if (cursor) {\n // Make parallel requests for cursor-based pagination\n const promises: Array<Promise<unknown>> = []\n\n // Request 1: All rows matching whereCurrent (ties at boundary, no limit)\n // Combine main where with cursor.whereCurrent\n const whereCurrentOpts: LoadSubsetOptions = {\n where: where ? and(where, cursor.whereCurrent) : cursor.whereCurrent,\n orderBy,\n // No limit - get all ties\n }\n const whereCurrentParams = compileSQL<T>(\n whereCurrentOpts,\n compileOptions,\n )\n promises.push(stream.requestSnapshot(whereCurrentParams))\n\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereCurrent snapshot (all ties)`,\n )\n\n // Request 2: Rows matching whereFrom (rows > cursor, with limit)\n // Combine main where with cursor.whereFrom\n const whereFromOpts: LoadSubsetOptions = {\n where: where ? 
and(where, cursor.whereFrom) : cursor.whereFrom,\n orderBy,\n limit,\n }\n const whereFromParams = compileSQL<T>(whereFromOpts, compileOptions)\n promises.push(stream.requestSnapshot(whereFromParams))\n\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereFrom snapshot (with limit ${limit})`,\n )\n\n // Wait for both requests to complete\n await Promise.all(promises)\n } else {\n // No cursor - standard single request\n const snapshotParams = compileSQL<T>(opts, compileOptions)\n await stream.requestSnapshot(snapshotParams)\n }\n }\n }\n\n return new DeduplicatedLoadSubset({ loadSubset })\n}\n\n/**\n * Type for the awaitTxId utility function\n */\nexport type AwaitTxIdFn = (txId: Txid, timeout?: number) => Promise<boolean>\n\n/**\n * Type for the awaitMatch utility function\n */\nexport type AwaitMatchFn<T extends Row<unknown>> = (\n matchFn: MatchFunction<T>,\n timeout?: number,\n) => Promise<boolean>\n\n/**\n * Electric collection utilities type\n */\nexport interface ElectricCollectionUtils<\n T extends Row<unknown> = Row<unknown>,\n> extends UtilsRecord {\n awaitTxId: AwaitTxIdFn\n awaitMatch: AwaitMatchFn<T>\n}\n\n/**\n * Creates Electric collection options for use with a standard Collection\n *\n * @template T - The explicit type of items in the collection (highest priority)\n * @template TSchema - The schema type for validation and type inference (second priority)\n * @template TFallback - The fallback type if no explicit or schema type is provided\n * @param config - Configuration options for the Electric collection\n * @returns Collection options with utilities\n */\n\n// Overload for when schema is provided\nexport function electricCollectionOptions<T extends StandardSchemaV1>(\n config: ElectricCollectionConfig<InferSchemaOutput<T>, T> & {\n schema: T\n },\n): Omit<CollectionConfig<InferSchemaOutput<T>, string | number, T>, `utils`> & {\n id?: string\n utils: ElectricCollectionUtils<InferSchemaOutput<T>>\n schema: T\n}\n\n// Overload for when no schema is provided\nexport function electricCollectionOptions<T extends Row<unknown>>(\n config: ElectricCollectionConfig<T> & {\n schema?: never // prohibit schema\n },\n): Omit<CollectionConfig<T, string | number>, `utils`> & {\n id?: string\n utils: ElectricCollectionUtils<T>\n schema?: never // no schema in the result\n}\n\nexport function electricCollectionOptions<T extends Row<unknown>>(\n config: ElectricCollectionConfig<T, any>,\n): Omit<\n CollectionConfig<T, string | number, any, ElectricCollectionUtils<T>>,\n `utils`\n> & {\n id?: string\n utils: ElectricCollectionUtils<T>\n schema?: any\n} {\n const seenTxids = new Store<Set<Txid>>(new Set([]))\n const seenSnapshots = new Store<Array<PostgresSnapshot>>([])\n const internalSyncMode = config.syncMode ?? `eager`\n const finalSyncMode =\n internalSyncMode === `progressive` ? 
`on-demand` : internalSyncMode\n const pendingMatches = new Store<\n Map<\n string,\n {\n matchFn: (message: Message<any>) => boolean\n resolve: (value: boolean) => void\n reject: (error: Error) => void\n timeoutId: ReturnType<typeof setTimeout>\n matched: boolean\n }\n >\n >(new Map())\n\n // Buffer messages since last up-to-date to handle race conditions\n const currentBatchMessages = new Store<Array<Message<any>>>([])\n\n // Track whether the current batch has been committed (up-to-date received)\n // This allows awaitMatch to resolve immediately for messages from committed batches\n const batchCommitted = new Store<boolean>(false)\n\n /**\n * Helper function to remove multiple matches from the pendingMatches store\n */\n const removePendingMatches = (matchIds: Array<string>) => {\n if (matchIds.length > 0) {\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n matchIds.forEach((id) => newMatches.delete(id))\n return newMatches\n })\n }\n }\n\n /**\n * Helper function to resolve and cleanup matched pending matches\n */\n const resolveMatchedPendingMatches = () => {\n const matchesToResolve: Array<string> = []\n pendingMatches.state.forEach((match, matchId) => {\n if (match.matched) {\n clearTimeout(match.timeoutId)\n match.resolve(true)\n matchesToResolve.push(matchId)\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch resolved on up-to-date for match %s`,\n matchId,\n )\n }\n })\n removePendingMatches(matchesToResolve)\n }\n const sync = createElectricSync<T>(config.shapeOptions, {\n seenTxids,\n seenSnapshots,\n syncMode: internalSyncMode,\n pendingMatches,\n currentBatchMessages,\n batchCommitted,\n removePendingMatches,\n resolveMatchedPendingMatches,\n collectionId: config.id,\n testHooks: config[ELECTRIC_TEST_HOOKS],\n })\n\n /**\n * Wait for a specific transaction ID to be synced\n * @param txId The transaction ID to wait for as a number\n * @param timeout Optional timeout in milliseconds (defaults to 5000ms)\n * @returns Promise that resolves when the txId is synced\n */\n const awaitTxId: AwaitTxIdFn = async (\n txId: Txid,\n timeout: number = 5000,\n ): Promise<boolean> => {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitTxId called with txid %d`,\n txId,\n )\n if (typeof txId !== `number`) {\n throw new ExpectedNumberInAwaitTxIdError(typeof txId, config.id)\n }\n\n // First check if the txid is in the seenTxids store\n const hasTxid = seenTxids.state.has(txId)\n if (hasTxid) return true\n\n // Then check if the txid is in any of the seen snapshots\n const hasSnapshot = seenSnapshots.state.some((snapshot) =>\n isVisibleInSnapshot(txId, snapshot),\n )\n if (hasSnapshot) return true\n\n return new Promise((resolve, reject) => {\n const timeoutId = setTimeout(() => {\n unsubscribeSeenTxids()\n unsubscribeSeenSnapshots()\n reject(new TimeoutWaitingForTxIdError(txId, config.id))\n }, timeout)\n\n const unsubscribeSeenTxids = seenTxids.subscribe(() => {\n if (seenTxids.state.has(txId)) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitTxId found match for txid %o`,\n txId,\n )\n clearTimeout(timeoutId)\n unsubscribeSeenTxids()\n unsubscribeSeenSnapshots()\n resolve(true)\n }\n })\n\n const unsubscribeSeenSnapshots = seenSnapshots.subscribe(() => {\n const visibleSnapshot = seenSnapshots.state.find((snapshot) =>\n isVisibleInSnapshot(txId, snapshot),\n )\n if (visibleSnapshot) {\n debug(\n `${config.id ? 
`[${config.id}] ` : ``}awaitTxId found match for txid %o in snapshot %o`,\n txId,\n visibleSnapshot,\n )\n clearTimeout(timeoutId)\n unsubscribeSeenSnapshots()\n unsubscribeSeenTxids()\n resolve(true)\n }\n })\n })\n }\n\n /**\n * Wait for a custom match function to find a matching message\n * @param matchFn Function that returns true when a message matches\n * @param timeout Optional timeout in milliseconds (defaults to 5000ms)\n * @returns Promise that resolves when a matching message is found\n */\n const awaitMatch: AwaitMatchFn<any> = async (\n matchFn: MatchFunction<any>,\n timeout: number = 3000,\n ): Promise<boolean> => {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch called with custom function`,\n )\n\n return new Promise((resolve, reject) => {\n const matchId = Math.random().toString(36)\n\n const cleanupMatch = () => {\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.delete(matchId)\n return newMatches\n })\n }\n\n const onTimeout = () => {\n cleanupMatch()\n reject(new TimeoutWaitingForMatchError(config.id))\n }\n\n const timeoutId = setTimeout(onTimeout, timeout)\n\n // We need access to the stream messages to check against the match function\n // This will be handled by the sync configuration\n const checkMatch = (message: Message<any>) => {\n if (matchFn(message)) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch found matching message, waiting for up-to-date`,\n )\n // Mark as matched but don't resolve yet - wait for up-to-date\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n const existing = newMatches.get(matchId)\n if (existing) {\n newMatches.set(matchId, { ...existing, matched: true })\n }\n return newMatches\n })\n return true\n }\n return false\n }\n\n // Check against current batch messages first to handle race conditions\n for (const message of currentBatchMessages.state) {\n if (matchFn(message)) {\n // If batch is committed (up-to-date already received), resolve immediately\n // just like awaitTxId does when it finds a txid in seenTxids\n if (batchCommitted.state) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch found immediate match in committed batch, resolving immediately`,\n )\n clearTimeout(timeoutId)\n resolve(true)\n return\n }\n\n // If batch is not yet committed, register match and wait for up-to-date\n debug(\n `${config.id ? 
`[${config.id}] ` : ``}awaitMatch found immediate match in current batch, waiting for up-to-date`,\n )\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.set(matchId, {\n matchFn: checkMatch,\n resolve,\n reject,\n timeoutId,\n matched: true, // Already matched, will resolve on up-to-date\n })\n return newMatches\n })\n return\n }\n }\n\n // Store the match function for the sync process to use\n // We'll add this to a pending matches store\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.set(matchId, {\n matchFn: checkMatch,\n resolve,\n reject,\n timeoutId,\n matched: false,\n })\n return newMatches\n })\n })\n }\n\n /**\n * Process matching strategy and wait for synchronization\n */\n const processMatchingStrategy = async (\n result: MatchingStrategy,\n ): Promise<void> => {\n // Only wait if result contains txid\n if (result && `txid` in result) {\n const timeout = result.timeout\n // Handle both single txid and array of txids\n if (Array.isArray(result.txid)) {\n await Promise.all(result.txid.map((txid) => awaitTxId(txid, timeout)))\n } else {\n await awaitTxId(result.txid, timeout)\n }\n }\n // If result is void/undefined, don't wait - mutation completes immediately\n }\n\n // Create wrapper handlers for direct persistence operations that handle different matching strategies\n const wrappedOnInsert = config.onInsert\n ? async (\n params: InsertMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onInsert!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n const wrappedOnUpdate = config.onUpdate\n ? async (\n params: UpdateMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onUpdate!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n const wrappedOnDelete = config.onDelete\n ? 
async (\n params: DeleteMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onDelete!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n // Extract standard Collection config properties\n const {\n shapeOptions: _shapeOptions,\n onInsert: _onInsert,\n onUpdate: _onUpdate,\n onDelete: _onDelete,\n ...restConfig\n } = config\n\n return {\n ...restConfig,\n syncMode: finalSyncMode,\n sync,\n onInsert: wrappedOnInsert,\n onUpdate: wrappedOnUpdate,\n onDelete: wrappedOnDelete,\n utils: {\n awaitTxId,\n awaitMatch,\n },\n }\n}\n\n/**\n * Internal function to create ElectricSQL sync configuration\n */\nfunction createElectricSync<T extends Row<unknown>>(\n shapeOptions: ShapeStreamOptions<GetExtensions<T>>,\n options: {\n syncMode: ElectricSyncMode\n seenTxids: Store<Set<Txid>>\n seenSnapshots: Store<Array<PostgresSnapshot>>\n pendingMatches: Store<\n Map<\n string,\n {\n matchFn: (message: Message<T>) => boolean\n resolve: (value: boolean) => void\n reject: (error: Error) => void\n timeoutId: ReturnType<typeof setTimeout>\n matched: boolean\n }\n >\n >\n currentBatchMessages: Store<Array<Message<T>>>\n batchCommitted: Store<boolean>\n removePendingMatches: (matchIds: Array<string>) => void\n resolveMatchedPendingMatches: () => void\n collectionId?: string\n testHooks?: ElectricTestHooks\n },\n): SyncConfig<T> {\n const {\n seenTxids,\n seenSnapshots,\n syncMode,\n pendingMatches,\n currentBatchMessages,\n batchCommitted,\n removePendingMatches,\n resolveMatchedPendingMatches,\n collectionId,\n testHooks,\n } = options\n const MAX_BATCH_MESSAGES = 1000 // Safety limit for message buffer\n\n // Store for the relation schema information\n const relationSchema = new Store<string | undefined>(undefined)\n\n const tagCache = new Map<MoveTag, ParsedMoveTag>()\n\n // Parses a tag string into a MoveTag.\n // It memoizes the result parsed tag such that future calls\n // for the same tag string return the same MoveTag array.\n const parseTag = (tag: MoveTag): ParsedMoveTag => {\n const cachedTag = tagCache.get(tag)\n if (cachedTag) {\n return cachedTag\n }\n\n const parsedTag = tag.split(`|`)\n tagCache.set(tag, parsedTag)\n return parsedTag\n }\n\n // Tag tracking state\n const rowTagSets = new Map<RowId, Set<MoveTag>>()\n const tagIndex: TagIndex = []\n let tagLength: number | undefined = undefined\n\n /**\n * Initialize the tag index with the correct length\n */\n const initializeTagIndex = (length: number): void => {\n if (tagIndex.length < length) {\n // Extend the index array to the required length\n for (let i = tagIndex.length; i < length; i++) {\n tagIndex[i] = new Map()\n }\n }\n }\n\n /**\n * Add tags to a row and update the tag index\n */\n const addTagsToRow = (\n tags: Array<MoveTag>,\n rowId: RowId,\n rowTagSet: Set<MoveTag>,\n ): void => {\n for (const tag of tags) {\n const parsedTag = parseTag(tag)\n\n // Infer tag length from first tag\n if (tagLength === undefined) {\n tagLength = getTagLength(parsedTag)\n initializeTagIndex(tagLength)\n }\n\n // Validate tag length matches\n const currentTagLength = getTagLength(parsedTag)\n if (currentTagLength !== tagLength) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Tag length mismatch: expected ${tagLength}, got ${currentTagLength}`,\n )\n continue\n }\n\n rowTagSet.add(tag)\n addTagToIndex(parsedTag, rowId, tagIndex, tagLength)\n }\n }\n\n /**\n * Remove tags from a row and update the tag index\n */\n const removeTagsFromRow = (\n removedTags: Array<MoveTag>,\n rowId: RowId,\n rowTagSet: Set<MoveTag>,\n ): void => {\n if (tagLength === undefined) {\n return\n }\n\n for (const tag of removedTags) {\n const parsedTag = parseTag(tag)\n rowTagSet.delete(tag)\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength)\n // We aggresively evict the tag from the cache\n // if this tag is shared with another row\n // and is not removed from that other row\n // then next time we encounter the tag it will be parsed again\n tagCache.delete(tag)\n }\n }\n\n /**\n * Process tags for a change message (add and remove tags)\n */\n const processTagsForChangeMessage = (\n tags: Array<MoveTag> | undefined,\n removedTags: Array<MoveTag> | undefined,\n rowId: RowId,\n ): Set<MoveTag> => {\n // Initialize tag set for this row if it doesn't exist (needed for checking deletion)\n if (!rowTagSets.has(rowId)) {\n rowTagSets.set(rowId, new Set())\n }\n const rowTagSet = rowTagSets.get(rowId)!\n\n // Add new tags\n if (tags) {\n addTagsToRow(tags, rowId, rowTagSet)\n }\n\n // Remove tags\n if (removedTags) {\n removeTagsFromRow(removedTags, rowId, rowTagSet)\n }\n\n return rowTagSet\n }\n\n /**\n * Clear all tag tracking state (used when truncating)\n */\n const clearTagTrackingState = (): void => {\n rowTagSets.clear()\n tagIndex.length = 0\n tagLength = undefined\n }\n\n /**\n * Remove all tags for a row from both the tag set and the index\n * Used when a row is deleted\n */\n const clearTagsForRow = (rowId: RowId): void => {\n if (tagLength === undefined) {\n return\n }\n\n const rowTagSet = rowTagSets.get(rowId)\n if (!rowTagSet) {\n return\n }\n\n // Remove each tag from the index\n for (const tag of rowTagSet) {\n const parsedTag = parseTag(tag)\n const currentTagLength = getTagLength(parsedTag)\n if (currentTagLength === tagLength) {\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength)\n }\n tagCache.delete(tag)\n }\n\n // Remove the row from the tag sets map\n rowTagSets.delete(rowId)\n }\n\n /**\n * Remove matching tags from a row based on a pattern\n * Returns true if the row's tag set is now empty\n */\n const removeMatchingTagsFromRow = (\n rowId: RowId,\n pattern: MoveOutPattern,\n ): boolean => {\n const rowTagSet = rowTagSets.get(rowId)\n if (!rowTagSet) {\n return false\n }\n\n // Find tags that match this pattern and remove them\n for (const tag of rowTagSet) {\n const parsedTag = parseTag(tag)\n if (tagMatchesPattern(parsedTag, pattern)) {\n rowTagSet.delete(tag)\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength!)\n }\n }\n\n // Check if row's tag set is now empty\n if (rowTagSet.size === 0) {\n rowTagSets.delete(rowId)\n return true\n }\n\n return false\n }\n\n /**\n * Process move-out event: remove matching tags from rows and delete rows with empty tag sets\n */\n const processMoveOutEvent = (\n patterns: Array<MoveOutPattern>,\n begin: () => void,\n write: (message: ChangeMessageOrDeleteKeyMessage<T>) => void,\n transactionStarted: boolean,\n ): boolean => {\n if (tagLength === undefined) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Received move-out message but no tag length set yet, ignoring`,\n )\n return transactionStarted\n }\n\n let txStarted = transactionStarted\n\n // Process all patterns and collect rows to delete\n for (const pattern of patterns) {\n // Find all rows that match this pattern\n const affectedRowIds = findRowsMatchingPattern(pattern, tagIndex)\n\n for (const rowId of affectedRowIds) {\n if (removeMatchingTagsFromRow(rowId, pattern)) {\n // Delete rows with empty tag sets\n if (!txStarted) {\n begin()\n txStarted = true\n }\n\n write({\n type: `delete`,\n key: rowId,\n })\n }\n }\n }\n\n return txStarted\n }\n\n /**\n * Get the sync metadata for insert operations\n * @returns Record containing relation information\n */\n const getSyncMetadata = (): Record<string, unknown> => {\n // Use the stored schema if available, otherwise default to 'public'\n const schema = relationSchema.state || `public`\n\n return {\n relation: shapeOptions.params?.table\n ? [schema, shapeOptions.params.table]\n : undefined,\n }\n }\n\n let unsubscribeStream: () => void\n\n return {\n sync: (params: Parameters<SyncConfig<T>[`sync`]>[0]) => {\n const { begin, write, commit, markReady, truncate, collection } = params\n\n // Wrap markReady to wait for test hook in progressive mode\n let progressiveReadyGate: Promise<void> | null = null\n const wrappedMarkReady = (isBuffering: boolean) => {\n // Only create gate if we're in buffering phase (first up-to-date)\n if (\n isBuffering &&\n syncMode === `progressive` &&\n testHooks?.beforeMarkingReady\n ) {\n // Create a new gate promise for this sync cycle\n progressiveReadyGate = testHooks.beforeMarkingReady()\n progressiveReadyGate.then(() => {\n markReady()\n })\n } else {\n // No hook, not buffering, or already past first up-to-date\n markReady()\n }\n }\n\n // Abort controller for the stream - wraps the signal if provided\n const abortController = new AbortController()\n\n if (shapeOptions.signal) {\n shapeOptions.signal.addEventListener(\n `abort`,\n () => {\n abortController.abort()\n },\n {\n once: true,\n },\n )\n if (shapeOptions.signal.aborted) {\n abortController.abort()\n }\n }\n\n // Cleanup pending matches on abort\n abortController.signal.addEventListener(`abort`, () => {\n pendingMatches.setState((current) => {\n current.forEach((match) => {\n clearTimeout(match.timeoutId)\n match.reject(new StreamAbortedError())\n })\n return new Map() // Clear all pending matches\n })\n })\n\n const stream = new ShapeStream({\n ...shapeOptions,\n // In on-demand mode, we only want to sync changes, so we set the log to `changes_only`\n log: syncMode === `on-demand` ? `changes_only` : undefined,\n // In on-demand mode, we only need the changes from the point of time the collection was created\n // so we default to `now` when there is no saved offset.\n offset:\n shapeOptions.offset ?? (syncMode === `on-demand` ? 
`now` : undefined),\n signal: abortController.signal,\n onError: (errorParams) => {\n // Just immediately mark ready if there's an error to avoid blocking\n // apps waiting for `.preload()` to finish.\n // Note that Electric sends a 409 error on a `must-refetch` message, but the\n // ShapeStream handled this and it will not reach this handler, therefor\n // this markReady will not be triggers by a `must-refetch`.\n markReady()\n\n if (shapeOptions.onError) {\n return shapeOptions.onError(errorParams)\n } else {\n console.error(\n `An error occurred while syncing collection: ${collection.id}, \\n` +\n `it has been marked as ready to avoid blocking apps waiting for '.preload()' to finish. \\n` +\n `You can provide an 'onError' handler on the shapeOptions to handle this error, and this message will not be logged.`,\n errorParams,\n )\n }\n\n return\n },\n })\n let transactionStarted = false\n const newTxids = new Set<Txid>()\n const newSnapshots: Array<PostgresSnapshot> = []\n let hasReceivedUpToDate = false // Track if we've completed initial sync in progressive mode\n\n // Progressive mode state\n // Helper to determine if we're buffering the initial sync\n const isBufferingInitialSync = () =>\n syncMode === `progressive` && !hasReceivedUpToDate\n const bufferedMessages: Array<Message<T>> = [] // Buffer change messages during initial sync\n\n // Track keys that have been synced to handle overlapping subset queries.\n // When multiple subset queries return the same row, the server sends `insert`\n // for each response. We convert subsequent inserts to updates to avoid\n // duplicate key errors when the row's data has changed between requests.\n const syncedKeys = new Set<string | number>()\n\n /**\n * Process a change message: handle tags and write the mutation\n */\n const processChangeMessage = (changeMessage: Message<T>) => {\n if (!isChangeMessage(changeMessage)) {\n return\n }\n\n // Process tags if present\n const tags = changeMessage.headers.tags\n const removedTags = changeMessage.headers.removed_tags\n const hasTags = tags || removedTags\n\n const rowId = collection.getKeyFromItem(changeMessage.value)\n const operation = changeMessage.headers.operation\n\n // Track synced keys and handle overlapping subset queries.\n // When multiple subset queries return the same row, the server sends\n // `insert` for each response. We convert subsequent inserts to updates\n // to avoid duplicate key errors when the row's data has changed.\n const isDelete = operation === `delete`\n const isDuplicateInsert =\n operation === `insert` && syncedKeys.has(rowId)\n\n if (isDelete) {\n syncedKeys.delete(rowId)\n } else {\n syncedKeys.add(rowId)\n }\n\n if (isDelete) {\n clearTagsForRow(rowId)\n } else if (hasTags) {\n processTagsForChangeMessage(tags, removedTags, rowId)\n }\n\n write({\n type: isDuplicateInsert ? 
`update` : operation,\n value: changeMessage.value,\n // Include the primary key and relation info in the metadata\n metadata: {\n ...changeMessage.headers,\n },\n })\n }\n\n // Create deduplicated loadSubset wrapper for non-eager modes\n // This prevents redundant snapshot requests when multiple concurrent\n // live queries request overlapping or subset predicates\n const loadSubsetDedupe = createLoadSubsetDedupe({\n stream,\n syncMode,\n isBufferingInitialSync,\n begin,\n write,\n commit,\n collectionId,\n // Pass the columnMapper's encode function to transform column names\n // (e.g., camelCase to snake_case) when compiling SQL for subset queries\n encodeColumnName: shapeOptions.columnMapper?.encode,\n })\n\n unsubscribeStream = stream.subscribe((messages: Array<Message<T>>) => {\n // Track commit point type - up-to-date takes precedence as it also triggers progressive mode atomic swap\n let commitPoint: `up-to-date` | `subset-end` | null = null\n\n // Don't clear the buffer between batches - this preserves messages for awaitMatch\n // to find even if multiple batches arrive before awaitMatch is called.\n // The buffer is naturally limited by MAX_BATCH_MESSAGES (oldest messages are dropped).\n // Reset batchCommitted since we're starting a new batch\n batchCommitted.setState(() => false)\n\n for (const message of messages) {\n // Add message to current batch buffer (for race condition handling)\n if (isChangeMessage(message) || isMoveOutMessage(message)) {\n currentBatchMessages.setState((currentBuffer) => {\n const newBuffer = [...currentBuffer, message]\n // Limit buffer size for safety\n if (newBuffer.length > MAX_BATCH_MESSAGES) {\n newBuffer.splice(0, newBuffer.length - MAX_BATCH_MESSAGES)\n }\n return newBuffer\n })\n }\n\n // Check for txids in the message and add them to our store\n // Skip during buffered initial sync in progressive mode (txids will be extracted during atomic swap)\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), track txids\n // to avoid losing them when messages are written to the existing transaction.\n if (\n hasTxids(message) &&\n (!isBufferingInitialSync() || transactionStarted)\n ) {\n message.headers.txids?.forEach((txid) => newTxids.add(txid))\n }\n\n // Check pending matches against this message\n // Note: matchFn will mark matches internally, we don't resolve here\n const matchesToRemove: Array<string> = []\n pendingMatches.state.forEach((match, matchId) => {\n if (!match.matched) {\n try {\n match.matchFn(message)\n } catch (err) {\n // If matchFn throws, clean up and reject the promise\n clearTimeout(match.timeoutId)\n match.reject(\n err instanceof Error ? err : new Error(String(err)),\n )\n matchesToRemove.push(matchId)\n debug(`matchFn error: %o`, err)\n }\n }\n })\n\n // Remove matches that errored\n removePendingMatches(matchesToRemove)\n\n if (isChangeMessage(message)) {\n // Check if the message contains schema information\n const schema = message.headers.schema\n if (schema && typeof schema === `string`) {\n // Store the schema for future use if it's a valid string\n relationSchema.setState(() => schema)\n }\n\n // In buffered initial sync of progressive mode, buffer messages instead of writing\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), write\n // directly to it instead of buffering. 
This prevents orphan transactions.\n if (isBufferingInitialSync() && !transactionStarted) {\n bufferedMessages.push(message)\n } else {\n // Normal processing: write changes immediately\n if (!transactionStarted) {\n begin()\n transactionStarted = true\n }\n\n processChangeMessage(message)\n }\n } else if (isSnapshotEndMessage(message)) {\n // Track postgres snapshot metadata for resolving awaiting mutations\n // Skip during buffered initial sync (will be extracted during atomic swap)\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), track snapshots\n // to avoid losing them when messages are written to the existing transaction.\n if (!isBufferingInitialSync() || transactionStarted) {\n newSnapshots.push(parseSnapshotMessage(message))\n }\n } else if (isUpToDateMessage(message)) {\n // up-to-date takes precedence - also triggers progressive mode atomic swap\n commitPoint = `up-to-date`\n } else if (isSubsetEndMessage(message)) {\n // subset-end triggers commit but not progressive mode atomic swap\n if (commitPoint !== `up-to-date`) {\n commitPoint = `subset-end`\n }\n } else if (isMoveOutMessage(message)) {\n // Handle move-out event: buffer if buffering, otherwise process immediately\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), process\n // immediately to avoid orphan transactions.\n if (isBufferingInitialSync() && !transactionStarted) {\n bufferedMessages.push(message)\n } else {\n // Normal processing: process move-out immediately\n transactionStarted = processMoveOutEvent(\n message.headers.patterns,\n begin,\n write,\n transactionStarted,\n )\n }\n } else if (isMustRefetchMessage(message)) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Received must-refetch message, starting transaction with truncate`,\n )\n\n // Start a transaction and truncate the collection\n if (!transactionStarted) {\n begin()\n transactionStarted = true\n }\n\n truncate()\n\n // Clear tag tracking state\n clearTagTrackingState()\n\n // Clear synced keys tracking since we're starting fresh\n syncedKeys.clear()\n\n // Reset the loadSubset deduplication state since we're starting fresh\n // This ensures that previously loaded predicates don't prevent refetching after truncate\n loadSubsetDedupe?.reset()\n\n // Reset flags so we continue accumulating changes until next up-to-date\n commitPoint = null\n hasReceivedUpToDate = false // Reset for progressive mode (isBufferingInitialSync will reflect this)\n bufferedMessages.length = 0 // Clear buffered messages\n }\n }\n\n if (commitPoint !== null) {\n // PROGRESSIVE MODE: Atomic swap on first up-to-date (not subset-end)\n // EXCEPTION: Skip atomic swap if a transaction is already started (e.g., from must-refetch).\n // In that case, do a normal commit to properly close the existing transaction.\n if (\n isBufferingInitialSync() &&\n commitPoint === `up-to-date` &&\n !transactionStarted\n ) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Progressive mode: Performing atomic swap with ${bufferedMessages.length} buffered messages`,\n )\n\n // Start atomic swap transaction\n begin()\n\n // Truncate to clear all snapshot data\n truncate()\n\n // Clear tag tracking state for atomic swap\n clearTagTrackingState()\n\n // Clear synced keys tracking for atomic swap\n syncedKeys.clear()\n\n // Apply all buffered change messages and extract txids/snapshots\n for (const bufferedMsg of bufferedMessages) {\n if (isChangeMessage(bufferedMsg)) {\n processChangeMessage(bufferedMsg)\n\n // Extract txids from buffered messages (will be committed to store after transaction)\n if (hasTxids(bufferedMsg)) {\n bufferedMsg.headers.txids?.forEach((txid) =>\n newTxids.add(txid),\n )\n }\n } else if (isSnapshotEndMessage(bufferedMsg)) {\n // Extract snapshots from buffered messages (will be committed to store after transaction)\n newSnapshots.push(parseSnapshotMessage(bufferedMsg))\n } else if (isMoveOutMessage(bufferedMsg)) {\n // Process buffered move-out messages during atomic swap\n processMoveOutEvent(\n bufferedMsg.headers.patterns,\n begin,\n write,\n transactionStarted,\n )\n }\n }\n\n // Commit the atomic swap\n commit()\n\n // Exit buffering phase by marking that we've received up-to-date\n // isBufferingInitialSync() will now return false\n bufferedMessages.length = 0\n\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Progressive mode: Atomic swap complete, now in normal sync mode`,\n )\n } else {\n // Normal mode or on-demand: commit transaction if one was started\n // Both up-to-date and subset-end trigger a commit\n if (transactionStarted) {\n commit()\n transactionStarted = false\n }\n }\n wrappedMarkReady(isBufferingInitialSync())\n\n // Track that we've received the first up-to-date for progressive mode\n if (commitPoint === `up-to-date`) {\n hasReceivedUpToDate = true\n }\n\n // Always commit txids when we receive up-to-date, regardless of transaction state\n seenTxids.setState((currentTxids) => {\n const clonedSeen = new Set<Txid>(currentTxids)\n if (newTxids.size > 0) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}new txids synced from pg %O`,\n Array.from(newTxids),\n )\n }\n newTxids.forEach((txid) => clonedSeen.add(txid))\n newTxids.clear()\n return clonedSeen\n })\n\n // Always commit snapshots when we receive up-to-date, regardless of transaction state\n seenSnapshots.setState((currentSnapshots) => {\n const seen = [...currentSnapshots, ...newSnapshots]\n newSnapshots.forEach((snapshot) =>\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}new snapshot synced from pg %o`,\n snapshot,\n ),\n )\n newSnapshots.length = 0\n return seen\n })\n\n // Resolve all matched pending matches on up-to-date or subset-end\n // Set batchCommitted BEFORE resolving to avoid timing window where late awaitMatch\n // calls could register as \"matched\" after resolver pass already ran\n batchCommitted.setState(() => true)\n\n resolveMatchedPendingMatches()\n }\n })\n\n // Return the deduplicated loadSubset if available (on-demand or progressive mode)\n // The loadSubset method is auto-bound, so it can be safely returned directly\n return {\n loadSubset: loadSubsetDedupe?.loadSubset,\n cleanup: () => {\n // Unsubscribe from the stream\n unsubscribeStream()\n // Abort the abort controller to stop the stream\n abortController.abort()\n // Reset deduplication tracking so collection can load fresh data if restarted\n loadSubsetDedupe?.reset()\n },\n }\n },\n // Expose the getSyncMetadata function\n getSyncMetadata,\n }\n}\n"],"names":[],"mappings":"<base64 VLQ mappings omitted>"}
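A note on the pattern visible in the embedded source below: `createLoadSubsetDedupe` now receives the stream's `AbortSignal`, and `handleSnapshotError` uses it to tell expected cleanup-time rejections apart from real failures. A minimal sketch of that guard, assuming a hypothetical `fetchSnapshot` stand-in rather than the package's actual stream API:

// Sketch of the abort-aware snapshot error guard (illustrative names;
// `fetchSnapshot` here is a stand-in, not the package's API surface).
async function fetchWithAbortGuard<T>(
  fetchSnapshot: () => Promise<T>,
  signal: AbortSignal,
): Promise<T | undefined> {
  try {
    return await fetchSnapshot()
  } catch (error) {
    if (signal.aborted) {
      // The stream was torn down mid-request; the rejection is expected
      // cleanup noise, so swallow it rather than propagating.
      return undefined
    }
    // The stream is still live, so this is a genuine failure.
    throw error
  }
}

Used this way, an abort triggered by collection cleanup resolves quietly instead of surfacing a spurious error to callers that are still awaiting the snapshot.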
1
+ {"version":3,"file":"electric.js","sources":["../../src/electric.ts"],"sourcesContent":["import {\n ShapeStream,\n isChangeMessage,\n isControlMessage,\n isVisibleInSnapshot,\n} from '@electric-sql/client'\nimport { Store } from '@tanstack/store'\nimport DebugModule from 'debug'\nimport { DeduplicatedLoadSubset, and } from '@tanstack/db'\nimport {\n ExpectedNumberInAwaitTxIdError,\n StreamAbortedError,\n TimeoutWaitingForMatchError,\n TimeoutWaitingForTxIdError,\n} from './errors'\nimport { compileSQL } from './sql-compiler'\nimport {\n addTagToIndex,\n findRowsMatchingPattern,\n getTagLength,\n isMoveOutMessage,\n removeTagFromIndex,\n tagMatchesPattern,\n} from './tag-index'\nimport type { ColumnEncoder } from './sql-compiler'\nimport type {\n MoveOutPattern,\n MoveTag,\n ParsedMoveTag,\n RowId,\n TagIndex,\n} from './tag-index'\nimport type {\n BaseCollectionConfig,\n ChangeMessageOrDeleteKeyMessage,\n CollectionConfig,\n DeleteMutationFnParams,\n InsertMutationFnParams,\n LoadSubsetOptions,\n SyncConfig,\n SyncMode,\n UpdateMutationFnParams,\n UtilsRecord,\n} from '@tanstack/db'\nimport type { StandardSchemaV1 } from '@standard-schema/spec'\nimport type {\n ControlMessage,\n GetExtensions,\n Message,\n PostgresSnapshot,\n Row,\n ShapeStreamOptions,\n} from '@electric-sql/client'\n\n// Re-export for user convenience in custom match functions\nexport { isChangeMessage, isControlMessage } from '@electric-sql/client'\n\nconst debug = DebugModule.debug(`ts/db:electric`)\n\n/**\n * Symbol for internal test hooks (hidden from public API)\n */\nexport const ELECTRIC_TEST_HOOKS = Symbol(`electricTestHooks`)\n\n/**\n * Internal test hooks interface (for testing only)\n */\nexport interface ElectricTestHooks {\n /**\n * Called before marking collection ready after first up-to-date in progressive mode\n * Allows tests to pause and validate snapshot phase before atomic swap completes\n */\n beforeMarkingReady?: () => Promise<void>\n}\n\n/**\n * Type representing a transaction ID in ElectricSQL\n */\nexport type Txid = number\n\n/**\n * Custom match function type - receives stream messages and returns boolean\n * indicating if the mutation has been synchronized\n */\nexport type MatchFunction<T extends Row<unknown>> = (\n message: Message<T>,\n) => boolean\n\n/**\n * Matching strategies for Electric synchronization\n * Handlers can return:\n * - Txid strategy: { txid: number | number[], timeout?: number } (recommended)\n * - Void (no return value) - mutation completes without waiting\n *\n * The optional timeout property specifies how long to wait for the txid(s) in milliseconds.\n * If not specified, defaults to 5000ms.\n */\nexport type MatchingStrategy = {\n txid: Txid | Array<Txid>\n timeout?: number\n} | void\n\n/**\n * Type representing a snapshot end message\n */\ntype SnapshotEndMessage = ControlMessage & {\n headers: { control: `snapshot-end` }\n}\n// The `InferSchemaOutput` and `ResolveType` are copied from the `@tanstack/db` package\n// but we modified `InferSchemaOutput` slightly to restrict the schema output to `Row<unknown>`\n// This is needed in order for `GetExtensions` to be able to infer the parser extensions type from the schema\ntype InferSchemaOutput<T> = T extends StandardSchemaV1\n ? StandardSchemaV1.InferOutput<T> extends Row<unknown>\n ? 
StandardSchemaV1.InferOutput<T>\n : Record<string, unknown>\n : Record<string, unknown>\n\n/**\n * The mode of sync to use for the collection.\n * @default `eager`\n * @description\n * - `eager`:\n * - syncs all data immediately on preload\n * - collection will be marked as ready once the sync is complete\n * - there is no incremental sync\n * - `on-demand`:\n * - syncs data in incremental snapshots when the collection is queried\n * - collection will be marked as ready immediately after the first snapshot is synced\n * - `progressive`:\n * - syncs all data for the collection in the background\n * - uses incremental snapshots during the initial sync to provide a fast path to the data required for queries\n * - collection will be marked as ready once the full sync is complete\n */\nexport type ElectricSyncMode = SyncMode | `progressive`\n\n/**\n * Configuration interface for Electric collection options\n * @template T - The type of items in the collection\n * @template TSchema - The schema type for validation\n */\nexport interface ElectricCollectionConfig<\n T extends Row<unknown> = Row<unknown>,\n TSchema extends StandardSchemaV1 = never,\n> extends Omit<\n BaseCollectionConfig<\n T,\n string | number,\n TSchema,\n ElectricCollectionUtils<T>,\n any\n >,\n `onInsert` | `onUpdate` | `onDelete` | `syncMode`\n> {\n /**\n * Configuration options for the ElectricSQL ShapeStream\n */\n shapeOptions: ShapeStreamOptions<GetExtensions<T>>\n syncMode?: ElectricSyncMode\n\n /**\n * Internal test hooks (for testing only)\n * Hidden via Symbol to prevent accidental usage in production\n */\n [ELECTRIC_TEST_HOOKS]?: ElectricTestHooks\n\n /**\n * Optional asynchronous handler function called before an insert operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? } or void\n * @example\n * // Basic Electric insert handler with txid (recommended)\n * onInsert: async ({ transaction }) => {\n * const newItem = transaction.mutations[0].modified\n * const result = await api.todos.create({\n * data: newItem\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Insert handler with custom timeout\n * onInsert: async ({ transaction }) => {\n * const newItem = transaction.mutations[0].modified\n * const result = await api.todos.create({\n * data: newItem\n * })\n * return { txid: result.txid, timeout: 10000 } // Wait up to 10 seconds\n * }\n *\n * @example\n * // Insert handler with multiple items - return array of txids\n * onInsert: async ({ transaction }) => {\n * const items = transaction.mutations.map(m => m.modified)\n * const results = await Promise.all(\n * items.map(item => api.todos.create({ data: item }))\n * )\n * return { txid: results.map(r => r.txid) }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onInsert: async ({ transaction, collection }) => {\n * const newItem = transaction.mutations[0].modified\n * await api.todos.create({ data: newItem })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'insert' &&\n * message.value.name === newItem.name\n * )\n * }\n */\n onInsert?: (\n params: InsertMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n\n /**\n * Optional asynchronous handler function called before an update operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? 
} or void\n * @example\n * // Basic Electric update handler with txid (recommended)\n * onUpdate: async ({ transaction }) => {\n * const { original, changes } = transaction.mutations[0]\n * const result = await api.todos.update({\n * where: { id: original.id },\n * data: changes\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onUpdate: async ({ transaction, collection }) => {\n * const { original, changes } = transaction.mutations[0]\n * await api.todos.update({ where: { id: original.id }, data: changes })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'update' &&\n * message.value.id === original.id\n * )\n * }\n */\n onUpdate?: (\n params: UpdateMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n\n /**\n * Optional asynchronous handler function called before a delete operation\n * @param params Object containing transaction and collection information\n * @returns Promise resolving to { txid, timeout? } or void\n * @example\n * // Basic Electric delete handler with txid (recommended)\n * onDelete: async ({ transaction }) => {\n * const mutation = transaction.mutations[0]\n * const result = await api.todos.delete({\n * id: mutation.original.id\n * })\n * return { txid: result.txid }\n * }\n *\n * @example\n * // Use awaitMatch utility for custom matching\n * onDelete: async ({ transaction, collection }) => {\n * const mutation = transaction.mutations[0]\n * await api.todos.delete({ id: mutation.original.id })\n * await collection.utils.awaitMatch(\n * (message) => isChangeMessage(message) &&\n * message.headers.operation === 'delete' &&\n * message.value.id === mutation.original.id\n * )\n * }\n */\n onDelete?: (\n params: DeleteMutationFnParams<\n T,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => Promise<MatchingStrategy>\n}\n\nfunction isUpToDateMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { up_to_date: true } {\n return isControlMessage(message) && message.headers.control === `up-to-date`\n}\n\nfunction isMustRefetchMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { headers: { control: `must-refetch` } } {\n return isControlMessage(message) && message.headers.control === `must-refetch`\n}\n\nfunction isSnapshotEndMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is SnapshotEndMessage {\n return isControlMessage(message) && message.headers.control === `snapshot-end`\n}\n\nfunction isSubsetEndMessage<T extends Row<unknown>>(\n message: Message<T>,\n): message is ControlMessage & { headers: { control: `subset-end` } } {\n return (\n isControlMessage(message) &&\n (message.headers.control as string) === `subset-end`\n )\n}\n\nfunction parseSnapshotMessage(message: SnapshotEndMessage): PostgresSnapshot {\n return {\n xmin: message.headers.xmin,\n xmax: message.headers.xmax,\n xip_list: message.headers.xip_list,\n }\n}\n\n// Check if a message contains txids in its headers\nfunction hasTxids<T extends Row<unknown>>(\n message: Message<T>,\n): message is Message<T> & { headers: { txids?: Array<Txid> } } {\n return `txids` in message.headers && Array.isArray(message.headers.txids)\n}\n\n/**\n * Creates a deduplicated loadSubset handler for progressive/on-demand modes\n * Returns null for eager mode, or a DeduplicatedLoadSubset instance for other modes.\n * Handles fetching snapshots in 
progressive mode during buffering phase,\n * and requesting snapshots in on-demand mode.\n *\n * When cursor expressions are provided (whereFrom/whereCurrent), makes two\n * requestSnapshot calls:\n * - One for whereFrom (rows > cursor) with limit\n * - One for whereCurrent (rows = cursor, for tie-breaking) without limit\n */\nfunction createLoadSubsetDedupe<T extends Row<unknown>>({\n stream,\n syncMode,\n isBufferingInitialSync,\n begin,\n write,\n commit,\n collectionId,\n encodeColumnName,\n signal,\n}: {\n stream: ShapeStream<T>\n syncMode: ElectricSyncMode\n isBufferingInitialSync: () => boolean\n begin: () => void\n write: (mutation: {\n type: `insert` | `update` | `delete`\n value: T\n metadata: Record<string, unknown>\n }) => void\n commit: () => void\n collectionId?: string\n /**\n * Optional function to encode column names (e.g., camelCase to snake_case).\n * This is typically the `encode` function from shapeOptions.columnMapper.\n */\n encodeColumnName?: ColumnEncoder\n /**\n * Abort signal to check if the stream has been aborted during cleanup.\n * When aborted, errors from requestSnapshot are silently ignored.\n */\n signal: AbortSignal\n}): DeduplicatedLoadSubset | null {\n if (syncMode === `eager`) {\n return null\n }\n\n const compileOptions = encodeColumnName ? { encodeColumnName } : undefined\n const logPrefix = collectionId ? `[${collectionId}] ` : ``\n\n /**\n * Handles errors from snapshot operations. Returns true if the error was\n * handled (signal aborted during cleanup), false if it should be re-thrown.\n */\n function handleSnapshotError(error: unknown, operation: string): boolean {\n if (signal.aborted) {\n debug(`${logPrefix}Ignoring ${operation} error during cleanup: %o`, error)\n return true\n }\n debug(`${logPrefix}Error in ${operation}: %o`, error)\n return false\n }\n\n const loadSubset = async (opts: LoadSubsetOptions) => {\n if (isBufferingInitialSync()) {\n const snapshotParams = compileSQL<T>(opts, compileOptions)\n try {\n const { data: rows } = await stream.fetchSnapshot(snapshotParams)\n\n if (!isBufferingInitialSync()) {\n debug(`${logPrefix}Ignoring snapshot - sync completed while fetching`)\n return\n }\n\n if (rows.length > 0) {\n begin()\n for (const row of rows) {\n write({\n type: `insert`,\n value: row.value,\n metadata: { ...row.headers },\n })\n }\n commit()\n debug(`${logPrefix}Applied snapshot with ${rows.length} rows`)\n }\n } catch (error) {\n if (handleSnapshotError(error, `fetchSnapshot`)) {\n return\n }\n throw error\n }\n return\n }\n\n if (syncMode === `progressive`) {\n return\n }\n\n const { cursor, where, orderBy, limit } = opts\n\n try {\n if (cursor) {\n const whereCurrentOpts: LoadSubsetOptions = {\n where: where ? and(where, cursor.whereCurrent) : cursor.whereCurrent,\n orderBy,\n }\n const whereCurrentParams = compileSQL<T>(\n whereCurrentOpts,\n compileOptions,\n )\n\n const whereFromOpts: LoadSubsetOptions = {\n where: where ? 
and(where, cursor.whereFrom) : cursor.whereFrom,\n orderBy,\n limit,\n }\n const whereFromParams = compileSQL<T>(whereFromOpts, compileOptions)\n\n debug(`${logPrefix}Requesting cursor.whereCurrent snapshot (all ties)`)\n debug(\n `${logPrefix}Requesting cursor.whereFrom snapshot (with limit ${limit})`,\n )\n\n await Promise.all([\n stream.requestSnapshot(whereCurrentParams),\n stream.requestSnapshot(whereFromParams),\n ])\n } else {\n const snapshotParams = compileSQL<T>(opts, compileOptions)\n await stream.requestSnapshot(snapshotParams)\n }\n } catch (error) {\n if (handleSnapshotError(error, `requestSnapshot`)) {\n return\n }\n throw error\n }\n }\n\n return new DeduplicatedLoadSubset({ loadSubset })\n}\n\n/**\n * Type for the awaitTxId utility function\n */\nexport type AwaitTxIdFn = (txId: Txid, timeout?: number) => Promise<boolean>\n\n/**\n * Type for the awaitMatch utility function\n */\nexport type AwaitMatchFn<T extends Row<unknown>> = (\n matchFn: MatchFunction<T>,\n timeout?: number,\n) => Promise<boolean>\n\n/**\n * Electric collection utilities type\n */\nexport interface ElectricCollectionUtils<\n T extends Row<unknown> = Row<unknown>,\n> extends UtilsRecord {\n awaitTxId: AwaitTxIdFn\n awaitMatch: AwaitMatchFn<T>\n}\n\n/**\n * Creates Electric collection options for use with a standard Collection\n *\n * @template T - The explicit type of items in the collection (highest priority)\n * @template TSchema - The schema type for validation and type inference (second priority)\n * @template TFallback - The fallback type if no explicit or schema type is provided\n * @param config - Configuration options for the Electric collection\n * @returns Collection options with utilities\n */\n\n// Overload for when schema is provided\nexport function electricCollectionOptions<T extends StandardSchemaV1>(\n config: ElectricCollectionConfig<InferSchemaOutput<T>, T> & {\n schema: T\n },\n): Omit<CollectionConfig<InferSchemaOutput<T>, string | number, T>, `utils`> & {\n id?: string\n utils: ElectricCollectionUtils<InferSchemaOutput<T>>\n schema: T\n}\n\n// Overload for when no schema is provided\nexport function electricCollectionOptions<T extends Row<unknown>>(\n config: ElectricCollectionConfig<T> & {\n schema?: never // prohibit schema\n },\n): Omit<CollectionConfig<T, string | number>, `utils`> & {\n id?: string\n utils: ElectricCollectionUtils<T>\n schema?: never // no schema in the result\n}\n\nexport function electricCollectionOptions<T extends Row<unknown>>(\n config: ElectricCollectionConfig<T, any>,\n): Omit<\n CollectionConfig<T, string | number, any, ElectricCollectionUtils<T>>,\n `utils`\n> & {\n id?: string\n utils: ElectricCollectionUtils<T>\n schema?: any\n} {\n const seenTxids = new Store<Set<Txid>>(new Set([]))\n const seenSnapshots = new Store<Array<PostgresSnapshot>>([])\n const internalSyncMode = config.syncMode ?? `eager`\n const finalSyncMode =\n internalSyncMode === `progressive` ? 
`on-demand` : internalSyncMode\n const pendingMatches = new Store<\n Map<\n string,\n {\n matchFn: (message: Message<any>) => boolean\n resolve: (value: boolean) => void\n reject: (error: Error) => void\n timeoutId: ReturnType<typeof setTimeout>\n matched: boolean\n }\n >\n >(new Map())\n\n // Buffer messages since last up-to-date to handle race conditions\n const currentBatchMessages = new Store<Array<Message<any>>>([])\n\n // Track whether the current batch has been committed (up-to-date received)\n // This allows awaitMatch to resolve immediately for messages from committed batches\n const batchCommitted = new Store<boolean>(false)\n\n /**\n * Helper function to remove multiple matches from the pendingMatches store\n */\n const removePendingMatches = (matchIds: Array<string>) => {\n if (matchIds.length > 0) {\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n matchIds.forEach((id) => newMatches.delete(id))\n return newMatches\n })\n }\n }\n\n /**\n * Helper function to resolve and cleanup matched pending matches\n */\n const resolveMatchedPendingMatches = () => {\n const matchesToResolve: Array<string> = []\n pendingMatches.state.forEach((match, matchId) => {\n if (match.matched) {\n clearTimeout(match.timeoutId)\n match.resolve(true)\n matchesToResolve.push(matchId)\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch resolved on up-to-date for match %s`,\n matchId,\n )\n }\n })\n removePendingMatches(matchesToResolve)\n }\n const sync = createElectricSync<T>(config.shapeOptions, {\n seenTxids,\n seenSnapshots,\n syncMode: internalSyncMode,\n pendingMatches,\n currentBatchMessages,\n batchCommitted,\n removePendingMatches,\n resolveMatchedPendingMatches,\n collectionId: config.id,\n testHooks: config[ELECTRIC_TEST_HOOKS],\n })\n\n /**\n * Wait for a specific transaction ID to be synced\n * @param txId The transaction ID to wait for as a number\n * @param timeout Optional timeout in milliseconds (defaults to 5000ms)\n * @returns Promise that resolves when the txId is synced\n */\n const awaitTxId: AwaitTxIdFn = async (\n txId: Txid,\n timeout: number = 5000,\n ): Promise<boolean> => {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitTxId called with txid %d`,\n txId,\n )\n if (typeof txId !== `number`) {\n throw new ExpectedNumberInAwaitTxIdError(typeof txId, config.id)\n }\n\n // First check if the txid is in the seenTxids store\n const hasTxid = seenTxids.state.has(txId)\n if (hasTxid) return true\n\n // Then check if the txid is in any of the seen snapshots\n const hasSnapshot = seenSnapshots.state.some((snapshot) =>\n isVisibleInSnapshot(txId, snapshot),\n )\n if (hasSnapshot) return true\n\n return new Promise((resolve, reject) => {\n const timeoutId = setTimeout(() => {\n unsubscribeSeenTxids()\n unsubscribeSeenSnapshots()\n reject(new TimeoutWaitingForTxIdError(txId, config.id))\n }, timeout)\n\n const unsubscribeSeenTxids = seenTxids.subscribe(() => {\n if (seenTxids.state.has(txId)) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitTxId found match for txid %o`,\n txId,\n )\n clearTimeout(timeoutId)\n unsubscribeSeenTxids()\n unsubscribeSeenSnapshots()\n resolve(true)\n }\n })\n\n const unsubscribeSeenSnapshots = seenSnapshots.subscribe(() => {\n const visibleSnapshot = seenSnapshots.state.find((snapshot) =>\n isVisibleInSnapshot(txId, snapshot),\n )\n if (visibleSnapshot) {\n debug(\n `${config.id ? 
`[${config.id}] ` : ``}awaitTxId found match for txid %o in snapshot %o`,\n txId,\n visibleSnapshot,\n )\n clearTimeout(timeoutId)\n unsubscribeSeenSnapshots()\n unsubscribeSeenTxids()\n resolve(true)\n }\n })\n })\n }\n\n /**\n * Wait for a custom match function to find a matching message\n * @param matchFn Function that returns true when a message matches\n * @param timeout Optional timeout in milliseconds (defaults to 5000ms)\n * @returns Promise that resolves when a matching message is found\n */\n const awaitMatch: AwaitMatchFn<any> = async (\n matchFn: MatchFunction<any>,\n timeout: number = 3000,\n ): Promise<boolean> => {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch called with custom function`,\n )\n\n return new Promise((resolve, reject) => {\n const matchId = Math.random().toString(36)\n\n const cleanupMatch = () => {\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.delete(matchId)\n return newMatches\n })\n }\n\n const onTimeout = () => {\n cleanupMatch()\n reject(new TimeoutWaitingForMatchError(config.id))\n }\n\n const timeoutId = setTimeout(onTimeout, timeout)\n\n // We need access to the stream messages to check against the match function\n // This will be handled by the sync configuration\n const checkMatch = (message: Message<any>) => {\n if (matchFn(message)) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch found matching message, waiting for up-to-date`,\n )\n // Mark as matched but don't resolve yet - wait for up-to-date\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n const existing = newMatches.get(matchId)\n if (existing) {\n newMatches.set(matchId, { ...existing, matched: true })\n }\n return newMatches\n })\n return true\n }\n return false\n }\n\n // Check against current batch messages first to handle race conditions\n for (const message of currentBatchMessages.state) {\n if (matchFn(message)) {\n // If batch is committed (up-to-date already received), resolve immediately\n // just like awaitTxId does when it finds a txid in seenTxids\n if (batchCommitted.state) {\n debug(\n `${config.id ? `[${config.id}] ` : ``}awaitMatch found immediate match in committed batch, resolving immediately`,\n )\n clearTimeout(timeoutId)\n resolve(true)\n return\n }\n\n // If batch is not yet committed, register match and wait for up-to-date\n debug(\n `${config.id ? 
`[${config.id}] ` : ``}awaitMatch found immediate match in current batch, waiting for up-to-date`,\n )\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.set(matchId, {\n matchFn: checkMatch,\n resolve,\n reject,\n timeoutId,\n matched: true, // Already matched, will resolve on up-to-date\n })\n return newMatches\n })\n return\n }\n }\n\n // Store the match function for the sync process to use\n // We'll add this to a pending matches store\n pendingMatches.setState((current) => {\n const newMatches = new Map(current)\n newMatches.set(matchId, {\n matchFn: checkMatch,\n resolve,\n reject,\n timeoutId,\n matched: false,\n })\n return newMatches\n })\n })\n }\n\n /**\n * Process matching strategy and wait for synchronization\n */\n const processMatchingStrategy = async (\n result: MatchingStrategy,\n ): Promise<void> => {\n // Only wait if result contains txid\n if (result && `txid` in result) {\n const timeout = result.timeout\n // Handle both single txid and array of txids\n if (Array.isArray(result.txid)) {\n await Promise.all(result.txid.map((txid) => awaitTxId(txid, timeout)))\n } else {\n await awaitTxId(result.txid, timeout)\n }\n }\n // If result is void/undefined, don't wait - mutation completes immediately\n }\n\n // Create wrapper handlers for direct persistence operations that handle different matching strategies\n const wrappedOnInsert = config.onInsert\n ? async (\n params: InsertMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onInsert!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n const wrappedOnUpdate = config.onUpdate\n ? async (\n params: UpdateMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onUpdate!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n const wrappedOnDelete = config.onDelete\n ? 
async (\n params: DeleteMutationFnParams<\n any,\n string | number,\n ElectricCollectionUtils<T>\n >,\n ) => {\n const handlerResult = await config.onDelete!(params)\n await processMatchingStrategy(handlerResult)\n return handlerResult\n }\n : undefined\n\n // Extract standard Collection config properties\n const {\n shapeOptions: _shapeOptions,\n onInsert: _onInsert,\n onUpdate: _onUpdate,\n onDelete: _onDelete,\n ...restConfig\n } = config\n\n return {\n ...restConfig,\n syncMode: finalSyncMode,\n sync,\n onInsert: wrappedOnInsert,\n onUpdate: wrappedOnUpdate,\n onDelete: wrappedOnDelete,\n utils: {\n awaitTxId,\n awaitMatch,\n },\n }\n}\n\n/**\n * Internal function to create ElectricSQL sync configuration\n */\nfunction createElectricSync<T extends Row<unknown>>(\n shapeOptions: ShapeStreamOptions<GetExtensions<T>>,\n options: {\n syncMode: ElectricSyncMode\n seenTxids: Store<Set<Txid>>\n seenSnapshots: Store<Array<PostgresSnapshot>>\n pendingMatches: Store<\n Map<\n string,\n {\n matchFn: (message: Message<T>) => boolean\n resolve: (value: boolean) => void\n reject: (error: Error) => void\n timeoutId: ReturnType<typeof setTimeout>\n matched: boolean\n }\n >\n >\n currentBatchMessages: Store<Array<Message<T>>>\n batchCommitted: Store<boolean>\n removePendingMatches: (matchIds: Array<string>) => void\n resolveMatchedPendingMatches: () => void\n collectionId?: string\n testHooks?: ElectricTestHooks\n },\n): SyncConfig<T> {\n const {\n seenTxids,\n seenSnapshots,\n syncMode,\n pendingMatches,\n currentBatchMessages,\n batchCommitted,\n removePendingMatches,\n resolveMatchedPendingMatches,\n collectionId,\n testHooks,\n } = options\n const MAX_BATCH_MESSAGES = 1000 // Safety limit for message buffer\n\n // Store for the relation schema information\n const relationSchema = new Store<string | undefined>(undefined)\n\n const tagCache = new Map<MoveTag, ParsedMoveTag>()\n\n // Parses a tag string into a MoveTag.\n // It memoizes the result parsed tag such that future calls\n // for the same tag string return the same MoveTag array.\n const parseTag = (tag: MoveTag): ParsedMoveTag => {\n const cachedTag = tagCache.get(tag)\n if (cachedTag) {\n return cachedTag\n }\n\n const parsedTag = tag.split(`|`)\n tagCache.set(tag, parsedTag)\n return parsedTag\n }\n\n // Tag tracking state\n const rowTagSets = new Map<RowId, Set<MoveTag>>()\n const tagIndex: TagIndex = []\n let tagLength: number | undefined = undefined\n\n /**\n * Initialize the tag index with the correct length\n */\n const initializeTagIndex = (length: number): void => {\n if (tagIndex.length < length) {\n // Extend the index array to the required length\n for (let i = tagIndex.length; i < length; i++) {\n tagIndex[i] = new Map()\n }\n }\n }\n\n /**\n * Add tags to a row and update the tag index\n */\n const addTagsToRow = (\n tags: Array<MoveTag>,\n rowId: RowId,\n rowTagSet: Set<MoveTag>,\n ): void => {\n for (const tag of tags) {\n const parsedTag = parseTag(tag)\n\n // Infer tag length from first tag\n if (tagLength === undefined) {\n tagLength = getTagLength(parsedTag)\n initializeTagIndex(tagLength)\n }\n\n // Validate tag length matches\n const currentTagLength = getTagLength(parsedTag)\n if (currentTagLength !== tagLength) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Tag length mismatch: expected ${tagLength}, got ${currentTagLength}`,\n )\n continue\n }\n\n rowTagSet.add(tag)\n addTagToIndex(parsedTag, rowId, tagIndex, tagLength)\n }\n }\n\n /**\n * Remove tags from a row and update the tag index\n */\n const removeTagsFromRow = (\n removedTags: Array<MoveTag>,\n rowId: RowId,\n rowTagSet: Set<MoveTag>,\n ): void => {\n if (tagLength === undefined) {\n return\n }\n\n for (const tag of removedTags) {\n const parsedTag = parseTag(tag)\n rowTagSet.delete(tag)\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength)\n // We aggressively evict the tag from the cache:\n // if this tag is shared with another row\n // and is not removed from that other row,\n // then next time we encounter the tag it will be parsed again\n tagCache.delete(tag)\n }\n }\n\n /**\n * Process tags for a change message (add and remove tags)\n */\n const processTagsForChangeMessage = (\n tags: Array<MoveTag> | undefined,\n removedTags: Array<MoveTag> | undefined,\n rowId: RowId,\n ): Set<MoveTag> => {\n // Initialize tag set for this row if it doesn't exist (needed for checking deletion)\n if (!rowTagSets.has(rowId)) {\n rowTagSets.set(rowId, new Set())\n }\n const rowTagSet = rowTagSets.get(rowId)!\n\n // Add new tags\n if (tags) {\n addTagsToRow(tags, rowId, rowTagSet)\n }\n\n // Remove tags\n if (removedTags) {\n removeTagsFromRow(removedTags, rowId, rowTagSet)\n }\n\n return rowTagSet\n }\n\n /**\n * Clear all tag tracking state (used when truncating)\n */\n const clearTagTrackingState = (): void => {\n rowTagSets.clear()\n tagIndex.length = 0\n tagLength = undefined\n }\n\n /**\n * Remove all tags for a row from both the tag set and the index\n * Used when a row is deleted\n */\n const clearTagsForRow = (rowId: RowId): void => {\n if (tagLength === undefined) {\n return\n }\n\n const rowTagSet = rowTagSets.get(rowId)\n if (!rowTagSet) {\n return\n }\n\n // Remove each tag from the index\n for (const tag of rowTagSet) {\n const parsedTag = parseTag(tag)\n const currentTagLength = getTagLength(parsedTag)\n if (currentTagLength === tagLength) {\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength)\n }\n tagCache.delete(tag)\n }\n\n // Remove the row from the tag sets map\n rowTagSets.delete(rowId)\n }\n\n /**\n * Remove matching tags from a row based on a pattern\n * Returns true if the row's tag set is now empty\n */\n const removeMatchingTagsFromRow = (\n rowId: RowId,\n pattern: MoveOutPattern,\n ): boolean => {\n const rowTagSet = rowTagSets.get(rowId)\n if (!rowTagSet) {\n return false\n }\n\n // Find tags that match this pattern and remove them\n for (const tag of rowTagSet) {\n const parsedTag = parseTag(tag)\n if (tagMatchesPattern(parsedTag, pattern)) {\n rowTagSet.delete(tag)\n removeTagFromIndex(parsedTag, rowId, tagIndex, tagLength!)\n }\n }\n\n // Check if row's tag set is now empty\n if (rowTagSet.size === 0) {\n rowTagSets.delete(rowId)\n return true\n }\n\n return false\n }\n\n /**\n * Process move-out event: remove matching tags from rows and delete rows with empty tag sets\n */\n const processMoveOutEvent = (\n patterns: Array<MoveOutPattern>,\n begin: () => void,\n write: (message: ChangeMessageOrDeleteKeyMessage<T>) => void,\n transactionStarted: boolean,\n ): boolean => {\n if (tagLength === undefined) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Received move-out message but no tag length set yet, ignoring`,\n )\n return transactionStarted\n }\n\n let txStarted = transactionStarted\n\n // Process all patterns and collect rows to delete\n for (const pattern of patterns) {\n // Find all rows that match this pattern\n const affectedRowIds = findRowsMatchingPattern(pattern, tagIndex)\n\n for (const rowId of affectedRowIds) {\n if (removeMatchingTagsFromRow(rowId, pattern)) {\n // Delete rows with empty tag sets\n if (!txStarted) {\n begin()\n txStarted = true\n }\n\n write({\n type: `delete`,\n key: rowId,\n })\n }\n }\n }\n\n return txStarted\n }\n\n /**\n * Get the sync metadata for insert operations\n * @returns Record containing relation information\n */\n const getSyncMetadata = (): Record<string, unknown> => {\n // Use the stored schema if available, otherwise default to 'public'\n const schema = relationSchema.state || `public`\n\n return {\n relation: shapeOptions.params?.table\n ? [schema, shapeOptions.params.table]\n : undefined,\n }\n }\n\n let unsubscribeStream: () => void\n\n return {\n sync: (params: Parameters<SyncConfig<T>[`sync`]>[0]) => {\n const { begin, write, commit, markReady, truncate, collection } = params\n\n // Wrap markReady to wait for test hook in progressive mode\n let progressiveReadyGate: Promise<void> | null = null\n const wrappedMarkReady = (isBuffering: boolean) => {\n // Only create gate if we're in buffering phase (first up-to-date)\n if (\n isBuffering &&\n syncMode === `progressive` &&\n testHooks?.beforeMarkingReady\n ) {\n // Create a new gate promise for this sync cycle\n progressiveReadyGate = testHooks.beforeMarkingReady()\n progressiveReadyGate.then(() => {\n markReady()\n })\n } else {\n // No hook, not buffering, or already past first up-to-date\n markReady()\n }\n }\n\n // Abort controller for the stream - wraps the signal if provided\n const abortController = new AbortController()\n\n if (shapeOptions.signal) {\n shapeOptions.signal.addEventListener(\n `abort`,\n () => {\n abortController.abort()\n },\n {\n once: true,\n },\n )\n if (shapeOptions.signal.aborted) {\n abortController.abort()\n }\n }\n\n // Cleanup pending matches on abort\n abortController.signal.addEventListener(`abort`, () => {\n pendingMatches.setState((current) => {\n current.forEach((match) => {\n clearTimeout(match.timeoutId)\n match.reject(new StreamAbortedError())\n })\n return new Map() // Clear all pending matches\n })\n })\n\n const stream = new ShapeStream({\n ...shapeOptions,\n // In on-demand mode, we only want to sync changes, so we set the log to `changes_only`\n log: syncMode === `on-demand` ? `changes_only` : undefined,\n // In on-demand mode, we only need the changes from the point of time the collection was created\n // so we default to `now` when there is no saved offset.\n offset:\n shapeOptions.offset ?? (syncMode === `on-demand` ? 
`now` : undefined),\n signal: abortController.signal,\n onError: (errorParams) => {\n // Just immediately mark ready if there's an error to avoid blocking\n // apps waiting for `.preload()` to finish.\n // Note that Electric sends a 409 error on a `must-refetch` message, but the\n // ShapeStream handles this and it will not reach this handler, therefore\n // this markReady will not be triggered by a `must-refetch`.\n markReady()\n\n if (shapeOptions.onError) {\n return shapeOptions.onError(errorParams)\n } else {\n console.error(\n `An error occurred while syncing collection: ${collection.id}, \\n` +\n `it has been marked as ready to avoid blocking apps waiting for '.preload()' to finish. \\n` +\n `You can provide an 'onError' handler on the shapeOptions to handle this error, and this message will not be logged.`,\n errorParams,\n )\n }\n\n return\n },\n })\n let transactionStarted = false\n const newTxids = new Set<Txid>()\n const newSnapshots: Array<PostgresSnapshot> = []\n let hasReceivedUpToDate = false // Track if we've completed initial sync in progressive mode\n\n // Progressive mode state\n // Helper to determine if we're buffering the initial sync\n const isBufferingInitialSync = () =>\n syncMode === `progressive` && !hasReceivedUpToDate\n const bufferedMessages: Array<Message<T>> = [] // Buffer change messages during initial sync\n\n // Track keys that have been synced to handle overlapping subset queries.\n // When multiple subset queries return the same row, the server sends `insert`\n // for each response. We convert subsequent inserts to updates to avoid\n // duplicate key errors when the row's data has changed between requests.\n const syncedKeys = new Set<string | number>()\n\n /**\n * Process a change message: handle tags and write the mutation\n */\n const processChangeMessage = (changeMessage: Message<T>) => {\n if (!isChangeMessage(changeMessage)) {\n return\n }\n\n // Process tags if present\n const tags = changeMessage.headers.tags\n const removedTags = changeMessage.headers.removed_tags\n const hasTags = tags || removedTags\n\n const rowId = collection.getKeyFromItem(changeMessage.value)\n const operation = changeMessage.headers.operation\n\n // Track synced keys and handle overlapping subset queries.\n // When multiple subset queries return the same row, the server sends\n // `insert` for each response. We convert subsequent inserts to updates\n // to avoid duplicate key errors when the row's data has changed.\n const isDelete = operation === `delete`\n const isDuplicateInsert =\n operation === `insert` && syncedKeys.has(rowId)\n\n if (isDelete) {\n syncedKeys.delete(rowId)\n } else {\n syncedKeys.add(rowId)\n }\n\n if (isDelete) {\n clearTagsForRow(rowId)\n } else if (hasTags) {\n processTagsForChangeMessage(tags, removedTags, rowId)\n }\n\n write({\n type: isDuplicateInsert ? 
`update` : operation,\n value: changeMessage.value,\n // Include the primary key and relation info in the metadata\n metadata: {\n ...changeMessage.headers,\n },\n })\n }\n\n // Create deduplicated loadSubset wrapper for non-eager modes\n // This prevents redundant snapshot requests when multiple concurrent\n // live queries request overlapping or subset predicates\n const loadSubsetDedupe = createLoadSubsetDedupe({\n stream,\n syncMode,\n isBufferingInitialSync,\n begin,\n write,\n commit,\n collectionId,\n // Pass the columnMapper's encode function to transform column names\n // (e.g., camelCase to snake_case) when compiling SQL for subset queries\n encodeColumnName: shapeOptions.columnMapper?.encode,\n // Pass abort signal so requestSnapshot errors can be ignored during cleanup\n signal: abortController.signal,\n })\n\n unsubscribeStream = stream.subscribe((messages: Array<Message<T>>) => {\n // Track commit point type - up-to-date takes precedence as it also triggers progressive mode atomic swap\n let commitPoint: `up-to-date` | `subset-end` | null = null\n\n // Don't clear the buffer between batches - this preserves messages for awaitMatch\n // to find even if multiple batches arrive before awaitMatch is called.\n // The buffer is naturally limited by MAX_BATCH_MESSAGES (oldest messages are dropped).\n // Reset batchCommitted since we're starting a new batch\n batchCommitted.setState(() => false)\n\n for (const message of messages) {\n // Add message to current batch buffer (for race condition handling)\n if (isChangeMessage(message) || isMoveOutMessage(message)) {\n currentBatchMessages.setState((currentBuffer) => {\n const newBuffer = [...currentBuffer, message]\n // Limit buffer size for safety\n if (newBuffer.length > MAX_BATCH_MESSAGES) {\n newBuffer.splice(0, newBuffer.length - MAX_BATCH_MESSAGES)\n }\n return newBuffer\n })\n }\n\n // Check for txids in the message and add them to our store\n // Skip during buffered initial sync in progressive mode (txids will be extracted during atomic swap)\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), track txids\n // to avoid losing them when messages are written to the existing transaction.\n if (\n hasTxids(message) &&\n (!isBufferingInitialSync() || transactionStarted)\n ) {\n message.headers.txids?.forEach((txid) => newTxids.add(txid))\n }\n\n // Check pending matches against this message\n // Note: matchFn will mark matches internally, we don't resolve here\n const matchesToRemove: Array<string> = []\n pendingMatches.state.forEach((match, matchId) => {\n if (!match.matched) {\n try {\n match.matchFn(message)\n } catch (err) {\n // If matchFn throws, clean up and reject the promise\n clearTimeout(match.timeoutId)\n match.reject(\n err instanceof Error ? err : new Error(String(err)),\n )\n matchesToRemove.push(matchId)\n debug(`matchFn error: %o`, err)\n }\n }\n })\n\n // Remove matches that errored\n removePendingMatches(matchesToRemove)\n\n if (isChangeMessage(message)) {\n // Check if the message contains schema information\n const schema = message.headers.schema\n if (schema && typeof schema === `string`) {\n // Store the schema for future use if it's a valid string\n relationSchema.setState(() => schema)\n }\n\n // In buffered initial sync of progressive mode, buffer messages instead of writing\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), write\n // directly to it instead of buffering. 
This prevents orphan transactions.\n if (isBufferingInitialSync() && !transactionStarted) {\n bufferedMessages.push(message)\n } else {\n // Normal processing: write changes immediately\n if (!transactionStarted) {\n begin()\n transactionStarted = true\n }\n\n processChangeMessage(message)\n }\n } else if (isSnapshotEndMessage(message)) {\n // Track postgres snapshot metadata for resolving awaiting mutations\n // Skip during buffered initial sync (will be extracted during atomic swap)\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), track snapshots\n // to avoid losing them when messages are written to the existing transaction.\n if (!isBufferingInitialSync() || transactionStarted) {\n newSnapshots.push(parseSnapshotMessage(message))\n }\n } else if (isUpToDateMessage(message)) {\n // up-to-date takes precedence - also triggers progressive mode atomic swap\n commitPoint = `up-to-date`\n } else if (isSubsetEndMessage(message)) {\n // subset-end triggers commit but not progressive mode atomic swap\n if (commitPoint !== `up-to-date`) {\n commitPoint = `subset-end`\n }\n } else if (isMoveOutMessage(message)) {\n // Handle move-out event: buffer if buffering, otherwise process immediately\n // EXCEPTION: If a transaction is already started (e.g., from must-refetch), process\n // immediately to avoid orphan transactions.\n if (isBufferingInitialSync() && !transactionStarted) {\n bufferedMessages.push(message)\n } else {\n // Normal processing: process move-out immediately\n transactionStarted = processMoveOutEvent(\n message.headers.patterns,\n begin,\n write,\n transactionStarted,\n )\n }\n } else if (isMustRefetchMessage(message)) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Received must-refetch message, starting transaction with truncate`,\n )\n\n // Start a transaction and truncate the collection\n if (!transactionStarted) {\n begin()\n transactionStarted = true\n }\n\n truncate()\n\n // Clear tag tracking state\n clearTagTrackingState()\n\n // Clear synced keys tracking since we're starting fresh\n syncedKeys.clear()\n\n // Reset the loadSubset deduplication state since we're starting fresh\n // This ensures that previously loaded predicates don't prevent refetching after truncate\n loadSubsetDedupe?.reset()\n\n // Reset flags so we continue accumulating changes until next up-to-date\n commitPoint = null\n hasReceivedUpToDate = false // Reset for progressive mode (isBufferingInitialSync will reflect this)\n bufferedMessages.length = 0 // Clear buffered messages\n }\n }\n\n if (commitPoint !== null) {\n // PROGRESSIVE MODE: Atomic swap on first up-to-date (not subset-end)\n // EXCEPTION: Skip atomic swap if a transaction is already started (e.g., from must-refetch).\n // In that case, do a normal commit to properly close the existing transaction.\n if (\n isBufferingInitialSync() &&\n commitPoint === `up-to-date` &&\n !transactionStarted\n ) {\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}Progressive mode: Performing atomic swap with ${bufferedMessages.length} buffered messages`,\n )\n\n // Start atomic swap transaction\n begin()\n\n // Truncate to clear all snapshot data\n truncate()\n\n // Clear tag tracking state for atomic swap\n clearTagTrackingState()\n\n // Clear synced keys tracking for atomic swap\n syncedKeys.clear()\n\n // Apply all buffered change messages and extract txids/snapshots\n for (const bufferedMsg of bufferedMessages) {\n if (isChangeMessage(bufferedMsg)) {\n processChangeMessage(bufferedMsg)\n\n // Extract txids from buffered messages (will be committed to store after transaction)\n if (hasTxids(bufferedMsg)) {\n bufferedMsg.headers.txids?.forEach((txid) =>\n newTxids.add(txid),\n )\n }\n } else if (isSnapshotEndMessage(bufferedMsg)) {\n // Extract snapshots from buffered messages (will be committed to store after transaction)\n newSnapshots.push(parseSnapshotMessage(bufferedMsg))\n } else if (isMoveOutMessage(bufferedMsg)) {\n // Process buffered move-out messages during atomic swap\n processMoveOutEvent(\n bufferedMsg.headers.patterns,\n begin,\n write,\n transactionStarted,\n )\n }\n }\n\n // Commit the atomic swap\n commit()\n\n // Exit buffering phase by marking that we've received up-to-date\n // isBufferingInitialSync() will now return false\n bufferedMessages.length = 0\n\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}Progressive mode: Atomic swap complete, now in normal sync mode`,\n )\n } else {\n // Normal mode or on-demand: commit transaction if one was started\n // Both up-to-date and subset-end trigger a commit\n if (transactionStarted) {\n commit()\n transactionStarted = false\n }\n }\n wrappedMarkReady(isBufferingInitialSync())\n\n // Track that we've received the first up-to-date for progressive mode\n if (commitPoint === `up-to-date`) {\n hasReceivedUpToDate = true\n }\n\n // Always commit txids when we receive up-to-date, regardless of transaction state\n seenTxids.setState((currentTxids) => {\n const clonedSeen = new Set<Txid>(currentTxids)\n if (newTxids.size > 0) {\n debug(\n `${collectionId ? `[${collectionId}] ` : ``}new txids synced from pg %O`,\n Array.from(newTxids),\n )\n }\n newTxids.forEach((txid) => clonedSeen.add(txid))\n newTxids.clear()\n return clonedSeen\n })\n\n // Always commit snapshots when we receive up-to-date, regardless of transaction state\n seenSnapshots.setState((currentSnapshots) => {\n const seen = [...currentSnapshots, ...newSnapshots]\n newSnapshots.forEach((snapshot) =>\n debug(\n `${collectionId ? 
`[${collectionId}] ` : ``}new snapshot synced from pg %o`,\n snapshot,\n ),\n )\n newSnapshots.length = 0\n return seen\n })\n\n // Resolve all matched pending matches on up-to-date or subset-end\n // Set batchCommitted BEFORE resolving to avoid timing window where late awaitMatch\n // calls could register as \"matched\" after resolver pass already ran\n batchCommitted.setState(() => true)\n\n resolveMatchedPendingMatches()\n }\n })\n\n // Return the deduplicated loadSubset if available (on-demand or progressive mode)\n // The loadSubset method is auto-bound, so it can be safely returned directly\n return {\n loadSubset: loadSubsetDedupe?.loadSubset,\n cleanup: () => {\n // Unsubscribe from the stream\n unsubscribeStream()\n // Abort the abort controller to stop the stream\n abortController.abort()\n // Reset deduplication tracking so collection can load fresh data if restarted\n loadSubsetDedupe?.reset()\n },\n }\n },\n // Expose the getSyncMetadata function\n getSyncMetadata,\n }\n}\n"],"names":[],"mappings":";;;;;;;;AAyDA,MAAM,QAAQ,YAAY,MAAM,gBAAgB;AAKzC,MAAM,6CAA6B,mBAAmB;AAmO7D,SAAS,kBACP,SACkD;AAClD,SAAO,iBAAiB,OAAO,KAAK,QAAQ,QAAQ,YAAY;AAClE;AAEA,SAAS,qBACP,SACsE;AACtE,SAAO,iBAAiB,OAAO,KAAK,QAAQ,QAAQ,YAAY;AAClE;AAEA,SAAS,qBACP,SAC+B;AAC/B,SAAO,iBAAiB,OAAO,KAAK,QAAQ,QAAQ,YAAY;AAClE;AAEA,SAAS,mBACP,SACoE;AACpE,SACE,iBAAiB,OAAO,KACvB,QAAQ,QAAQ,YAAuB;AAE5C;AAEA,SAAS,qBAAqB,SAA+C;AAC3E,SAAO;AAAA,IACL,MAAM,QAAQ,QAAQ;AAAA,IACtB,MAAM,QAAQ,QAAQ;AAAA,IACtB,UAAU,QAAQ,QAAQ;AAAA,EAAA;AAE9B;AAGA,SAAS,SACP,SAC8D;AAC9D,SAAO,WAAW,QAAQ,WAAW,MAAM,QAAQ,QAAQ,QAAQ,KAAK;AAC1E;AAaA,SAAS,uBAA+C;AAAA,EACtD;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,GAsBkC;AAChC,MAAI,aAAa,SAAS;AACxB,WAAO;AAAA,EACT;AAEA,QAAM,iBAAiB,mBAAmB,EAAE,iBAAA,IAAqB;AACjE,QAAM,YAAY,eAAe,IAAI,YAAY,OAAO;AAMxD,WAAS,oBAAoB,OAAgB,WAA4B;AACvE,QAAI,OAAO,SAAS;AAClB,YAAM,GAAG,SAAS,YAAY,SAAS,6BAA6B,KAAK;AACzE,aAAO;AAAA,IACT;AACA,UAAM,GAAG,SAAS,YAAY,SAAS,QAAQ,KAAK;AACpD,WAAO;AAAA,EACT;AAEA,QAAM,aAAa,OAAO,SAA4B;AACpD,QAAI,0BAA0B;AAC5B,YAAM,iBAAiB,WAAc,MAAM,cAAc;AACzD,UAAI;AACF,cAAM,EAAE,MAAM,KAAA,IAAS,MAAM,OAAO,cAAc,cAAc;AAEhE,YAAI,CAAC,0BAA0B;AAC7B,gBAAM,GAAG,SAAS,mDAAmD;AACrE;AAAA,QACF;AAEA,YAAI,KAAK,SAAS,GAAG;AACnB,gBAAA;AACA,qBAAW,OAAO,MAAM;AACtB,kBAAM;AAAA,cACJ,MAAM;AAAA,cACN,OAAO,IAAI;AAAA,cACX,UAAU,EAAE,GAAG,IAAI,QAAA;AAAA,YAAQ,CAC5B;AAAA,UACH;AACA,iBAAA;AACA,gBAAM,GAAG,SAAS,yBAAyB,KAAK,MAAM,OAAO;AAAA,QAC/D;AAAA,MACF,SAAS,OAAO;AACd,YAAI,oBAAoB,OAAO,eAAe,GAAG;AAC/C;AAAA,QACF;AACA,cAAM;AAAA,MACR;AACA;AAAA,IACF;AAEA,QAAI,aAAa,eAAe;AAC9B;AAAA,IACF;AAEA,UAAM,EAAE,QAAQ,OAAO,SAAS,UAAU;AAE1C,QAAI;AACF,UAAI,QAAQ;AACV,cAAM,mBAAsC;AAAA,UAC1C,OAAO,QAAQ,IAAI,OAAO,OAAO,YAAY,IAAI,OAAO;AAAA,UACxD;AAAA,QAAA;AAEF,cAAM,qBAAqB;AAAA,UACzB;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,gBAAmC;AAAA,UACvC,OAAO,QAAQ,IAAI,OAAO,OAAO,SAAS,IAAI,OAAO;AAAA,UACrD;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,kBAAkB,WAAc,eAAe,cAAc;AAEnE,cAAM,GAAG,SAAS,oDAAoD;AACtE;AAAA,UACE,GAAG,SAAS,oDAAoD,KAAK;AAAA,QAAA;AAGvE,cAAM,QAAQ,IAAI;AAAA,UAChB,OAAO,gBAAgB,kBAAkB;AAAA,UACzC,OAAO,gBAAgB,eAAe;AAAA,QAAA,CACvC;AAAA,MACH,OAAO;AACL,cAAM,iBAAiB,WAAc,MAAM,cAAc;AACzD,cAAM,OAAO,gBAAgB,cAAc;AAAA,MAC7C;AAAA,IACF,SAAS,OAAO;AACd,UAAI,oBAAoB,OAAO,iBAAiB,GAAG;AACjD;AAAA,MACF;AACA,YAAM;AAAA,IACR;AAAA,EACF;AAEA,SAAO,IAAI,uBAAuB,EAAE,YAAY;AAClD;AAyDO,SAAS,0BACd,QAQA;AACA,QAAM,YAAY,IAAI,0BAAqB,IAAI,CAAA,CAAE,CAAC;AAClD,QAAM,gBAAgB,IAAI,MAA+B,EAAE;AAC3D,QAAM,mBAAmB,OAAO,YAAY;AAC5C,QAAM,gBACJ,qBAAqB,gBAAgB,cAAc;AACrD,QAAM,iBAAiB,IAAI,MAWzB,oBAAI,KAAK;AAGX,QAAM,uBAAuB,IAAI,MAA2B,EAAE;AAI9D,QAAM,iBA
AiB,IAAI,MAAe,KAAK;AAK/C,QAAM,uBAAuB,CAAC,aAA4B;AACxD,QAAI,SAAS,SAAS,GAAG;AACvB,qBAAe,SAAS,CAAC,YAAY;AACnC,cAAM,aAAa,IAAI,IAAI,OAAO;AAClC,iBAAS,QAAQ,CAAC,OAAO,WAAW,OAAO,EAAE,CAAC;AAC9C,eAAO;AAAA,MACT,CAAC;AAAA,IACH;AAAA,EACF;AAKA,QAAM,+BAA+B,MAAM;AACzC,UAAM,mBAAkC,CAAA;AACxC,mBAAe,MAAM,QAAQ,CAAC,OAAO,YAAY;AAC/C,UAAI,MAAM,SAAS;AACjB,qBAAa,MAAM,SAAS;AAC5B,cAAM,QAAQ,IAAI;AAClB,yBAAiB,KAAK,OAAO;AAC7B;AAAA,UACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,UACrC;AAAA,QAAA;AAAA,MAEJ;AAAA,IACF,CAAC;AACD,yBAAqB,gBAAgB;AAAA,EACvC;AACA,QAAM,OAAO,mBAAsB,OAAO,cAAc;AAAA,IACtD;AAAA,IACA;AAAA,IACA,UAAU;AAAA,IACV;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,cAAc,OAAO;AAAA,IACrB,WAAW,OAAO,mBAAmB;AAAA,EAAA,CACtC;AAQD,QAAM,YAAyB,OAC7B,MACA,UAAkB,QACG;AACrB;AAAA,MACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,MACrC;AAAA,IAAA;AAEF,QAAI,OAAO,SAAS,UAAU;AAC5B,YAAM,IAAI,+BAA+B,OAAO,MAAM,OAAO,EAAE;AAAA,IACjE;AAGA,UAAM,UAAU,UAAU,MAAM,IAAI,IAAI;AACxC,QAAI,QAAS,QAAO;AAGpB,UAAM,cAAc,cAAc,MAAM;AAAA,MAAK,CAAC,aAC5C,oBAAoB,MAAM,QAAQ;AAAA,IAAA;AAEpC,QAAI,YAAa,QAAO;AAExB,WAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,YAAM,YAAY,WAAW,MAAM;AACjC,6BAAA;AACA,iCAAA;AACA,eAAO,IAAI,2BAA2B,MAAM,OAAO,EAAE,CAAC;AAAA,MACxD,GAAG,OAAO;AAEV,YAAM,uBAAuB,UAAU,UAAU,MAAM;AACrD,YAAI,UAAU,MAAM,IAAI,IAAI,GAAG;AAC7B;AAAA,YACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,YACrC;AAAA,UAAA;AAEF,uBAAa,SAAS;AACtB,+BAAA;AACA,mCAAA;AACA,kBAAQ,IAAI;AAAA,QACd;AAAA,MACF,CAAC;AAED,YAAM,2BAA2B,cAAc,UAAU,MAAM;AAC7D,cAAM,kBAAkB,cAAc,MAAM;AAAA,UAAK,CAAC,aAChD,oBAAoB,MAAM,QAAQ;AAAA,QAAA;AAEpC,YAAI,iBAAiB;AACnB;AAAA,YACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,YACrC;AAAA,YACA;AAAA,UAAA;AAEF,uBAAa,SAAS;AACtB,mCAAA;AACA,+BAAA;AACA,kBAAQ,IAAI;AAAA,QACd;AAAA,MACF,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAQA,QAAM,aAAgC,OACpC,SACA,UAAkB,QACG;AACrB;AAAA,MACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,IAAA;AAGvC,WAAO,IAAI,QAAQ,CAAC,SAAS,WAAW;AACtC,YAAM,UAAU,KAAK,OAAA,EAAS,SAAS,EAAE;AAEzC,YAAM,eAAe,MAAM;AACzB,uBAAe,SAAS,CAAC,YAAY;AACnC,gBAAM,aAAa,IAAI,IAAI,OAAO;AAClC,qBAAW,OAAO,OAAO;AACzB,iBAAO;AAAA,QACT,CAAC;AAAA,MACH;AAEA,YAAM,YAAY,MAAM;AACtB,qBAAA;AACA,eAAO,IAAI,4BAA4B,OAAO,EAAE,CAAC;AAAA,MACnD;AAEA,YAAM,YAAY,WAAW,WAAW,OAAO;AAI/C,YAAM,aAAa,CAAC,YAA0B;AAC5C,YAAI,QAAQ,OAAO,GAAG;AACpB;AAAA,YACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,UAAA;AAGvC,yBAAe,SAAS,CAAC,YAAY;AACnC,kBAAM,aAAa,IAAI,IAAI,OAAO;AAClC,kBAAM,WAAW,WAAW,IAAI,OAAO;AACvC,gBAAI,UAAU;AACZ,yBAAW,IAAI,SAAS,EAAE,GAAG,UAAU,SAAS,MAAM;AAAA,YACxD;AACA,mBAAO;AAAA,UACT,CAAC;AACD,iBAAO;AAAA,QACT;AACA,eAAO;AAAA,MACT;AAGA,iBAAW,WAAW,qBAAqB,OAAO;AAChD,YAAI,QAAQ,OAAO,GAAG;AAGpB,cAAI,eAAe,OAAO;AACxB;AAAA,cACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,YAAA;AAEvC,yBAAa,SAAS;AACtB,oBAAQ,IAAI;AACZ;AAAA,UACF;AAGA;AAAA,YACE,GAAG,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,EAAE;AAAA,UAAA;AAEvC,yBAAe,SAAS,CAAC,YAAY;AACnC,kBAAM,aAAa,IAAI,IAAI,OAAO;AAClC,uBAAW,IAAI,SAAS;AAAA,cACtB,SAAS;AAAA,cACT;AAAA,cACA;AAAA,cACA;AAAA,cACA,SAAS;AAAA;AAAA,YAAA,CACV;AACD,mBAAO;AAAA,UACT,CAAC;AACD;AAAA,QACF;AAAA,MACF;AAIA,qBAAe,SAAS,CAAC,YAAY;AACnC,cAAM,aAAa,IAAI,IAAI,OAAO;AAClC,mBAAW,IAAI,SAAS;AAAA,UACtB,SAAS;AAAA,UACT;AAAA,UACA;AAAA,UACA;AAAA,UACA,SAAS;AAAA,QAAA,CACV;AACD,eAAO;AAAA,MACT,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAKA,QAAM,0BAA0B,OAC9B,WACkB;AAElB,QAAI,UAAU,UAAU,QAAQ;AAC9B,YAAM,UAAU,OAAO;AAEvB,UAAI,MAAM,QAAQ,OAAO,IAAI,GAAG;AAC9B,cAAM,QAAQ,IAAI,OAAO,KAAK,IAAI,CAAC,SAAS,UAAU,MAAM,OAAO,CAAC,CAAC;AAAA,MACvE,OAAO;AACL,cAAM,UAAU,OAAO,MAAM,OAAO;AAAA,MACtC;AAAA,IACF;AAAA,EAEF;AAGA,QAAM,kBAAkB,OAAO,WAC3B,OACE,WAKG;AACH,UAAM,gBAAgB,MAAM,OAAO,SAAU,MAAM;AACnD,UAAM,wBAAwB,aAAa
;AAC3C,WAAO;AAAA,EACT,IACA;AAEJ,QAAM,kBAAkB,OAAO,WAC3B,OACE,WAKG;AACH,UAAM,gBAAgB,MAAM,OAAO,SAAU,MAAM;AACnD,UAAM,wBAAwB,aAAa;AAC3C,WAAO;AAAA,EACT,IACA;AAEJ,QAAM,kBAAkB,OAAO,WAC3B,OACE,WAKG;AACH,UAAM,gBAAgB,MAAM,OAAO,SAAU,MAAM;AACnD,UAAM,wBAAwB,aAAa;AAC3C,WAAO;AAAA,EACT,IACA;AAGJ,QAAM;AAAA,IACJ,cAAc;AAAA,IACd,UAAU;AAAA,IACV,UAAU;AAAA,IACV,UAAU;AAAA,IACV,GAAG;AAAA,EAAA,IACD;AAEJ,SAAO;AAAA,IACL,GAAG;AAAA,IACH,UAAU;AAAA,IACV;AAAA,IACA,UAAU;AAAA,IACV,UAAU;AAAA,IACV,UAAU;AAAA,IACV,OAAO;AAAA,MACL;AAAA,MACA;AAAA,IAAA;AAAA,EACF;AAEJ;AAKA,SAAS,mBACP,cACA,SAuBe;AACf,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA,IACE;AACJ,QAAM,qBAAqB;AAG3B,QAAM,iBAAiB,IAAI,MAA0B,MAAS;AAE9D,QAAM,+BAAe,IAAA;AAKrB,QAAM,WAAW,CAAC,QAAgC;AAChD,UAAM,YAAY,SAAS,IAAI,GAAG;AAClC,QAAI,WAAW;AACb,aAAO;AAAA,IACT;AAEA,UAAM,YAAY,IAAI,MAAM,GAAG;AAC/B,aAAS,IAAI,KAAK,SAAS;AAC3B,WAAO;AAAA,EACT;AAGA,QAAM,iCAAiB,IAAA;AACvB,QAAM,WAAqB,CAAA;AAC3B,MAAI,YAAgC;AAKpC,QAAM,qBAAqB,CAAC,WAAyB;AACnD,QAAI,SAAS,SAAS,QAAQ;AAE5B,eAAS,IAAI,SAAS,QAAQ,IAAI,QAAQ,KAAK;AAC7C,iBAAS,CAAC,IAAI,oBAAI,IAAA;AAAA,MACpB;AAAA,IACF;AAAA,EACF;AAKA,QAAM,eAAe,CACnB,MACA,OACA,cACS;AACT,eAAW,OAAO,MAAM;AACtB,YAAM,YAAY,SAAS,GAAG;AAG9B,UAAI,cAAc,QAAW;AAC3B,oBAAY,aAAa,SAAS;AAClC,2BAAmB,SAAS;AAAA,MAC9B;AAGA,YAAM,mBAAmB,aAAa,SAAS;AAC/C,UAAI,qBAAqB,WAAW;AAClC;AAAA,UACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE,iCAAiC,SAAS,SAAS,gBAAgB;AAAA,QAAA;AAEhH;AAAA,MACF;AAEA,gBAAU,IAAI,GAAG;AACjB,oBAAc,WAAW,OAAO,UAAU,SAAS;AAAA,IACrD;AAAA,EACF;AAKA,QAAM,oBAAoB,CACxB,aACA,OACA,cACS;AACT,QAAI,cAAc,QAAW;AAC3B;AAAA,IACF;AAEA,eAAW,OAAO,aAAa;AAC7B,YAAM,YAAY,SAAS,GAAG;AAC9B,gBAAU,OAAO,GAAG;AACpB,yBAAmB,WAAW,OAAO,UAAU,SAAS;AAKxD,eAAS,OAAO,GAAG;AAAA,IACrB;AAAA,EACF;AAKA,QAAM,8BAA8B,CAClC,MACA,aACA,UACiB;AAEjB,QAAI,CAAC,WAAW,IAAI,KAAK,GAAG;AAC1B,iBAAW,IAAI,OAAO,oBAAI,IAAA,CAAK;AAAA,IACjC;AACA,UAAM,YAAY,WAAW,IAAI,KAAK;AAGtC,QAAI,MAAM;AACR,mBAAa,MAAM,OAAO,SAAS;AAAA,IACrC;AAGA,QAAI,aAAa;AACf,wBAAkB,aAAa,OAAO,SAAS;AAAA,IACjD;AAEA,WAAO;AAAA,EACT;AAKA,QAAM,wBAAwB,MAAY;AACxC,eAAW,MAAA;AACX,aAAS,SAAS;AAClB,gBAAY;AAAA,EACd;AAMA,QAAM,kBAAkB,CAAC,UAAuB;AAC9C,QAAI,cAAc,QAAW;AAC3B;AAAA,IACF;AAEA,UAAM,YAAY,WAAW,IAAI,KAAK;AACtC,QAAI,CAAC,WAAW;AACd;AAAA,IACF;AAGA,eAAW,OAAO,WAAW;AAC3B,YAAM,YAAY,SAAS,GAAG;AAC9B,YAAM,mBAAmB,aAAa,SAAS;AAC/C,UAAI,qBAAqB,WAAW;AAClC,2BAAmB,WAAW,OAAO,UAAU,SAAS;AAAA,MAC1D;AACA,eAAS,OAAO,GAAG;AAAA,IACrB;AAGA,eAAW,OAAO,KAAK;AAAA,EACzB;AAMA,QAAM,4BAA4B,CAChC,OACA,YACY;AACZ,UAAM,YAAY,WAAW,IAAI,KAAK;AACtC,QAAI,CAAC,WAAW;AACd,aAAO;AAAA,IACT;AAGA,eAAW,OAAO,WAAW;AAC3B,YAAM,YAAY,SAAS,GAAG;AAC9B,UAAI,kBAAkB,WAAW,OAAO,GAAG;AACzC,kBAAU,OAAO,GAAG;AACpB,2BAAmB,WAAW,OAAO,UAAU,SAAU;AAAA,MAC3D;AAAA,IACF;AAGA,QAAI,UAAU,SAAS,GAAG;AACxB,iBAAW,OAAO,KAAK;AACvB,aAAO;AAAA,IACT;AAEA,WAAO;AAAA,EACT;AAKA,QAAM,sBAAsB,CAC1B,UACA,OACA,OACA,uBACY;AACZ,QAAI,cAAc,QAAW;AAC3B;AAAA,QACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,MAAA;AAE7C,aAAO;AAAA,IACT;AAEA,QAAI,YAAY;AAGhB,eAAW,WAAW,UAAU;AAE9B,YAAM,iBAAiB,wBAAwB,SAAS,QAAQ;AAEhE,iBAAW,SAAS,gBAAgB;AAClC,YAAI,0BAA0B,OAAO,OAAO,GAAG;AAE7C,cAAI,CAAC,WAAW;AACd,kBAAA;AACA,wBAAY;AAAA,UACd;AAEA,gBAAM;AAAA,YACJ,MAAM;AAAA,YACN,KAAK;AAAA,UAAA,CACN;AAAA,QACH;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAMA,QAAM,kBAAkB,MAA+B;AAErD,UAAM,SAAS,eAAe,SAAS;AAEvC,WAAO;AAAA,MACL,UAAU,aAAa,QAAQ,QAC3B,CAAC,QAAQ,aAAa,OAAO,KAAK,IAClC;AAAA,IAAA;AAAA,EAER;AAEA,MAAI;AAEJ,SAAO;AAAA,IACL,MAAM,CAAC,WAAiD;AACtD,YAAM,EAAE,OAAO,OAAO,QAAQ,WAAW,UAAU,eAAe;AAGlE,UAAI,uBAA6C;AACjD,YAAM,mBAAmB,CAAC,gBAAyB;AAEjD,YACE,eACA,aAAa,iBACb,WAAW,oBACX;AAEA,iCAAuB,UAAU,mBAAA;AACjC,+BAA
qB,KAAK,MAAM;AAC9B,sBAAA;AAAA,UACF,CAAC;AAAA,QACH,OAAO;AAEL,oBAAA;AAAA,QACF;AAAA,MACF;AAGA,YAAM,kBAAkB,IAAI,gBAAA;AAE5B,UAAI,aAAa,QAAQ;AACvB,qBAAa,OAAO;AAAA,UAClB;AAAA,UACA,MAAM;AACJ,4BAAgB,MAAA;AAAA,UAClB;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UAAA;AAAA,QACR;AAEF,YAAI,aAAa,OAAO,SAAS;AAC/B,0BAAgB,MAAA;AAAA,QAClB;AAAA,MACF;AAGA,sBAAgB,OAAO,iBAAiB,SAAS,MAAM;AACrD,uBAAe,SAAS,CAAC,YAAY;AACnC,kBAAQ,QAAQ,CAAC,UAAU;AACzB,yBAAa,MAAM,SAAS;AAC5B,kBAAM,OAAO,IAAI,oBAAoB;AAAA,UACvC,CAAC;AACD,qCAAW,IAAA;AAAA,QACb,CAAC;AAAA,MACH,CAAC;AAED,YAAM,SAAS,IAAI,YAAY;AAAA,QAC7B,GAAG;AAAA;AAAA,QAEH,KAAK,aAAa,cAAc,iBAAiB;AAAA;AAAA;AAAA,QAGjD,QACE,aAAa,WAAW,aAAa,cAAc,QAAQ;AAAA,QAC7D,QAAQ,gBAAgB;AAAA,QACxB,SAAS,CAAC,gBAAgB;AAMxB,oBAAA;AAEA,cAAI,aAAa,SAAS;AACxB,mBAAO,aAAa,QAAQ,WAAW;AAAA,UACzC,OAAO;AACL,oBAAQ;AAAA,cACN,+CAA+C,WAAW,EAAE;AAAA;AAAA;AAAA,cAG5D;AAAA,YAAA;AAAA,UAEJ;AAEA;AAAA,QACF;AAAA,MAAA,CACD;AACD,UAAI,qBAAqB;AACzB,YAAM,+BAAe,IAAA;AACrB,YAAM,eAAwC,CAAA;AAC9C,UAAI,sBAAsB;AAI1B,YAAM,yBAAyB,MAC7B,aAAa,iBAAiB,CAAC;AACjC,YAAM,mBAAsC,CAAA;AAM5C,YAAM,iCAAiB,IAAA;AAKvB,YAAM,uBAAuB,CAAC,kBAA8B;AAC1D,YAAI,CAAC,gBAAgB,aAAa,GAAG;AACnC;AAAA,QACF;AAGA,cAAM,OAAO,cAAc,QAAQ;AACnC,cAAM,cAAc,cAAc,QAAQ;AAC1C,cAAM,UAAU,QAAQ;AAExB,cAAM,QAAQ,WAAW,eAAe,cAAc,KAAK;AAC3D,cAAM,YAAY,cAAc,QAAQ;AAMxC,cAAM,WAAW,cAAc;AAC/B,cAAM,oBACJ,cAAc,YAAY,WAAW,IAAI,KAAK;AAEhD,YAAI,UAAU;AACZ,qBAAW,OAAO,KAAK;AAAA,QACzB,OAAO;AACL,qBAAW,IAAI,KAAK;AAAA,QACtB;AAEA,YAAI,UAAU;AACZ,0BAAgB,KAAK;AAAA,QACvB,WAAW,SAAS;AAClB,sCAA4B,MAAM,aAAa,KAAK;AAAA,QACtD;AAEA,cAAM;AAAA,UACJ,MAAM,oBAAoB,WAAW;AAAA,UACrC,OAAO,cAAc;AAAA;AAAA,UAErB,UAAU;AAAA,YACR,GAAG,cAAc;AAAA,UAAA;AAAA,QACnB,CACD;AAAA,MACH;AAKA,YAAM,mBAAmB,uBAAuB;AAAA,QAC9C;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA;AAAA;AAAA,QAGA,kBAAkB,aAAa,cAAc;AAAA;AAAA,QAE7C,QAAQ,gBAAgB;AAAA,MAAA,CACzB;AAED,0BAAoB,OAAO,UAAU,CAAC,aAAgC;AAEpE,YAAI,cAAkD;AAMtD,uBAAe,SAAS,MAAM,KAAK;AAEnC,mBAAW,WAAW,UAAU;AAE9B,cAAI,gBAAgB,OAAO,KAAK,iBAAiB,OAAO,GAAG;AACzD,iCAAqB,SAAS,CAAC,kBAAkB;AAC/C,oBAAM,YAAY,CAAC,GAAG,eAAe,OAAO;AAE5C,kBAAI,UAAU,SAAS,oBAAoB;AACzC,0BAAU,OAAO,GAAG,UAAU,SAAS,kBAAkB;AAAA,cAC3D;AACA,qBAAO;AAAA,YACT,CAAC;AAAA,UACH;AAMA,cACE,SAAS,OAAO,MACf,CAAC,uBAAA,KAA4B,qBAC9B;AACA,oBAAQ,QAAQ,OAAO,QAAQ,CAAC,SAAS,SAAS,IAAI,IAAI,CAAC;AAAA,UAC7D;AAIA,gBAAM,kBAAiC,CAAA;AACvC,yBAAe,MAAM,QAAQ,CAAC,OAAO,YAAY;AAC/C,gBAAI,CAAC,MAAM,SAAS;AAClB,kBAAI;AACF,sBAAM,QAAQ,OAAO;AAAA,cACvB,SAAS,KAAK;AAEZ,6BAAa,MAAM,SAAS;AAC5B,sBAAM;AAAA,kBACJ,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC;AAAA,gBAAA;AAEpD,gCAAgB,KAAK,OAAO;AAC5B,sBAAM,qBAAqB,GAAG;AAAA,cAChC;AAAA,YACF;AAAA,UACF,CAAC;AAGD,+BAAqB,eAAe;AAEpC,cAAI,gBAAgB,OAAO,GAAG;AAE5B,kBAAM,SAAS,QAAQ,QAAQ;AAC/B,gBAAI,UAAU,OAAO,WAAW,UAAU;AAExC,6BAAe,SAAS,MAAM,MAAM;AAAA,YACtC;AAKA,gBAAI,uBAAA,KAA4B,CAAC,oBAAoB;AACnD,+BAAiB,KAAK,OAAO;AAAA,YAC/B,OAAO;AAEL,kBAAI,CAAC,oBAAoB;AACvB,sBAAA;AACA,qCAAqB;AAAA,cACvB;AAEA,mCAAqB,OAAO;AAAA,YAC9B;AAAA,UACF,WAAW,qBAAqB,OAAO,GAAG;AAKxC,gBAAI,CAAC,uBAAA,KAA4B,oBAAoB;AACnD,2BAAa,KAAK,qBAAqB,OAAO,CAAC;AAAA,YACjD;AAAA,UACF,WAAW,kBAAkB,OAAO,GAAG;AAErC,0BAAc;AAAA,UAChB,WAAW,mBAAmB,OAAO,GAAG;AAEtC,gBAAI,gBAAgB,cAAc;AAChC,4BAAc;AAAA,YAChB;AAAA,UACF,WAAW,iBAAiB,OAAO,GAAG;AAIpC,gBAAI,uBAAA,KAA4B,CAAC,oBAAoB;AACnD,+BAAiB,KAAK,OAAO;AAAA,YAC/B,OAAO;AAEL,mCAAqB;AAAA,gBACnB,QAAQ,QAAQ;AAAA,gBAChB;AAAA,gBACA;AAAA,gBACA;AAAA,cAAA;AAAA,YAEJ;AAAA,UACF,WAAW,qBAAqB,OAAO,GAAG;AACxC;AAAA,cACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,YAAA;AAI7C,gBAAI,CAAC,oBAAoB;AACvB,oBAAA;AACA,mCAAqB;AAAA,YACvB;AAEA,qBAAA;AAGA,kCAAA;AAGA,uBAAW,MAAA;AAIX,8BAAkB,MAAA;AAGlB,0BAAc;AACd,kCAAsB;AACtB,6BAAiB,SAAS;AAAA,U
AC5B;AAAA,QACF;AAEA,YAAI,gBAAgB,MAAM;AAIxB,cACE,uBAAA,KACA,gBAAgB,gBAChB,CAAC,oBACD;AACA;AAAA,cACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE,iDAAiD,iBAAiB,MAAM;AAAA,YAAA;AAIrH,kBAAA;AAGA,qBAAA;AAGA,kCAAA;AAGA,uBAAW,MAAA;AAGX,uBAAW,eAAe,kBAAkB;AAC1C,kBAAI,gBAAgB,WAAW,GAAG;AAChC,qCAAqB,WAAW;AAGhC,oBAAI,SAAS,WAAW,GAAG;AACzB,8BAAY,QAAQ,OAAO;AAAA,oBAAQ,CAAC,SAClC,SAAS,IAAI,IAAI;AAAA,kBAAA;AAAA,gBAErB;AAAA,cACF,WAAW,qBAAqB,WAAW,GAAG;AAE5C,6BAAa,KAAK,qBAAqB,WAAW,CAAC;AAAA,cACrD,WAAW,iBAAiB,WAAW,GAAG;AAExC;AAAA,kBACE,YAAY,QAAQ;AAAA,kBACpB;AAAA,kBACA;AAAA,kBACA;AAAA,gBAAA;AAAA,cAEJ;AAAA,YACF;AAGA,mBAAA;AAIA,6BAAiB,SAAS;AAE1B;AAAA,cACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,YAAA;AAAA,UAE/C,OAAO;AAGL,gBAAI,oBAAoB;AACtB,qBAAA;AACA,mCAAqB;AAAA,YACvB;AAAA,UACF;AACA,2BAAiB,wBAAwB;AAGzC,cAAI,gBAAgB,cAAc;AAChC,kCAAsB;AAAA,UACxB;AAGA,oBAAU,SAAS,CAAC,iBAAiB;AACnC,kBAAM,aAAa,IAAI,IAAU,YAAY;AAC7C,gBAAI,SAAS,OAAO,GAAG;AACrB;AAAA,gBACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,gBAC3C,MAAM,KAAK,QAAQ;AAAA,cAAA;AAAA,YAEvB;AACA,qBAAS,QAAQ,CAAC,SAAS,WAAW,IAAI,IAAI,CAAC;AAC/C,qBAAS,MAAA;AACT,mBAAO;AAAA,UACT,CAAC;AAGD,wBAAc,SAAS,CAAC,qBAAqB;AAC3C,kBAAM,OAAO,CAAC,GAAG,kBAAkB,GAAG,YAAY;AAClD,yBAAa;AAAA,cAAQ,CAAC,aACpB;AAAA,gBACE,GAAG,eAAe,IAAI,YAAY,OAAO,EAAE;AAAA,gBAC3C;AAAA,cAAA;AAAA,YACF;AAEF,yBAAa,SAAS;AACtB,mBAAO;AAAA,UACT,CAAC;AAKD,yBAAe,SAAS,MAAM,IAAI;AAElC,uCAAA;AAAA,QACF;AAAA,MACF,CAAC;AAID,aAAO;AAAA,QACL,YAAY,kBAAkB;AAAA,QAC9B,SAAS,MAAM;AAEb,4BAAA;AAEA,0BAAgB,MAAA;AAEhB,4BAAkB,MAAA;AAAA,QACpB;AAAA,MAAA;AAAA,IAEJ;AAAA;AAAA,IAEA;AAAA,EAAA;AAEJ;"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@tanstack/electric-db-collection",
-  "version": "0.2.28",
+  "version": "0.2.29",
   "description": "ElectricSQL collection for TanStack DB",
   "author": "Kyle Mathews",
   "license": "MIT",
package/src/electric.ts CHANGED
@@ -349,6 +349,7 @@ function createLoadSubsetDedupe<T extends Row<unknown>>({
   commit,
   collectionId,
   encodeColumnName,
+  signal,
 }: {
   stream: ShapeStream<T>
   syncMode: ElectricSyncMode
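The hunk above threads a required `signal` into `createLoadSubsetDedupe`'s options; the next hunk uses it to tell tear-down rejections apart from genuine failures. A minimal, self-contained sketch of that pattern, assuming a hypothetical `fetchPage` helper (not part of this package's API):

```ts
// Sketch: swallow rejections that are only caused by our own abort.
// `fetchPage` is a hypothetical stand-in for an in-flight network request.
async function fetchPage(signal: AbortSignal): Promise<string> {
  const res = await fetch(`https://example.com/data`, { signal })
  return res.text()
}

async function load(signal: AbortSignal): Promise<string | undefined> {
  try {
    return await fetchPage(signal)
  } catch (error) {
    if (signal.aborted) {
      // Cleanup already aborted the request: the rejection is expected.
      return undefined
    }
    // Anything else is a real failure and must surface to the caller.
    throw error
  }
}
```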
@@ -366,108 +367,106 @@ function createLoadSubsetDedupe<T extends Row<unknown>>({
    * This is typically the `encode` function from shapeOptions.columnMapper.
    */
   encodeColumnName?: ColumnEncoder
+  /**
+   * Abort signal to check if the stream has been aborted during cleanup.
+   * When aborted, errors from requestSnapshot are silently ignored.
+   */
+  signal: AbortSignal
 }): DeduplicatedLoadSubset | null {
-  // Eager mode doesn't need subset loading
   if (syncMode === `eager`) {
     return null
   }
 
   const compileOptions = encodeColumnName ? { encodeColumnName } : undefined
+  const logPrefix = collectionId ? `[${collectionId}] ` : ``
+
+  /**
+   * Handles errors from snapshot operations. Returns true if the error was
+   * handled (signal aborted during cleanup), false if it should be re-thrown.
+   */
+  function handleSnapshotError(error: unknown, operation: string): boolean {
+    if (signal.aborted) {
+      debug(`${logPrefix}Ignoring ${operation} error during cleanup: %o`, error)
+      return true
+    }
+    debug(`${logPrefix}Error in ${operation}: %o`, error)
+    return false
+  }
 
   const loadSubset = async (opts: LoadSubsetOptions) => {
-    // In progressive mode, use fetchSnapshot during snapshot phase
     if (isBufferingInitialSync()) {
-      // Progressive mode snapshot phase: fetch and apply immediately
       const snapshotParams = compileSQL<T>(opts, compileOptions)
       try {
         const { data: rows } = await stream.fetchSnapshot(snapshotParams)
 
-        // Check again if we're still buffering - we might have received up-to-date
-        // and completed the atomic swap while waiting for the snapshot
         if (!isBufferingInitialSync()) {
-          debug(
-            `${collectionId ? `[${collectionId}] ` : ``}Ignoring snapshot - sync completed while fetching`,
-          )
+          debug(`${logPrefix}Ignoring snapshot - sync completed while fetching`)
           return
         }
 
-        // Apply snapshot data in a sync transaction (only if we have data)
         if (rows.length > 0) {
           begin()
           for (const row of rows) {
             write({
               type: `insert`,
               value: row.value,
-              metadata: {
-                ...row.headers,
-              },
+              metadata: { ...row.headers },
             })
           }
           commit()
-
-          debug(
-            `${collectionId ? `[${collectionId}] ` : ``}Applied snapshot with ${rows.length} rows`,
-          )
+          debug(`${logPrefix}Applied snapshot with ${rows.length} rows`)
         }
       } catch (error) {
-        debug(
-          `${collectionId ? `[${collectionId}] ` : ``}Error fetching snapshot: %o`,
-          error,
-        )
+        if (handleSnapshotError(error, `fetchSnapshot`)) {
+          return
+        }
         throw error
       }
-    } else if (syncMode === `progressive`) {
-      // Progressive mode after full sync complete: no need to load more
       return
-    } else {
-      // On-demand mode: use requestSnapshot
-      // When cursor is provided, make two calls:
-      // 1. whereCurrent (all ties, no limit)
-      // 2. whereFrom (rows > cursor, with limit)
-      const { cursor, where, orderBy, limit } = opts
+    }
 
-      if (cursor) {
-        // Make parallel requests for cursor-based pagination
-        const promises: Array<Promise<unknown>> = []
+    if (syncMode === `progressive`) {
+      return
+    }
 
-        // Request 1: All rows matching whereCurrent (ties at boundary, no limit)
-        // Combine main where with cursor.whereCurrent
+    const { cursor, where, orderBy, limit } = opts
+
+    try {
+      if (cursor) {
         const whereCurrentOpts: LoadSubsetOptions = {
           where: where ? and(where, cursor.whereCurrent) : cursor.whereCurrent,
           orderBy,
-          // No limit - get all ties
         }
         const whereCurrentParams = compileSQL<T>(
           whereCurrentOpts,
           compileOptions,
         )
-        promises.push(stream.requestSnapshot(whereCurrentParams))
 
-        debug(
-          `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereCurrent snapshot (all ties)`,
-        )
-
-        // Request 2: Rows matching whereFrom (rows > cursor, with limit)
-        // Combine main where with cursor.whereFrom
         const whereFromOpts: LoadSubsetOptions = {
           where: where ? and(where, cursor.whereFrom) : cursor.whereFrom,
           orderBy,
           limit,
         }
         const whereFromParams = compileSQL<T>(whereFromOpts, compileOptions)
-        promises.push(stream.requestSnapshot(whereFromParams))
 
+        debug(`${logPrefix}Requesting cursor.whereCurrent snapshot (all ties)`)
         debug(
-          `${collectionId ? `[${collectionId}] ` : ``}Requesting cursor.whereFrom snapshot (with limit ${limit})`,
+          `${logPrefix}Requesting cursor.whereFrom snapshot (with limit ${limit})`,
         )
 
-        // Wait for both requests to complete
-        await Promise.all(promises)
+        await Promise.all([
+          stream.requestSnapshot(whereCurrentParams),
+          stream.requestSnapshot(whereFromParams),
+        ])
       } else {
-        // No cursor - standard single request
         const snapshotParams = compileSQL<T>(opts, compileOptions)
         await stream.requestSnapshot(snapshotParams)
      }
+    } catch (error) {
+      if (handleSnapshotError(error, `requestSnapshot`)) {
+        return
+      }
+      throw error
     }
   }
 
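The reworked cursor path makes the pagination contract easier to see: one unlimited request for every row tied with the cursor boundary (`whereCurrent`), plus one limited request for rows strictly beyond it (`whereFrom`), both awaited in parallel. A worked sketch with plain arrays standing in for the two snapshot requests (types and data here are illustrative only):

```ts
// Why the cursor path needs an unlimited `whereCurrent` query: a sketch with
// plain arrays standing in for snapshot requests.
type Row = { createdAt: number; id: string }

const rows: Array<Row> = [
  { createdAt: 1, id: `a` },
  { createdAt: 2, id: `b` },
  { createdAt: 2, id: `c` }, // tied with `b` on the sort key
  { createdAt: 3, id: `d` },
]

// Suppose the client already holds rows up to createdAt = 2 via row `b`.
const cursorValue = 2
const limit = 1

// Request 1 (whereCurrent, no limit): every row tied at the boundary, so the
// tie `c` is never silently dropped by a limit.
const ties = rows.filter((r) => r.createdAt === cursorValue)

// Request 2 (whereFrom, with limit): rows strictly past the boundary.
const next = rows.filter((r) => r.createdAt > cursorValue).slice(0, limit)

console.log(ties.map((r) => r.id)) // [ 'b', 'c' ]
console.log(next.map((r) => r.id)) // [ 'd' ]
```

Without the unlimited ties request, a `limit` applied at the boundary could drop `c` whenever several rows share the cursor's sort value.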
@@ -1311,6 +1310,8 @@ function createElectricSync<T extends Row<unknown>>(
       // Pass the columnMapper's encode function to transform column names
       // (e.g., camelCase to snake_case) when compiling SQL for subset queries
       encodeColumnName: shapeOptions.columnMapper?.encode,
+      // Pass abort signal so requestSnapshot errors can be ignored during cleanup
+      signal: abortController.signal,
     })
 
     unsubscribeStream = stream.subscribe((messages: Array<Message<T>>) => {
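At this call site the dedupe helper receives the same `abortController.signal` that `cleanup()` aborts, so a subset request still in flight during tear-down settles quietly instead of rejecting with a spurious error. A simplified lifecycle sketch (the `requestSnapshot` stub below is illustrative, not the real `ShapeStream` API):

```ts
// Sketch of the cleanup ordering this wiring enables. `requestSnapshot`
// is a stub that rejects when aborted, mimicking a cancelled request.
const abortController = new AbortController()

const requestSnapshot = (signal: AbortSignal): Promise<void> =>
  new Promise((_resolve, reject) => {
    signal.addEventListener(`abort`, () => reject(new Error(`aborted`)), {
      once: true,
    })
  })

async function loadSubset(signal: AbortSignal): Promise<void> {
  try {
    await requestSnapshot(signal)
  } catch (error) {
    if (signal.aborted) return // cleanup() aborted us: expected, ignore
    throw error
  }
}

const pending = loadSubset(abortController.signal)

// cleanup() later aborts the controller; the in-flight loadSubset call
// settles without surfacing an error to the caller.
abortController.abort()
await pending
```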