@tanstack/db 0.5.18 → 0.5.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/dist/cjs/collection/sync.cjs +2 -1
  2. package/dist/cjs/collection/sync.cjs.map +1 -1
  3. package/dist/cjs/proxy.cjs +1 -1
  4. package/dist/cjs/proxy.cjs.map +1 -1
  5. package/dist/cjs/query/builder/index.cjs +5 -4
  6. package/dist/cjs/query/builder/index.cjs.map +1 -1
  7. package/dist/cjs/query/builder/index.d.cts +5 -4
  8. package/dist/cjs/query/builder/ref-proxy.cjs +63 -0
  9. package/dist/cjs/query/builder/ref-proxy.cjs.map +1 -1
  10. package/dist/cjs/query/builder/ref-proxy.d.cts +13 -0
  11. package/dist/cjs/query/builder/types.d.cts +26 -1
  12. package/dist/cjs/query/compiler/evaluators.cjs +29 -2
  13. package/dist/cjs/query/compiler/evaluators.cjs.map +1 -1
  14. package/dist/cjs/query/compiler/group-by.cjs +29 -12
  15. package/dist/cjs/query/compiler/group-by.cjs.map +1 -1
  16. package/dist/cjs/query/compiler/group-by.d.cts +18 -2
  17. package/dist/cjs/query/compiler/index.cjs +5 -5
  18. package/dist/cjs/query/compiler/index.cjs.map +1 -1
  19. package/dist/cjs/query/compiler/order-by.cjs +1 -1
  20. package/dist/cjs/query/compiler/order-by.cjs.map +1 -1
  21. package/dist/cjs/query/compiler/order-by.d.cts +1 -1
  22. package/dist/cjs/query/compiler/select.cjs +1 -1
  23. package/dist/cjs/query/compiler/select.cjs.map +1 -1
  24. package/dist/cjs/query/compiler/select.d.cts +1 -1
  25. package/dist/cjs/query/live/collection-config-builder.cjs +17 -10
  26. package/dist/cjs/query/live/collection-config-builder.cjs.map +1 -1
  27. package/dist/cjs/query/live/collection-subscriber.cjs +1 -1
  28. package/dist/cjs/query/live/collection-subscriber.cjs.map +1 -1
  29. package/dist/cjs/query/live/internal.cjs +1 -1
  30. package/dist/cjs/query/live/internal.cjs.map +1 -1
  31. package/dist/cjs/query/live/types.d.cts +2 -1
  32. package/dist/esm/collection/sync.js +2 -1
  33. package/dist/esm/collection/sync.js.map +1 -1
  34. package/dist/esm/proxy.js +1 -1
  35. package/dist/esm/proxy.js.map +1 -1
  36. package/dist/esm/query/builder/index.d.ts +5 -4
  37. package/dist/esm/query/builder/index.js +6 -5
  38. package/dist/esm/query/builder/index.js.map +1 -1
  39. package/dist/esm/query/builder/ref-proxy.d.ts +13 -0
  40. package/dist/esm/query/builder/ref-proxy.js +63 -0
  41. package/dist/esm/query/builder/ref-proxy.js.map +1 -1
  42. package/dist/esm/query/builder/types.d.ts +26 -1
  43. package/dist/esm/query/compiler/evaluators.js +29 -2
  44. package/dist/esm/query/compiler/evaluators.js.map +1 -1
  45. package/dist/esm/query/compiler/group-by.d.ts +18 -2
  46. package/dist/esm/query/compiler/group-by.js +30 -13
  47. package/dist/esm/query/compiler/group-by.js.map +1 -1
  48. package/dist/esm/query/compiler/index.js +5 -5
  49. package/dist/esm/query/compiler/index.js.map +1 -1
  50. package/dist/esm/query/compiler/order-by.d.ts +1 -1
  51. package/dist/esm/query/compiler/order-by.js +1 -1
  52. package/dist/esm/query/compiler/order-by.js.map +1 -1
  53. package/dist/esm/query/compiler/select.d.ts +1 -1
  54. package/dist/esm/query/compiler/select.js +1 -1
  55. package/dist/esm/query/compiler/select.js.map +1 -1
  56. package/dist/esm/query/live/collection-config-builder.js +17 -10
  57. package/dist/esm/query/live/collection-config-builder.js.map +1 -1
  58. package/dist/esm/query/live/collection-subscriber.js +1 -1
  59. package/dist/esm/query/live/collection-subscriber.js.map +1 -1
  60. package/dist/esm/query/live/internal.js +1 -1
  61. package/dist/esm/query/live/internal.js.map +1 -1
  62. package/dist/esm/query/live/types.d.ts +2 -1
  63. package/package.json +2 -2
  64. package/src/collection/sync.ts +2 -2
  65. package/src/query/builder/index.ts +22 -6
  66. package/src/query/builder/ref-proxy.ts +90 -0
  67. package/src/query/builder/types.ts +26 -1
  68. package/src/query/compiler/evaluators.ts +38 -2
  69. package/src/query/compiler/group-by.ts +76 -22
  70. package/src/query/compiler/index.ts +13 -13
  71. package/src/query/compiler/order-by.ts +7 -6
  72. package/src/query/compiler/select.ts +5 -8
  73. package/src/query/live/collection-config-builder.ts +46 -17
  74. package/src/query/live/types.ts +3 -1
package/src/query/builder/types.ts +26 -1

@@ -345,6 +345,26 @@ export type JoinOnCallback<TContext extends Context> = (
   refs: RefsForContext<TContext>,
 ) => any
 
+/**
+ * FunctionalHavingRow - Type for the row parameter in functional having callbacks
+ *
+ * Functional having callbacks receive a namespaced row that includes:
+ * - Table data from the schema (when available)
+ * - $selected: The SELECT result fields (when select() has been called)
+ *
+ * After `select()` is called, this type includes `$selected` which provides access
+ * to the SELECT result fields via `$selected.fieldName` syntax.
+ *
+ * Note: When used with GROUP BY, functional having receives `{ $selected: ... }` with the
+ * aggregated SELECT results. When used without GROUP BY, it receives the full namespaced row
+ * which includes both table data and `$selected`.
+ *
+ * Example: `({ $selected }) => $selected.sessionCount > 2`
+ * Example (no GROUP BY): `(row) => row.user.salary > 70000 && row.$selected.user_count > 2`
+ */
+export type FunctionalHavingRow<TContext extends Context> = TContext[`schema`] &
+  (TContext[`result`] extends object ? { $selected: TContext[`result`] } : {})
+
 /**
  * RefProxyForContext - Creates ref proxies for all tables/collections in a query context
  *
@@ -364,6 +384,9 @@ export type JoinOnCallback<TContext extends Context> = (
  *
  * The logic prioritizes optional chaining by always placing `undefined` outside when
  * a type is both optional and nullable (e.g., `string | null | undefined`).
+ *
+ * After `select()` is called, this type also includes `$selected` which provides access
+ * to the SELECT result fields via `$selected.fieldName` syntax.
  */
 export type RefsForContext<TContext extends Context> = {
   [K in keyof TContext[`schema`]]: IsNonExactOptional<
@@ -383,7 +406,9 @@ export type RefsForContext<TContext extends Context> = {
     : // T is exactly undefined, exactly null, or neither optional nor nullable
       // Wrap in RefProxy as-is (includes exact undefined, exact null, and normal types)
       Ref<TContext[`schema`][K]>
-}
+} & (TContext[`result`] extends object
+  ? { $selected: Ref<TContext[`result`]> }
+  : {})
 
 /**
  * Type Detection Helpers
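
To make the two callback shapes described in the FunctionalHavingRow doc comment concrete, here is a small standalone sketch. It uses no library imports, and the row shapes and field names are illustrative; only the predicate expressions come from the examples above.

// With GROUP BY: the callback receives `{ $selected: ... }` holding the aggregated
// SELECT results for the group.
const groupedRow = { $selected: { userId: `u1`, sessionCount: 3 } }
const havingWithGroupBy = ({ $selected }: typeof groupedRow) =>
  $selected.sessionCount > 2
console.log(havingWithGroupBy(groupedRow)) // true -> group is kept

// Without GROUP BY: the callback receives the full namespaced row, i.e. the table
// data plus `$selected` with the SELECT output for that row.
const namespacedHavingRow = {
  user: { id: 1, salary: 80000 },
  $selected: { user_count: 3 },
}
const havingWithoutGroupBy = (row: typeof namespacedHavingRow) =>
  row.user.salary > 70000 && row.$selected.user_count > 2
console.log(havingWithoutGroupBy(namespacedHavingRow)) // true -> row is kept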
package/src/query/compiler/evaluators.ts +38 -2

@@ -95,12 +95,48 @@ function compileExpressionInternal(
  * Compiles a reference expression into an optimized evaluator
  */
 function compileRef(ref: PropRef): CompiledExpression {
-  const [tableAlias, ...propertyPath] = ref.path
+  const [namespace, ...propertyPath] = ref.path
 
-  if (!tableAlias) {
+  if (!namespace) {
     throw new EmptyReferencePathError()
   }
 
+  // Handle $selected namespace - references SELECT result fields
+  if (namespace === `$selected`) {
+    // Access $selected directly
+    if (propertyPath.length === 0) {
+      // Just $selected - return entire $selected object
+      return (namespacedRow) => (namespacedRow as any).$selected
+    } else if (propertyPath.length === 1) {
+      // Single property access - most common case
+      const prop = propertyPath[0]!
+      return (namespacedRow) => {
+        const selectResults = (namespacedRow as any).$selected
+        return selectResults?.[prop]
+      }
+    } else {
+      // Multiple property navigation (nested SELECT fields)
+      return (namespacedRow) => {
+        const selectResults = (namespacedRow as any).$selected
+        if (selectResults === undefined) {
+          return undefined
+        }
+
+        let value: any = selectResults
+        for (const prop of propertyPath) {
+          if (value == null) {
+            return value
+          }
+          value = value[prop]
+        }
+        return value
+      }
+    }
+  }
+
+  // Handle table alias namespace (existing logic)
+  const tableAlias = namespace
+
   // Pre-compile the property path navigation
   if (propertyPath.length === 0) {
     // Simple table reference
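
For illustration, a simplified standalone version of what the compiled $selected evaluator does at runtime (this is not the library's CompiledExpression type, and the row contents are made up): a ref path whose head is `$selected` reads the SELECT results carried on the namespaced row, while any other head is treated as a table alias exactly as before.

type IllustrativeNamespacedRow = Record<string, any> & {
  $selected?: Record<string, any>
}

function evalRef(path: Array<string>, row: IllustrativeNamespacedRow): unknown {
  const [head, ...rest] = path
  if (!head) throw new Error(`empty reference path`)
  // $selected routes to the SELECT results; anything else is a table alias.
  let value: any = head === `$selected` ? row.$selected : row[head]
  for (const prop of rest) {
    if (value == null) return value
    value = value[prop]
  }
  return value
}

const sampleRow: IllustrativeNamespacedRow = {
  order: { id: 1, total: 40 },
  $selected: { orderId: 1, total: 40 },
}
console.log(evalRef([`$selected`, `total`], sampleRow)) // 40 - SELECT result field
console.log(evalRef([`order`, `total`], sampleRow)) // 40 - table column, unchanged behaviour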
package/src/query/compiler/group-by.ts +76 -22

@@ -1,4 +1,10 @@
-import { filter, groupBy, groupByOperators, map } from '@tanstack/db-ivm'
+import {
+  filter,
+  groupBy,
+  groupByOperators,
+  map,
+  serializeValue,
+} from '@tanstack/db-ivm'
 import { Func, PropRef, getHavingExpression } from '../ir.js'
 import {
   AggregateFunctionNotInSelectError,
@@ -66,7 +72,7 @@ function validateAndCreateMapping(
 
 /**
  * Processes the GROUP BY clause with optional HAVING and SELECT
- * Works with the new __select_results structure from early SELECT processing
+ * Works with the new $selected structure from early SELECT processing
  */
 export function processGroupBy(
   pipeline: NamespacedAndKeyedStream,
@@ -98,11 +104,11 @@ export function processGroupBy(
       groupBy(keyExtractor, aggregates),
     ) as NamespacedAndKeyedStream
 
-    // Update __select_results to include aggregate values
+    // Update $selected to include aggregate values
    pipeline = pipeline.pipe(
      map(([, aggregatedRow]) => {
-        // Start with the existing __select_results from early SELECT processing
-        const selectResults = (aggregatedRow as any).__select_results || {}
+        // Start with the existing $selected from early SELECT processing
+        const selectResults = (aggregatedRow as any).$selected || {}
        const finalResults: Record<string, any> = { ...selectResults }

        if (selectClause) {
@@ -115,12 +121,12 @@ export function processGroupBy(
          }
        }

-        // Use a single key for the result and update __select_results
+        // Use a single key for the result and update $selected
        return [
          `single_group`,
          {
            ...aggregatedRow,
-            __select_results: finalResults,
+            $selected: finalResults,
          },
        ] as [unknown, Record<string, any>]
      }),
@@ -133,13 +139,14 @@ export function processGroupBy(
      const transformedHavingClause = replaceAggregatesByRefs(
        havingExpression,
        selectClause || {},
+        `$selected`,
      )
      const compiledHaving = compileExpression(transformedHavingClause)

      pipeline = pipeline.pipe(
        filter(([, row]) => {
          // Create a namespaced row structure for HAVING evaluation
-          const namespacedRow = { result: (row as any).__select_results }
+          const namespacedRow = { $selected: (row as any).$selected }
          return toBooleanPredicate(compiledHaving(namespacedRow))
        }),
      )
@@ -152,7 +159,7 @@ export function processGroupBy(
      pipeline = pipeline.pipe(
        filter(([, row]) => {
          // Create a namespaced row structure for functional HAVING evaluation
-          const namespacedRow = { result: (row as any).__select_results }
+          const namespacedRow = { $selected: (row as any).$selected }
          return toBooleanPredicate(fnHaving(namespacedRow))
        }),
      )
@@ -174,11 +181,11 @@ export function processGroupBy(
  // Create a key extractor function using simple __key_X format
  const keyExtractor = ([, row]: [
    string,
-    NamespacedRow & { __select_results?: any },
+    NamespacedRow & { $selected?: any },
  ]) => {
-    // Use the original namespaced row for GROUP BY expressions, not __select_results
+    // Use the original namespaced row for GROUP BY expressions, not $selected
    const namespacedRow = { ...row }
-    delete (namespacedRow as any).__select_results
+    delete (namespacedRow as any).$selected

    const key: Record<string, unknown> = {}

@@ -208,11 +215,11 @@ export function processGroupBy(
  // Apply the groupBy operator
  pipeline = pipeline.pipe(groupBy(keyExtractor, aggregates))

-  // Update __select_results to handle GROUP BY results
+  // Update $selected to handle GROUP BY results
  pipeline = pipeline.pipe(
    map(([, aggregatedRow]) => {
-      // Start with the existing __select_results from early SELECT processing
-      const selectResults = (aggregatedRow as any).__select_results || {}
+      // Start with the existing $selected from early SELECT processing
+      const selectResults = (aggregatedRow as any).$selected || {}
      const finalResults: Record<string, any> = {}

      if (selectClause) {
@@ -248,14 +255,14 @@ export function processGroupBy(
        for (let i = 0; i < groupByClause.length; i++) {
          keyParts.push(aggregatedRow[`__key_${i}`])
        }
-        finalKey = JSON.stringify(keyParts)
+        finalKey = serializeValue(keyParts)
      }

      return [
        finalKey,
        {
          ...aggregatedRow,
-          __select_results: finalResults,
+          $selected: finalResults,
        },
      ] as [unknown, Record<string, any>]
    }),
@@ -274,7 +281,7 @@ export function processGroupBy(
    pipeline = pipeline.pipe(
      filter(([, row]) => {
        // Create a namespaced row structure for HAVING evaluation
-        const namespacedRow = { result: (row as any).__select_results }
+        const namespacedRow = { $selected: (row as any).$selected }
        return compiledHaving(namespacedRow)
      }),
    )
@@ -287,7 +294,7 @@ export function processGroupBy(
    pipeline = pipeline.pipe(
      filter(([, row]) => {
        // Create a namespaced row structure for functional HAVING evaluation
-        const namespacedRow = { result: (row as any).__select_results }
+        const namespacedRow = { $selected: (row as any).$selected }
        return toBooleanPredicate(fnHaving(namespacedRow))
      }),
    )
@@ -385,12 +392,28 @@ function getAggregateFunction(aggExpr: Aggregate) {
 }
 
 /**
- * Transforms basic expressions and aggregates to replace Agg expressions with references to computed values
+ * Transforms expressions to replace aggregate functions with references to computed values.
+ *
+ * This function is used in both ORDER BY and HAVING clauses to transform expressions that reference:
+ * 1. Aggregate functions (e.g., `max()`, `count()`) - replaces with references to computed aggregates in SELECT
+ * 2. SELECT field references via $selected namespace (e.g., `$selected.latestActivity`) - validates and passes through unchanged
+ *
+ * For aggregate expressions, it finds matching aggregates in the SELECT clause and replaces them with
+ * PropRef([resultAlias, alias]) to reference the computed aggregate value.
+ *
+ * For ref expressions using the $selected namespace, it validates that the field exists in the SELECT clause
+ * and passes them through unchanged (since $selected is already the correct namespace). All other ref expressions
+ * are passed through unchanged (treating them as table column references).
+ *
+ * @param havingExpr - The expression to transform (can be aggregate, ref, func, or val)
+ * @param selectClause - The SELECT clause containing aliases and aggregate definitions
+ * @param resultAlias - The namespace alias for SELECT results (default: '$selected', used for aggregate references)
+ * @returns A transformed BasicExpression that references computed values instead of raw expressions
  */
 export function replaceAggregatesByRefs(
   havingExpr: BasicExpression | Aggregate,
   selectClause: Select,
-  resultAlias: string = `result`,
+  resultAlias: string = `$selected`,
 ): BasicExpression {
   switch (havingExpr.type) {
     case `agg`: {
@@ -417,7 +440,38 @@ export function replaceAggregatesByRefs(
     }
 
     case `ref`: {
-      // Non-aggregate refs are passed through unchanged (they reference table columns)
+      const refExpr = havingExpr
+      const path = refExpr.path
+
+      if (path.length === 0) {
+        // Empty path - pass through
+        return havingExpr as BasicExpression
+      }
+
+      // Check if this is a $selected reference
+      if (path.length > 0 && path[0] === `$selected`) {
+        // Extract the field path after $selected
+        const fieldPath = path.slice(1)
+
+        if (fieldPath.length === 0) {
+          // Just $selected without a field - pass through unchanged
+          return havingExpr as BasicExpression
+        }
+
+        // Verify the field exists in SELECT clause
+        const alias = fieldPath.join(`.`)
+        if (alias in selectClause) {
+          // Pass through unchanged - $selected is already the correct namespace
+          return havingExpr as BasicExpression
+        }
+
+        // Field doesn't exist in SELECT - this is an error, but we'll pass through for now
+        // (Could throw an error here in the future)
+        return havingExpr as BasicExpression
+      }
+
+      // Not a $selected reference - this is a table column reference, pass through unchanged
+      // SELECT fields should only be accessed via $selected namespace
      return havingExpr as BasicExpression
    }
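
To illustrate the rewrite that replaceAggregatesByRefs now performs, here is a simplified sketch using a stand-in expression type (not the library's actual IR classes; the aliases and field names are made up): an aggregate that also appears in SELECT becomes a ref into the $selected namespace, and HAVING then evaluates as a plain property read on the grouped row.

// Stand-in for the IR node shapes referenced in the diff (agg / ref / val).
type SimpleExpr =
  | { type: `agg`; name: string; args: Array<SimpleExpr> }
  | { type: `ref`; path: Array<string> }
  | { type: `val`; value: unknown }

// Suppose SELECT computed `userCount: count(user.id)`. A HAVING written as
// `count(user.id) > 2` contains the same aggregate node, so the rewrite replaces
// it with a reference to the computed value under $selected:
const before: SimpleExpr = {
  type: `agg`,
  name: `count`,
  args: [{ type: `ref`, path: [`user`, `id`] }],
}
const after: SimpleExpr = { type: `ref`, path: [`$selected`, `userCount`] }
console.log(before, `->`, after)

// At runtime each grouped row carries its aggregates under $selected, so the
// compiled HAVING predicate is just a property read on that namespace:
const havingRow = { $selected: { userCount: 3 } }
console.log(havingRow.$selected.userCount > 2) // true -> group passes HAVING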
package/src/query/compiler/index.ts +13 -13

@@ -216,7 +216,7 @@ export function compileQuery(
     throw new DistinctRequiresSelectError()
   }
 
-  // Process the SELECT clause early - always create __select_results
+  // Process the SELECT clause early - always create $selected
   // This eliminates duplication and allows for DISTINCT implementation
   if (query.fnSelect) {
     // Handle functional select - apply the function to transform the row
@@ -227,15 +227,15 @@ export function compileQuery(
           key,
           {
             ...namespacedRow,
-            __select_results: selectResults,
+            $selected: selectResults,
           },
-        ] as [string, typeof namespacedRow & { __select_results: any }]
+        ] as [string, typeof namespacedRow & { $selected: any }]
       }),
     )
   } else if (query.select) {
     pipeline = processSelect(pipeline, query.select, allInputs)
   } else {
-    // If no SELECT clause, create __select_results with the main table data
+    // If no SELECT clause, create $selected with the main table data
     pipeline = pipeline.pipe(
       map(([key, namespacedRow]) => {
         const selectResults =
@@ -247,9 +247,9 @@ export function compileQuery(
           key,
           {
             ...namespacedRow,
-            __select_results: selectResults,
+            $selected: selectResults,
           },
-        ] as [string, typeof namespacedRow & { __select_results: any }]
+        ] as [string, typeof namespacedRow & { $selected: any }]
       }),
     )
   }
@@ -310,7 +310,7 @@ export function compileQuery(
 
   // Process the DISTINCT clause if it exists
   if (query.distinct) {
-    pipeline = pipeline.pipe(distinct(([_key, row]) => row.__select_results))
+    pipeline = pipeline.pipe(distinct(([_key, row]) => row.$selected))
   }
 
   // Process orderBy parameter if it exists
@@ -327,11 +327,11 @@ export function compileQuery(
       query.offset,
     )
 
-    // Final step: extract the __select_results and include orderBy index
+    // Final step: extract the $selected and include orderBy index
     const resultPipeline = orderedPipeline.pipe(
       map(([key, [row, orderByIndex]]) => {
-        // Extract the final results from __select_results and include orderBy index
-        const raw = (row as any).__select_results
+        // Extract the final results from $selected and include orderBy index
+        const raw = (row as any).$selected
         const finalResults = unwrapValue(raw)
         return [key, [finalResults, orderByIndex]] as [unknown, [any, string]]
       }),
@@ -354,11 +354,11 @@ export function compileQuery(
       throw new LimitOffsetRequireOrderByError()
     }
 
-    // Final step: extract the __select_results and return tuple format (no orderBy)
+    // Final step: extract the $selected and return tuple format (no orderBy)
     const resultPipeline: ResultStream = pipeline.pipe(
       map(([key, row]) => {
-        // Extract the final results from __select_results and return [key, [results, undefined]]
-        const raw = (row as any).__select_results
+        // Extract the final results from $selected and return [key, [results, undefined]]
+        const raw = (row as any).$selected
         const finalResults = unwrapValue(raw)
         return [key, [finalResults, undefined]] as [
           unknown,
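
In this file the change is essentially a rename of the internal envelope field: every stage keeps the original namespaced table data alongside the computed SELECT output, which now lives under $selected instead of __select_results. A rough sketch of that envelope (field names illustrative, not from the diff):

type PipelineRowSketch = Record<string, unknown> & {
  // computed SELECT output for this row or group
  $selected: Record<string, unknown>
}

const pipelineRow: PipelineRowSketch = {
  // table alias data stays available for ORDER BY / GROUP BY expressions
  user: { id: 1, name: `Ada`, salary: 90000 },
  // DISTINCT compares this object, and the final map() emits only this object
  $selected: { id: 1, name: `Ada` },
}

const emitted = pipelineRow.$selected // what the live query result actually contains
console.log(emitted)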
package/src/query/compiler/order-by.ts +7 -6

@@ -38,7 +38,7 @@ export type OrderByOptimizationInfo = {
 
 /**
  * Processes the ORDER BY clause
- * Works with the new structure that has both namespaced row data and __select_results
+ * Works with the new structure that has both namespaced row data and $selected
  * Always uses fractional indexing and adds the index as __ordering_index to the result
  */
 export function processOrderBy(
@@ -57,7 +57,7 @@ export function processOrderBy(
     const clauseWithoutAggregates = replaceAggregatesByRefs(
       clause.expression,
       selectClause,
-      `__select_results`,
+      `$selected`,
     )
 
     return {
@@ -67,12 +67,13 @@ export function processOrderBy(
   })
 
   // Create a value extractor function for the orderBy operator
-  const valueExtractor = (row: NamespacedRow & { __select_results?: any }) => {
+  const valueExtractor = (row: NamespacedRow & { $selected?: any }) => {
     // The namespaced row contains:
     // 1. Table aliases as top-level properties (e.g., row["tableName"])
-    // 2. SELECT results in __select_results (e.g., row.__select_results["aggregateAlias"])
-    // The replaceAggregatesByRefs function has already transformed any aggregate expressions
-    // that match SELECT aggregates to use the __select_results namespace.
+    // 2. SELECT results in $selected (e.g., row.$selected["aggregateAlias"])
+    // The replaceAggregatesByRefs function has already transformed:
+    // - Aggregate expressions that match SELECT aggregates to use the $selected namespace
+    // - $selected ref expressions are passed through unchanged (already using the correct namespace)
     const orderByContext = row
 
     if (orderByClause.length > 1) {
package/src/query/compiler/select.ts +5 -8

@@ -100,7 +100,7 @@ function processNonMergeOp(
 function processRow(
   [key, namespacedRow]: [unknown, NamespacedRow],
   ops: Array<SelectOp>,
-): [unknown, typeof namespacedRow & { __select_results: any }] {
+): [unknown, typeof namespacedRow & { $selected: any }] {
   const selectResults: Record<string, any> = {}
 
   for (const op of ops) {
@@ -111,21 +111,18 @@ function processRow(
     }
   }
 
-  // Return the namespaced row with __select_results added
+  // Return the namespaced row with $selected added
   return [
     key,
     {
       ...namespacedRow,
-      __select_results: selectResults,
+      $selected: selectResults,
     },
-  ] as [
-    unknown,
-    typeof namespacedRow & { __select_results: typeof selectResults },
-  ]
+  ] as [unknown, typeof namespacedRow & { $selected: typeof selectResults }]
 }
 
 /**
- * Processes the SELECT clause and places results in __select_results
+ * Processes the SELECT clause and places results in $selected
  * while preserving the original namespaced row for ORDER BY access
  */
 export function processSelect(
package/src/query/live/collection-config-builder.ts +46 -17

@@ -337,6 +337,10 @@ export class CollectionConfigBuilder<
       if (syncState.subscribedToAllCollections) {
         while (syncState.graph.pendingWork()) {
           syncState.graph.run()
+          // Flush accumulated changes after each graph step to commit them as one transaction.
+          // This ensures intermediate join states (like null on one side) don't cause
+          // duplicate key errors when the full join result arrives in the same step.
+          syncState.flushPendingChanges?.()
           callback?.()
         }
 
@@ -345,10 +349,14 @@ export class CollectionConfigBuilder<
         if (syncState.messagesCount === 0) {
           begin()
           commit()
-          // After initial commit, check if we should mark ready
-          // (in case all sources were already ready before we subscribed)
-          this.updateLiveQueryStatus(this.currentSyncConfig)
         }
+
+        // After graph processing completes, check if we should mark ready.
+        // This is the canonical place to transition to ready state because:
+        // 1. All data has been processed through the graph
+        // 2. All source collections have had a chance to send their initial data
+        // This prevents marking ready before data is processed (fixes isReady=true with empty data)
+        this.updateLiveQueryStatus(this.currentSyncConfig)
       }
     } finally {
       this.isGraphRunning = false
@@ -687,22 +695,35 @@ export class CollectionConfigBuilder<
     const { begin, commit } = config
     const { graph, inputs, pipeline } = this.maybeCompileBasePipeline()
 
+    // Accumulator for changes across all output callbacks within a single graph run.
+    // This allows us to batch all changes from intermediate join states into a single
+    // transaction, avoiding duplicate key errors when joins produce multiple outputs
+    // for the same key (e.g., first output with null, then output with joined data).
+    let pendingChanges: Map<unknown, Changes<TResult>> = new Map()
+
     pipeline.pipe(
       output((data) => {
         const messages = data.getInner()
         syncState.messagesCount += messages.length
 
-        begin()
-        messages
-          .reduce(
-            accumulateChanges<TResult>,
-            new Map<unknown, Changes<TResult>>(),
-          )
-          .forEach(this.applyChanges.bind(this, config))
-        commit()
+        // Accumulate changes from this output callback into the pending changes map.
+        // Changes for the same key are merged (inserts/deletes are added together).
+        messages.reduce(accumulateChanges<TResult>, pendingChanges)
       }),
     )
 
+    // Flush pending changes and reset the accumulator.
+    // Called at the end of each graph run to commit all accumulated changes.
+    syncState.flushPendingChanges = () => {
+      if (pendingChanges.size === 0) {
+        return
+      }
+      begin()
+      pendingChanges.forEach(this.applyChanges.bind(this, config))
+      commit()
+      pendingChanges = new Map()
+    }
+
     graph.finalize()
 
     // Extend the sync state with the graph, inputs, and pipeline
@@ -808,11 +829,14 @@ export class CollectionConfigBuilder<
       return
     }
 
-    // Mark ready when all source collections are ready AND
-    // the live query collection is not loading subset data.
-    // This prevents marking the live query ready before its data is loaded
+    // Mark ready when:
+    // 1. All subscriptions are set up (subscribedToAllCollections)
+    // 2. All source collections are ready
+    // 3. The live query collection is not loading subset data
+    // This prevents marking the live query ready before its data is processed
     // (fixes issue where useLiveQuery returns isReady=true with empty data)
     if (
+      this.currentSyncState?.subscribedToAllCollections &&
       this.allCollectionsReady() &&
       !this.liveQueryCollection?.isLoadingSubset
     ) {
@@ -913,8 +937,10 @@ export class CollectionConfigBuilder<
     // (graph only runs when all collections are subscribed)
     syncState.subscribedToAllCollections = true
 
-    // Initial status check after all subscriptions are set up
-    this.updateLiveQueryStatus(config)
+    // Note: We intentionally don't call updateLiveQueryStatus() here.
+    // The graph hasn't run yet, so marking ready would be premature.
+    // The canonical place to mark ready is after the graph processes data
+    // in maybeRunGraph(), which ensures data has been processed first.
 
     return loadSubsetDataCallbacks
   }
@@ -1096,8 +1122,11 @@ function accumulateChanges<T>(
     changes.deletes += Math.abs(multiplicity)
   } else if (multiplicity > 0) {
     changes.inserts += multiplicity
+    // Update value to the latest version for this key
     changes.value = value
-    changes.orderByIndex = orderByIndex
+    if (orderByIndex !== undefined) {
+      changes.orderByIndex = orderByIndex
+    }
   }
   acc.set(key, changes)
   return acc
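
A condensed sketch of the accumulate-then-flush pattern introduced above (names and types simplified from the diff; this is not the builder's actual internals): output callbacks only merge keyed changes into a pending map, and one flush per graph step applies them inside a single begin()/commit() pair, so an intermediate join output and its correction for the same key never reach the collection as two separate transactions.

type ChangesSketch<T> = { inserts: number; deletes: number; value?: T }

function makeChangeBatcher<T>(
  begin: () => void,
  commit: () => void,
  apply: (changes: ChangesSketch<T>, key: unknown) => void,
) {
  let pending = new Map<unknown, ChangesSketch<T>>()

  return {
    // Called from each output callback: merge deltas per key instead of committing.
    accumulate(key: unknown, delta: ChangesSketch<T>) {
      const existing: ChangesSketch<T> =
        pending.get(key) ?? { inserts: 0, deletes: 0 }
      existing.inserts += delta.inserts
      existing.deletes += delta.deletes
      if (delta.value !== undefined) existing.value = delta.value
      pending.set(key, existing)
    },
    // Called once per graph step: commit everything accumulated as one transaction.
    flush() {
      if (pending.size === 0) return
      begin()
      pending.forEach(apply)
      commit()
      pending = new Map()
    },
  }
}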
package/src/query/live/types.ts +3 -1

@@ -22,9 +22,11 @@ export type SyncState = {
   graph?: D2
   inputs?: Record<string, RootStreamBuilder<unknown>>
   pipeline?: ResultStream
+  flushPendingChanges?: () => void
 }
 
-export type FullSyncState = Required<SyncState>
+export type FullSyncState = Required<Omit<SyncState, `flushPendingChanges`>> &
+  Pick<SyncState, `flushPendingChanges`>
 
 /**
  * Configuration interface for live query collection options