@noy-db/hub 0.1.0-pre.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +197 -0
  3. package/dist/aggregate/index.cjs +476 -0
  4. package/dist/aggregate/index.cjs.map +1 -0
  5. package/dist/aggregate/index.d.cts +38 -0
  6. package/dist/aggregate/index.d.ts +38 -0
  7. package/dist/aggregate/index.js +53 -0
  8. package/dist/aggregate/index.js.map +1 -0
  9. package/dist/blobs/index.cjs +1480 -0
  10. package/dist/blobs/index.cjs.map +1 -0
  11. package/dist/blobs/index.d.cts +45 -0
  12. package/dist/blobs/index.d.ts +45 -0
  13. package/dist/blobs/index.js +48 -0
  14. package/dist/blobs/index.js.map +1 -0
  15. package/dist/bundle/index.cjs +436 -0
  16. package/dist/bundle/index.cjs.map +1 -0
  17. package/dist/bundle/index.d.cts +7 -0
  18. package/dist/bundle/index.d.ts +7 -0
  19. package/dist/bundle/index.js +40 -0
  20. package/dist/bundle/index.js.map +1 -0
  21. package/dist/chunk-2QR2PQTT.js +217 -0
  22. package/dist/chunk-2QR2PQTT.js.map +1 -0
  23. package/dist/chunk-4OWFYIDQ.js +79 -0
  24. package/dist/chunk-4OWFYIDQ.js.map +1 -0
  25. package/dist/chunk-5AATM2M2.js +90 -0
  26. package/dist/chunk-5AATM2M2.js.map +1 -0
  27. package/dist/chunk-ACLDOTNQ.js +543 -0
  28. package/dist/chunk-ACLDOTNQ.js.map +1 -0
  29. package/dist/chunk-BTDCBVJW.js +160 -0
  30. package/dist/chunk-BTDCBVJW.js.map +1 -0
  31. package/dist/chunk-CIMZBAZB.js +72 -0
  32. package/dist/chunk-CIMZBAZB.js.map +1 -0
  33. package/dist/chunk-E445ICYI.js +365 -0
  34. package/dist/chunk-E445ICYI.js.map +1 -0
  35. package/dist/chunk-EXQRC2L4.js +722 -0
  36. package/dist/chunk-EXQRC2L4.js.map +1 -0
  37. package/dist/chunk-FZU343FL.js +32 -0
  38. package/dist/chunk-FZU343FL.js.map +1 -0
  39. package/dist/chunk-GJILMRPO.js +354 -0
  40. package/dist/chunk-GJILMRPO.js.map +1 -0
  41. package/dist/chunk-GOUT6DND.js +1285 -0
  42. package/dist/chunk-GOUT6DND.js.map +1 -0
  43. package/dist/chunk-J66GRPNH.js +111 -0
  44. package/dist/chunk-J66GRPNH.js.map +1 -0
  45. package/dist/chunk-M2F2JAWB.js +464 -0
  46. package/dist/chunk-M2F2JAWB.js.map +1 -0
  47. package/dist/chunk-M5INGEFC.js +84 -0
  48. package/dist/chunk-M5INGEFC.js.map +1 -0
  49. package/dist/chunk-M62XNWRA.js +72 -0
  50. package/dist/chunk-M62XNWRA.js.map +1 -0
  51. package/dist/chunk-MR4424N3.js +275 -0
  52. package/dist/chunk-MR4424N3.js.map +1 -0
  53. package/dist/chunk-NPC4LFV5.js +132 -0
  54. package/dist/chunk-NPC4LFV5.js.map +1 -0
  55. package/dist/chunk-NXFEYLVG.js +311 -0
  56. package/dist/chunk-NXFEYLVG.js.map +1 -0
  57. package/dist/chunk-R36SIKES.js +79 -0
  58. package/dist/chunk-R36SIKES.js.map +1 -0
  59. package/dist/chunk-TDR6T5CJ.js +381 -0
  60. package/dist/chunk-TDR6T5CJ.js.map +1 -0
  61. package/dist/chunk-UF3BUNQZ.js +1 -0
  62. package/dist/chunk-UF3BUNQZ.js.map +1 -0
  63. package/dist/chunk-UQFSPSWG.js +1109 -0
  64. package/dist/chunk-UQFSPSWG.js.map +1 -0
  65. package/dist/chunk-USKYUS74.js +793 -0
  66. package/dist/chunk-USKYUS74.js.map +1 -0
  67. package/dist/chunk-XCL3WP6J.js +121 -0
  68. package/dist/chunk-XCL3WP6J.js.map +1 -0
  69. package/dist/chunk-XHFOENR2.js +680 -0
  70. package/dist/chunk-XHFOENR2.js.map +1 -0
  71. package/dist/chunk-ZFKD4QMV.js +430 -0
  72. package/dist/chunk-ZFKD4QMV.js.map +1 -0
  73. package/dist/chunk-ZLMV3TUA.js +490 -0
  74. package/dist/chunk-ZLMV3TUA.js.map +1 -0
  75. package/dist/chunk-ZRG4V3F5.js +17 -0
  76. package/dist/chunk-ZRG4V3F5.js.map +1 -0
  77. package/dist/consent/index.cjs +204 -0
  78. package/dist/consent/index.cjs.map +1 -0
  79. package/dist/consent/index.d.cts +24 -0
  80. package/dist/consent/index.d.ts +24 -0
  81. package/dist/consent/index.js +23 -0
  82. package/dist/consent/index.js.map +1 -0
  83. package/dist/crdt/index.cjs +152 -0
  84. package/dist/crdt/index.cjs.map +1 -0
  85. package/dist/crdt/index.d.cts +30 -0
  86. package/dist/crdt/index.d.ts +30 -0
  87. package/dist/crdt/index.js +24 -0
  88. package/dist/crdt/index.js.map +1 -0
  89. package/dist/crypto-IVKU7YTT.js +44 -0
  90. package/dist/crypto-IVKU7YTT.js.map +1 -0
  91. package/dist/delegation-XDJCBTI2.js +16 -0
  92. package/dist/delegation-XDJCBTI2.js.map +1 -0
  93. package/dist/dev-unlock-CeXic1xC.d.cts +263 -0
  94. package/dist/dev-unlock-KrKkcqD3.d.ts +263 -0
  95. package/dist/hash-9KO1BGxh.d.cts +63 -0
  96. package/dist/hash-ChfJjRjQ.d.ts +63 -0
  97. package/dist/history/index.cjs +1215 -0
  98. package/dist/history/index.cjs.map +1 -0
  99. package/dist/history/index.d.cts +62 -0
  100. package/dist/history/index.d.ts +62 -0
  101. package/dist/history/index.js +79 -0
  102. package/dist/history/index.js.map +1 -0
  103. package/dist/i18n/index.cjs +746 -0
  104. package/dist/i18n/index.cjs.map +1 -0
  105. package/dist/i18n/index.d.cts +38 -0
  106. package/dist/i18n/index.d.ts +38 -0
  107. package/dist/i18n/index.js +55 -0
  108. package/dist/i18n/index.js.map +1 -0
  109. package/dist/index-BRHBCmLt.d.ts +1940 -0
  110. package/dist/index-C8kQtmOk.d.ts +380 -0
  111. package/dist/index-DN-J-5wT.d.cts +1940 -0
  112. package/dist/index-DhjMjz7L.d.cts +380 -0
  113. package/dist/index.cjs +14756 -0
  114. package/dist/index.cjs.map +1 -0
  115. package/dist/index.d.cts +269 -0
  116. package/dist/index.d.ts +269 -0
  117. package/dist/index.js +6085 -0
  118. package/dist/index.js.map +1 -0
  119. package/dist/indexing/index.cjs +736 -0
  120. package/dist/indexing/index.cjs.map +1 -0
  121. package/dist/indexing/index.d.cts +36 -0
  122. package/dist/indexing/index.d.ts +36 -0
  123. package/dist/indexing/index.js +77 -0
  124. package/dist/indexing/index.js.map +1 -0
  125. package/dist/lazy-builder-BwEoBQZ9.d.ts +304 -0
  126. package/dist/lazy-builder-CZVLKh0Z.d.cts +304 -0
  127. package/dist/ledger-2NX4L7PN.js +33 -0
  128. package/dist/ledger-2NX4L7PN.js.map +1 -0
  129. package/dist/mime-magic-CBBSOkjm.d.cts +50 -0
  130. package/dist/mime-magic-CBBSOkjm.d.ts +50 -0
  131. package/dist/periods/index.cjs +1035 -0
  132. package/dist/periods/index.cjs.map +1 -0
  133. package/dist/periods/index.d.cts +21 -0
  134. package/dist/periods/index.d.ts +21 -0
  135. package/dist/periods/index.js +25 -0
  136. package/dist/periods/index.js.map +1 -0
  137. package/dist/predicate-SBHmi6D0.d.cts +161 -0
  138. package/dist/predicate-SBHmi6D0.d.ts +161 -0
  139. package/dist/query/index.cjs +1957 -0
  140. package/dist/query/index.cjs.map +1 -0
  141. package/dist/query/index.d.cts +3 -0
  142. package/dist/query/index.d.ts +3 -0
  143. package/dist/query/index.js +62 -0
  144. package/dist/query/index.js.map +1 -0
  145. package/dist/session/index.cjs +487 -0
  146. package/dist/session/index.cjs.map +1 -0
  147. package/dist/session/index.d.cts +45 -0
  148. package/dist/session/index.d.ts +45 -0
  149. package/dist/session/index.js +44 -0
  150. package/dist/session/index.js.map +1 -0
  151. package/dist/shadow/index.cjs +133 -0
  152. package/dist/shadow/index.cjs.map +1 -0
  153. package/dist/shadow/index.d.cts +16 -0
  154. package/dist/shadow/index.d.ts +16 -0
  155. package/dist/shadow/index.js +20 -0
  156. package/dist/shadow/index.js.map +1 -0
  157. package/dist/store/index.cjs +1069 -0
  158. package/dist/store/index.cjs.map +1 -0
  159. package/dist/store/index.d.cts +491 -0
  160. package/dist/store/index.d.ts +491 -0
  161. package/dist/store/index.js +34 -0
  162. package/dist/store/index.js.map +1 -0
  163. package/dist/strategy-BSxFXGzb.d.cts +110 -0
  164. package/dist/strategy-BSxFXGzb.d.ts +110 -0
  165. package/dist/strategy-D-SrOLCl.d.cts +548 -0
  166. package/dist/strategy-D-SrOLCl.d.ts +548 -0
  167. package/dist/sync/index.cjs +1062 -0
  168. package/dist/sync/index.cjs.map +1 -0
  169. package/dist/sync/index.d.cts +42 -0
  170. package/dist/sync/index.d.ts +42 -0
  171. package/dist/sync/index.js +28 -0
  172. package/dist/sync/index.js.map +1 -0
  173. package/dist/team/index.cjs +1233 -0
  174. package/dist/team/index.cjs.map +1 -0
  175. package/dist/team/index.d.cts +117 -0
  176. package/dist/team/index.d.ts +117 -0
  177. package/dist/team/index.js +39 -0
  178. package/dist/team/index.js.map +1 -0
  179. package/dist/tx/index.cjs +212 -0
  180. package/dist/tx/index.cjs.map +1 -0
  181. package/dist/tx/index.d.cts +20 -0
  182. package/dist/tx/index.d.ts +20 -0
  183. package/dist/tx/index.js +20 -0
  184. package/dist/tx/index.js.map +1 -0
  185. package/dist/types-BZpCZB8N.d.ts +7526 -0
  186. package/dist/types-Bfs0qr5F.d.cts +7526 -0
  187. package/dist/ulid-COREQ2RQ.js +9 -0
  188. package/dist/ulid-COREQ2RQ.js.map +1 -0
  189. package/dist/util/index.cjs +230 -0
  190. package/dist/util/index.cjs.map +1 -0
  191. package/dist/util/index.d.cts +77 -0
  192. package/dist/util/index.d.ts +77 -0
  193. package/dist/util/index.js +190 -0
  194. package/dist/util/index.js.map +1 -0
  195. package/package.json +244 -0
@@ -0,0 +1,1957 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/query/index.ts
var query_exports = {};
// Public surface of the query entry point. Every binding is registered as a
// lazy getter via __export, so the referenced values may be defined later in
// this bundle.
const queryExportBindings = {
  Aggregation: () => Aggregation,
  CollectionIndexes: () => CollectionIndexes,
  DEFAULT_JOIN_MAX_ROWS: () => DEFAULT_JOIN_MAX_ROWS,
  GROUPBY_MAX_CARDINALITY: () => GROUPBY_MAX_CARDINALITY,
  GROUPBY_WARN_CARDINALITY: () => GROUPBY_WARN_CARDINALITY,
  GroupedAggregation: () => GroupedAggregation,
  GroupedQuery: () => GroupedQuery,
  Query: () => Query,
  ScanBuilder: () => ScanBuilder,
  applyJoins: () => applyJoins,
  avg: () => avg,
  buildLiveAggregation: () => buildLiveAggregation,
  buildLiveQuery: () => buildLiveQuery,
  count: () => count,
  evaluateClause: () => evaluateClause,
  evaluateFieldClause: () => evaluateFieldClause,
  executePlan: () => executePlan,
  groupAndReduce: () => groupAndReduce,
  max: () => max,
  min: () => min,
  readPath: () => readPath,
  reduceRecords: () => reduceRecords,
  resetGroupByWarnings: () => resetGroupByWarnings,
  resetJoinWarnings: () => resetJoinWarnings,
  sum: () => sum
};
__export(query_exports, queryExportBindings);
module.exports = __toCommonJS(query_exports);
50
+
51
// src/query/predicate.ts

/**
 * Read a (possibly dotted) path off a record. Returns undefined when the
 * record itself, or any intermediate segment along the path, is
 * null/undefined. A path without dots is a single direct property read.
 */
function readPath(record, path) {
  if (record == null) return undefined;
  if (!path.includes(".")) {
    return record[path];
  }
  let node = record;
  for (const part of path.split(".")) {
    if (node == null) return undefined;
    node = node[part];
  }
  return node;
}

/**
 * Evaluate a single field/op/value clause against a record.
 * Ordering operators only match when both sides are mutually comparable
 * (both numbers, both strings, or both Dates) — mixed types never match.
 * Unknown operators evaluate to false.
 */
function evaluateFieldClause(record, clause) {
  const actual = readPath(record, clause.field);
  const { op, value } = clause;
  switch (op) {
    case "==":
      return actual === value;
    case "!=":
      return actual !== value;
    case "<":
      return isComparable(actual, value) && actual < value;
    case "<=":
      return isComparable(actual, value) && actual <= value;
    case ">":
      return isComparable(actual, value) && actual > value;
    case ">=":
      return isComparable(actual, value) && actual >= value;
    case "in":
      // The clause value is the candidate set.
      return Array.isArray(value) && value.includes(actual);
    case "contains":
      // Substring check for strings, membership check for arrays.
      if (typeof actual === "string") return typeof value === "string" && actual.includes(value);
      return Array.isArray(actual) ? actual.includes(value) : false;
    case "startsWith":
      return typeof actual === "string" && typeof value === "string" && actual.startsWith(value);
    case "between": {
      // value must be an inclusive [lo, hi] pair comparable with `actual`.
      if (!Array.isArray(value) || value.length !== 2) return false;
      const [low, high] = value;
      return isComparable(actual, low) && isComparable(actual, high) && actual >= low && actual <= high;
    }
    default:
      return false;
  }
}

/** True when `a` and `b` can be ordered against each other. */
function isComparable(a, b) {
  const kindA = typeof a;
  const kindB = typeof b;
  if (kindA === "number" && kindB === "number") return true;
  if (kindA === "string" && kindB === "string") return true;
  return a instanceof Date && b instanceof Date;
}

/**
 * Recursively evaluate a clause tree against a record.
 * - "field"  → single comparison via evaluateFieldClause
 * - "filter" → arbitrary predicate function (escape hatch)
 * - "group"  → short-circuiting AND / OR over child clauses
 */
function evaluateClause(record, clause) {
  switch (clause.type) {
    case "field":
      return evaluateFieldClause(record, clause);
    case "filter":
      return clause.fn(record);
    case "group": {
      const children = clause.clauses;
      if (clause.op === "and") {
        return children.every((child) => evaluateClause(record, child));
      }
      return children.some((child) => evaluateClause(record, child));
    }
  }
}
128
+
129
// src/errors.ts

/**
 * Base class for all noy-db errors. Carries a machine-readable `code`
 * alongside the human-readable message.
 */
var NoydbError = class extends Error {
  /** Machine-readable error code. Stable across library versions. */
  code;
  constructor(code, message) {
    super(message);
    this.code = code;
    this.name = "NoydbError";
  }
};

/**
 * Thrown when .groupBy() exceeds its distinct-group ceiling — almost always
 * a sign of grouping on a high-uniqueness field.
 */
var GroupCardinalityError = class extends NoydbError {
  /** The field being grouped on. */
  field;
  /** Observed number of distinct groups at the moment the cap tripped. */
  cardinality;
  /** The cap that was exceeded. */
  maxGroups;
  constructor(field, cardinality, maxGroups) {
    const message = `.groupBy("${field}") produced ${cardinality} distinct groups, exceeding the ${maxGroups}-group ceiling. This is almost always a query mistake \u2014 grouping on a high-uniqueness field like "id" or "createdAt" produces one bucket per record. Narrow the query with .where() before grouping, or group on a lower-cardinality field (status, category, clientId). If you genuinely need high-cardinality grouping, file an issue with your use case.`;
    super("GROUP_CARDINALITY", message);
    this.name = "GroupCardinalityError";
    Object.assign(this, { field, cardinality, maxGroups });
  }
};

/** Thrown when either side of an eager join exceeds its row ceiling. */
var JoinTooLargeError = class extends NoydbError {
  leftRows;
  rightRows;
  maxRows;
  side;
  constructor({ message, leftRows, rightRows, maxRows, side }) {
    super("JOIN_TOO_LARGE", message);
    this.name = "JoinTooLargeError";
    Object.assign(this, { leftRows, rightRows, maxRows, side });
  }
};

/** Thrown in strict ref mode when a join hits a reference with no target record. */
var DanglingReferenceError = class extends NoydbError {
  field;
  target;
  refId;
  constructor({ message, field, target, refId }) {
    super("DANGLING_REFERENCE", message);
    this.name = "DanglingReferenceError";
    Object.assign(this, { field, target, refId });
  }
};
183
+
184
// src/query/join.ts
// Default row ceiling applied to either side of an eager in-memory join
// (overridable per-leg via { maxRows }).
var DEFAULT_JOIN_MAX_ROWS = 5e4;
// Fraction of the ceiling at which a one-shot "approaching the limit"
// warning is emitted for a join side.
var JOIN_WARN_FRACTION = 0.8;
187
/**
 * Normalize a raw foreign-key value into a string map key.
 * Strings pass through; numbers and bigints are stringified; everything
 * else (null, undefined, objects, booleans, ...) yields null, meaning
 * "no usable reference".
 */
function coerceRefKey(value) {
  if (value == null) return null;
  switch (typeof value) {
    case "string":
      return value;
    case "number":
    case "bigint":
      return String(value);
    default:
      return null;
  }
}
193
// One-shot cache: each unique field→target:refId dangling pair warns once.
var warnedDanglingKeys = /* @__PURE__ */ new Set();
/**
 * Emit a single console warning per unique dangling-reference triple
 * encountered in 'warn' ref mode. Repeat sightings are silent.
 */
function warnOnceDangling(field, target, refId) {
  const dedupeKey = `${field}\u2192${target}:${refId}`;
  if (!warnedDanglingKeys.has(dedupeKey)) {
    warnedDanglingKeys.add(dedupeKey);
    console.warn(
      `[noy-db] .join() encountered dangling ref in 'warn' mode: field "${field}" \u2192 "${target}:${refId}" not found. Attaching null.`
    );
  }
}
202
// One-shot cache: each target:side pair warns at most once about the ceiling.
var warnedCeilingKeys = /* @__PURE__ */ new Set();
/**
 * Emit a single console warning per target/side pair when a join side has
 * crossed the warning fraction of its row ceiling.
 */
function warnCeilingApproaching(target, side, rows, maxRows) {
  const dedupeKey = `${target}:${side}`;
  if (!warnedCeilingKeys.has(dedupeKey)) {
    warnedCeilingKeys.add(dedupeKey);
    const percent = Math.round((rows / maxRows) * 100);
    console.warn(
      `[noy-db] .join() ${side} side is at ${percent}% of the ${maxRows}-row ceiling for target "${target}" (${rows} rows). Streaming joins over scan() are not yet supported for collections that need to exceed this.`
    );
  }
}
212
/**
 * Apply every join leg in order to a copy of the input rows.
 * With no legs this is a pure shallow copy; each leg feeds the output
 * of the previous one.
 */
function applyJoins(rows, joins, context) {
  return joins.reduce(
    (current, leg) => applyOneJoin(current, leg, context),
    [...rows]
  );
}
220
// Execute a single join leg against the current left-side rows.
// Three shapes are handled:
//   1. dict joins (leg.isDictJoin) — right side is a dictionary source whose
//      entries are indexed by their string "key" field; misses attach null;
//   2. nested-loop joins — chosen when the resolved source exposes lookupById
//      (unless the leg forces a different strategy);
//   3. hash joins — the fallback, or when leg.strategy === "hash".
// The per-leg row ceiling is enforced on BOTH sides before materializing, and
// a one-shot warning fires when either side crosses JOIN_WARN_FRACTION of it.
function applyOneJoin(leftRows, leg, context) {
  if (leg.isDictJoin) {
    // Dict joins bypass the collection resolver entirely.
    const dictSource = context.resolveDictSource?.(leg.field);
    if (!dictSource) {
      throw new Error(
        `.join() field "${leg.field}" on "${context.leftCollection}" is declared as a dictKey join but the dict source could not be resolved. Ensure the dictionary has at least one entry.`
      );
    }
    const out = [];
    const snapshot = dictSource.snapshot();
    // Index dict entries by their "key" field; entries whose key is not a
    // string are skipped (coerceRefKey would never produce a match for them).
    const dictMap = /* @__PURE__ */ new Map();
    for (const entry of snapshot) {
      const k = readPath(entry, "key");
      if (typeof k === "string") dictMap.set(k, entry);
    }
    for (const left of leftRows) {
      const rawId = readPath(left, leg.field);
      const key = coerceRefKey(rawId);
      // Missing dict entries attach null — no strict/warn handling on this path.
      const dictEntry = key === null ? void 0 : dictMap.get(key);
      out.push({ ...left, [leg.as]: dictEntry ?? null });
    }
    return out;
  }
  const source = context.resolveSource(leg.target);
  if (!source) {
    throw new Error(
      `.join() cannot resolve target collection "${leg.target}" (referenced from field "${leg.field}" on "${context.leftCollection}"). Make sure the target collection has been opened via vault.collection() at least once before running the query.`
    );
  }
  const maxRows = leg.maxRows ?? DEFAULT_JOIN_MAX_ROWS;
  // Left-side ceiling: checked before the right side is even snapshotted.
  if (leftRows.length > maxRows) {
    throw new JoinTooLargeError({
      leftRows: leftRows.length,
      // -1 = right side never counted (left tripped first).
      rightRows: -1,
      maxRows,
      side: "left",
      message: `.join() left side has ${leftRows.length} rows, exceeding the ${maxRows}-row ceiling for target "${leg.target}". Filter the left side further with where()/limit() before joining, or raise the ceiling via { maxRows }. Streaming joins over scan() are not yet supported.`
    });
  }
  if (leftRows.length > maxRows * JOIN_WARN_FRACTION) {
    warnCeilingApproaching(leg.target, "left", leftRows.length, maxRows);
  }
  const rightSnapshot = source.snapshot();
  if (rightSnapshot.length > maxRows) {
    throw new JoinTooLargeError({
      leftRows: leftRows.length,
      rightRows: rightSnapshot.length,
      maxRows,
      side: "right",
      message: `.join() right side "${leg.target}" has ${rightSnapshot.length} rows, exceeding the ${maxRows}-row ceiling. Raise the ceiling via { maxRows } if the data genuinely fits in memory, or track for streaming joins.`
    });
  }
  if (rightSnapshot.length > maxRows * JOIN_WARN_FRACTION) {
    warnCeilingApproaching(leg.target, "right", rightSnapshot.length, maxRows);
  }
  // Strategy selection: explicit leg.strategy wins; otherwise prefer
  // nested-loop when the source can look records up by id directly.
  const strategy = leg.strategy ?? (source.lookupById ? "nested" : "hash");
  if (strategy === "nested" && source.lookupById) {
    const lookup = (id) => source.lookupById?.(id);
    return nestedLoopJoin(leftRows, leg, lookup);
  }
  return hashJoin(leftRows, leg, rightSnapshot);
}
282
/**
 * Nested-loop join: for each left row, resolve its FK via the target's
 * lookupById and attach the result (or a dangling-ref outcome) via attachJoin.
 */
function nestedLoopJoin(leftRows, leg, lookupById) {
  return leftRows.map((left) => {
    const rawId = readPath(left, leg.field);
    const refKey = coerceRefKey(rawId);
    const right = refKey === null ? undefined : lookupById(refKey);
    return attachJoin(left, leg, right, rawId);
  });
}
292
/**
 * Hash join: index the right snapshot by coerced "id", then probe it once
 * per left row and attach the match (or dangling-ref outcome) via attachJoin.
 */
function hashJoin(leftRows, leg, rightSnapshot) {
  const rightById = new Map();
  for (const record of rightSnapshot) {
    const idKey = coerceRefKey(readPath(record, "id"));
    if (idKey !== null) rightById.set(idKey, record);
  }
  return leftRows.map((left) => {
    const rawId = readPath(left, leg.field);
    const refKey = coerceRefKey(rawId);
    const right = refKey === null ? undefined : rightById.get(refKey);
    return attachJoin(left, leg, right, rawId);
  });
}
310
/**
 * Merge the joined right-side record (or null) onto a copy of the left row
 * under leg.as, applying the leg's ref-mode semantics for dangling refs:
 *   - strict → throw DanglingReferenceError (only when the FK was non-null)
 *   - warn   → attach null and warn once per unique dangling pair
 *   - otherwise (cascade) → attach null silently
 * Non-object left values pass through untouched.
 */
function attachJoin(left, leg, right, rawId) {
  if (left === null || typeof left !== "object") {
    return left;
  }
  const merged = { ...left };
  const refKey = coerceRefKey(rawId);
  if (right !== undefined) {
    merged[leg.as] = right;
    return merged;
  }
  // A null/undefined FK (refKey === null) is "no reference", never dangling.
  if (refKey !== null) {
    if (leg.mode === "strict") {
      throw new DanglingReferenceError({
        field: leg.field,
        target: leg.target,
        refId: refKey,
        message: `.join() strict dangling: record references "${leg.target}:${refKey}" via field "${leg.field}", but no such record exists. Use ref() mode 'warn' or 'cascade' if dangling refs are acceptable, or run vault.checkIntegrity() to find and fix the orphans.`
      });
    }
    if (leg.mode === "warn") {
      warnOnceDangling(leg.field, leg.target, refKey);
    }
  }
  merged[leg.as] = null;
  return merged;
}
334
/**
 * Clear both one-shot warning caches (dangling refs and ceiling warnings)
 * so subsequent joins can emit their warnings again.
 */
function resetJoinWarnings() {
  for (const cache of [warnedDanglingKeys, warnedCeilingKeys]) {
    cache.clear();
  }
}
338
+
339
// src/query/live.ts

/** Build a live query: holds the latest recompute() result and re-runs it on upstream change. */
function buildLiveQuery(recompute, upstreams) {
  return new LiveQueryImpl(recompute, upstreams);
}

/**
 * Reactive result holder. Recomputes eagerly at construction, subscribes to
 * every upstream, and fans change notifications out to local listeners.
 * Errors from recompute or upstream subscription are captured on `error`
 * rather than thrown; `stop()` tears everything down idempotently.
 */
var LiveQueryImpl = class {
  recompute;
  _value = [];
  _error = null;
  listeners = /* @__PURE__ */ new Set();
  unsubs = [];
  stopped = false;
  /**
   * Bound change handler — used as the callback passed to every
   * upstream's subscribe. Bound via class field so the `this`
   * context survives the indirect call from arbitrary upstreams.
   */
  onUpstreamChange = () => {
    this.refresh();
    for (const listener of this.listeners) {
      try {
        listener();
      } catch {
        // Listener errors are isolated — one failing subscriber must not
        // starve the rest of the notification fan-out.
      }
    }
  };
  constructor(recompute, upstreams) {
    this.recompute = recompute;
    this.refresh();
    for (const upstream of upstreams) {
      try {
        this.unsubs.push(upstream.subscribe(this.onUpstreamChange));
      } catch (err) {
        this._error = err instanceof Error ? err : new Error(String(err));
      }
    }
  }
  get value() {
    return this._value;
  }
  get error() {
    return this._error;
  }
  /** Re-run recompute, storing the result or capturing the thrown error. No-op once stopped. */
  refresh() {
    if (this.stopped) return;
    try {
      this._value = this.recompute();
      this._error = null;
    } catch (err) {
      this._error = err instanceof Error ? err : new Error(String(err));
    }
  }
  /** Register a change listener; returns its unsubscribe. After stop(), a no-op unsubscribe. */
  subscribe(cb) {
    if (this.stopped) {
      return () => {
      };
    }
    this.listeners.add(cb);
    return () => this.listeners.delete(cb);
  }
  /** Tear down: unsubscribe from all upstreams (ignoring failures) and drop listeners. */
  stop() {
    if (this.stopped) return;
    this.stopped = true;
    for (const unsub of this.unsubs.splice(0)) {
      try {
        unsub();
      } catch {
        // Upstream unsubscribe failures are ignored during teardown.
      }
    }
    this.listeners.clear();
  }
};
409
+
410
// src/aggregate/strategy.ts
// Single shared sentinel error thrown by every NO_AGGREGATE method. Created
// once at module load and thrown by identity, so the same Error instance (and
// stack) is observed on every call.
var NOT_ENABLED = new Error(
  'Aggregate / groupBy is not enabled on this Noydb instance. Import `{ withAggregate }` from "@noy-db/hub/aggregate" and pass it to `createNoydb({ aggregateStrategy: withAggregate() })`.'
);
// Null-object aggregate strategy: the default used when the aggregate plugin
// is not installed. Every entry point fails loudly with an actionable setup
// message instead of silently returning nothing.
var NO_AGGREGATE = {
  aggregate() {
    throw NOT_ENABLED;
  },
  groupBy() {
    throw NOT_ENABLED;
  },
  scanAggregate() {
    throw NOT_ENABLED;
  }
};
425
+
426
// src/query/builder.ts
// The empty/identity query plan shared by every fresh Query. Safe to share a
// single object because builder methods always spread the plan into a new
// object rather than mutate it.
var EMPTY_PLAN = {
  clauses: [],    // AND-combined where/filter/group clauses
  orderBy: [],    // sort keys; later entries act as tie-breakers
  limit: void 0,  // undefined = no result cap
  offset: 0,      // records skipped after ordering
  joins: []       // join legs applied after the scalar plan narrows rows
};
434
+ var Query = class _Query {
435
+ source;
436
+ plan;
437
+ joinContext;
438
+ aggregateStrategy;
439
+ constructor(source, plan = EMPTY_PLAN, joinContext, aggregateStrategy = NO_AGGREGATE) {
440
+ this.source = source;
441
+ this.plan = plan;
442
+ this.joinContext = joinContext;
443
+ this.aggregateStrategy = aggregateStrategy;
444
+ }
445
+ /** Add a field comparison. Multiple where() calls are AND-combined. */
446
+ where(field, op, value) {
447
+ const clause = { type: "field", field, op, value };
448
+ return new _Query(
449
+ this.source,
450
+ { ...this.plan, clauses: [...this.plan.clauses, clause] },
451
+ this.joinContext,
452
+ this.aggregateStrategy
453
+ );
454
+ }
455
+ /**
456
+ * Logical OR group. Pass a callback that builds a sub-query.
457
+ * Each clause inside the callback is OR-combined; the group itself
458
+ * joins the parent plan with AND.
459
+ */
460
+ or(builder) {
461
+ const sub = builder(
462
+ new _Query(this.source, EMPTY_PLAN, this.joinContext, this.aggregateStrategy)
463
+ );
464
+ const group = {
465
+ type: "group",
466
+ op: "or",
467
+ clauses: sub.plan.clauses
468
+ };
469
+ return new _Query(
470
+ this.source,
471
+ { ...this.plan, clauses: [...this.plan.clauses, group] },
472
+ this.joinContext,
473
+ this.aggregateStrategy
474
+ );
475
+ }
476
+ /**
477
+ * Logical AND group. Same shape as `or()` but every clause inside the group
478
+ * must match. Useful for explicit grouping inside a larger OR.
479
+ */
480
+ and(builder) {
481
+ const sub = builder(
482
+ new _Query(this.source, EMPTY_PLAN, this.joinContext, this.aggregateStrategy)
483
+ );
484
+ const group = {
485
+ type: "group",
486
+ op: "and",
487
+ clauses: sub.plan.clauses
488
+ };
489
+ return new _Query(
490
+ this.source,
491
+ { ...this.plan, clauses: [...this.plan.clauses, group] },
492
+ this.joinContext,
493
+ this.aggregateStrategy
494
+ );
495
+ }
496
+ /** Escape hatch: add an arbitrary predicate function. Not serializable. */
497
+ filter(fn) {
498
+ const clause = {
499
+ type: "filter",
500
+ fn
501
+ };
502
+ return new _Query(
503
+ this.source,
504
+ { ...this.plan, clauses: [...this.plan.clauses, clause] },
505
+ this.joinContext,
506
+ this.aggregateStrategy
507
+ );
508
+ }
509
+ /** Sort by a field. Subsequent calls are tie-breakers. */
510
+ orderBy(field, direction = "asc") {
511
+ return new _Query(
512
+ this.source,
513
+ { ...this.plan, orderBy: [...this.plan.orderBy, { field, direction }] },
514
+ this.joinContext,
515
+ this.aggregateStrategy
516
+ );
517
+ }
518
+ /** Cap the result size. */
519
+ limit(n) {
520
+ return new _Query(
521
+ this.source,
522
+ { ...this.plan, limit: n },
523
+ this.joinContext,
524
+ this.aggregateStrategy
525
+ );
526
+ }
527
+ /** Skip the first N matching records (after ordering). */
528
+ offset(n) {
529
+ return new _Query(
530
+ this.source,
531
+ { ...this.plan, offset: n },
532
+ this.joinContext,
533
+ this.aggregateStrategy
534
+ );
535
+ }
536
+ /**
537
+ * Resolve a `ref()`-declared foreign key and attach the right-side
538
+ * record under `opts.as`. — eager, single-FK, intra-
539
+ * vault joins.
540
+ *
541
+ * ```ts
542
+ * const rows = invoices.query()
543
+ * .where('status', '==', 'open')
544
+ * .join('clientId', { as: 'client' })
545
+ * .toArray()
546
+ * // → [{ id, amount, client: { id, name, ... } }, ...]
547
+ * ```
548
+ *
549
+ * Preconditions:
550
+ * - The Query must have a `joinContext` (constructed via
551
+ * `Collection.query()`, not `new Query`).
552
+ * - `field` must have a matching `refs: { [field]: ref('<target>') }`
553
+ * declaration on the left collection.
554
+ * - The target collection must be reachable via the vault
555
+ * (either currently open or openable on demand).
556
+ *
557
+ * Strategy:
558
+ * - Nested-loop against `lookupById` when the target source
559
+ * provides it (the common path for Collection targets).
560
+ * - Hash join otherwise, or when `{ strategy: 'hash' }` is
561
+ * explicitly passed for test purposes.
562
+ *
563
+ * Ref-mode semantics on dangling refs (left record has a non-null
564
+ * FK value pointing at a right-side id that doesn't exist):
565
+ * - `strict` → throws `DanglingReferenceError` with the full
566
+ * field / target / refId context.
567
+ * - `warn` → attaches `null` and emits a one-shot warning per
568
+ * unique dangling pair.
569
+ * - `cascade` → attaches `null` silently. Cascade is a
570
+ * delete-time mode; dangling refs visible at read time are
571
+ * either mid-flight cascades or pre-existing orphans, not a
572
+ * DSL-level error.
573
+ *
574
+ * A left-side record whose FK field is `null` / `undefined` is NOT
575
+ * a dangling ref — it's "no reference at all", always allowed
576
+ * regardless of mode.
577
+ *
578
+ * The return type widens `T` with `Record<As, R | null>`. The `R`
579
+ * parameter is optional — supply it explicitly for type-checked
580
+ * access to the joined fields:
581
+ *
582
+ * ```ts
583
+ * invoices.query().join<'client', Client>('clientId', { as: 'client' })
584
+ * // ^^^^^^^^^^^^^^^^^^^ alias literal + right-side type
585
+ * ```
586
+ *
587
+ * Without the generic, the joined field is typed as `unknown`, which
588
+ * still works but requires a cast to access its properties.
589
+ *
590
+ * Joins stay intra-vault by construction — cross-vault
591
+ * correlation goes through `Noydb.queryAcross`, not
592
+ * `.join()`.
593
+ */
594
+ join(field, opts) {
595
+ if (!this.joinContext) {
596
+ throw new Error(
597
+ `Query.join() requires a join context. Use collection.query() to construct a join-capable Query instead of the Query constructor directly (the direct constructor is only used for tests with plain-object sources).`
598
+ );
599
+ }
600
+ const descriptor = this.joinContext.resolveRef(field);
601
+ const isDictJoinField = !descriptor && this.joinContext.resolveDictSource?.(field) != null;
602
+ if (!descriptor && !isDictJoinField) {
603
+ throw new Error(
604
+ `Query.join(): no ref() declared for field "${field}" on collection "${this.joinContext.leftCollection}". Add refs: { ${field}: ref('<target-collection>') } to the collection options, then retry. See the ref() docs for the full list of modes.`
605
+ );
606
+ }
607
+ const leg = descriptor ? {
608
+ field,
609
+ as: opts.as,
610
+ target: descriptor.target,
611
+ mode: descriptor.mode,
612
+ strategy: opts.strategy,
613
+ maxRows: opts.maxRows,
614
+ // constraint #1 — always 'all' in. Do not remove.
615
+ partitionScope: "all"
616
+ } : {
617
+ // Dict join leg
618
+ field,
619
+ as: opts.as,
620
+ target: field,
621
+ // dict name = field name for dictKey
622
+ mode: "strict",
623
+ strategy: opts.strategy,
624
+ maxRows: opts.maxRows,
625
+ partitionScope: "all",
626
+ isDictJoin: true
627
+ };
628
+ return new _Query(
629
+ this.source,
630
+ { ...this.plan, joins: [...this.plan.joins, leg] },
631
+ this.joinContext,
632
+ this.aggregateStrategy
633
+ );
634
+ }
635
+ /**
636
+ * Execute the plan and return the matching records. When the plan
637
+ * carries any join legs, they are applied after `where` / `orderBy`
638
+ * / `limit` / `offset` narrow the left set. See the `.join()` doc
639
+ * for the ordering rationale.
640
+ */
641
+ toArray() {
642
+ const base = executePlanWithSource(this.source, this.plan);
643
+ if (this.plan.joins.length === 0) return base;
644
+ if (!this.joinContext) {
645
+ throw new Error(
646
+ `Query.toArray(): plan carries ${this.plan.joins.length} join leg(s) but no JoinContext is attached. This usually means the Query was constructed via the raw Query constructor with a plan that had joins pre-populated. Use collection.query().join(...) instead.`
647
+ );
648
+ }
649
+ return applyJoins(base, this.plan.joins, this.joinContext);
650
+ }
651
+ /** Return the first matching record, or null. Joins are applied. */
652
+ first() {
653
+ const arr = this.limit(1).toArray();
654
+ return arr[0] ?? null;
655
+ }
656
+ /**
657
+ * Return the number of matching records (after where/filter,
658
+ * before limit). **Joins are NOT applied** — count() reports the
659
+ * left-side cardinality, because joins in are projection-only
660
+ * (they attach an aliased field; they never filter). Running joins
661
+ * here just to discard the aliases would be wasteful, and in strict
662
+ * mode it could throw `DanglingReferenceError` for a call whose
663
+ * intent is purely to count.
664
+ */
665
+ count() {
666
+ const { candidates, remainingClauses } = candidateRecords(this.source, this.plan.clauses);
667
+ if (remainingClauses.length === 0) return candidates.length;
668
+ return filterRecords(candidates, remainingClauses).length;
669
+ }
670
+ /**
671
+ * Reduce the matching records through a named set of reducers.
672
+ * the aggregation terminal.
673
+ *
674
+ * ```ts
675
+ * const { total, n, avgAmount } = invoices.query()
676
+ * .where('status', '==', 'open')
677
+ * .aggregate({
678
+ * total: sum('amount'),
679
+ * n: count(),
680
+ * avgAmount: avg('amount'),
681
+ * })
682
+ * .run()
683
+ * ```
684
+ *
685
+ * Returns an `Aggregation<R>` wrapper with two terminals:
686
+ * - `.run(): R` — synchronous one-shot reduction
687
+ * - `.live(): LiveAggregation<R>` — reactive primitive that
688
+ * re-runs the reduction whenever the source notifies of a
689
+ * change. Always call `live.stop()` when finished.
690
+ *
691
+ * The reducer spec is bound here once and reused by both
692
+ * terminals — this is why `.aggregate()` returns a wrapper instead
693
+ * of being a direct terminal. Consumers who only need the static
694
+ * value read `.run()`; consumers wiring a reactive UI read
695
+ * `.live()`.
696
+ *
697
+ * Joins are intentionally NOT applied to aggregations in —
698
+ * the same logic as `.count()`. Joins in are projection-only
699
+ * (they attach an aliased field and never filter), so running
700
+ * them just to throw the aliases away would be wasteful. If you
701
+ * need a reducer that reads a joined field, open an issue —
702
+ * aggregations-across-joins is explicitly out of scope for v1.
703
+ *
704
+ * Every reducer factory accepts an optional `{ seed }` parameter
705
+ * that is plumbed through the protocol but unused by the
706
+ * executor — that's constraint #2. When partition-aware
707
+ * aggregation lands, the seed will carry running state across
708
+ * partition boundaries without an API break.
709
+ */
710
+ aggregate(spec) {
711
+ const source = this.source;
712
+ const clauses = this.plan.clauses;
713
+ const executeRecords = () => {
714
+ const { candidates, remainingClauses } = candidateRecords(source, clauses);
715
+ return remainingClauses.length === 0 ? candidates : filterRecords(candidates, remainingClauses);
716
+ };
717
+ const upstreams = [];
718
+ if (source.subscribe) {
719
+ const subscribe = source.subscribe.bind(source);
720
+ upstreams.push({ subscribe: (cb) => subscribe(cb) });
721
+ }
722
+ return this.aggregateStrategy.aggregate(executeRecords, spec, upstreams);
723
+ }
724
+ /**
725
+ * Partition matching records into buckets keyed by a field, then
726
+ * terminate with `.aggregate(spec)` to compute per-bucket
727
+ * reducers..
728
+ *
729
+ * ```ts
730
+ * const byClient = invoices.query()
731
+ * .where('status', '==', 'open')
732
+ * .groupBy('clientId')
733
+ * .aggregate({ total: sum('amount'), n: count() })
734
+ * .run()
735
+ * // → [ { clientId: 'c1', total: 5250, n: 3 }, … ]
736
+ * ```
737
+ *
738
+ * Result rows carry the group key value under the grouping field
739
+ * name plus every reducer output from the spec. Buckets are
740
+ * emitted in first-seen order — consumers who want a specific
741
+ * ordering should `.sort()` downstream.
742
+ *
743
+ * **Cardinality caps:** a one-shot warning fires at 10_000
744
+ * distinct groups; `GroupCardinalityError` throws at 100_000.
745
+ * Grouping on a high-uniqueness field like `id` or `createdAt` is
746
+ * almost always a query mistake — the error message names the
747
+ * field and observed cardinality and suggests narrowing with
748
+ * `.where()` first.
749
+ *
750
+ * **Null / undefined keys:** records with a missing or explicitly
751
+ * `null` group field get their own buckets. `Map`-based
752
+ * partitioning distinguishes `undefined` from `null`, so the two
753
+ * cases do NOT merge. Consumers who want them merged should
754
+ * coalesce upstream with `.filter()`.
755
+ *
756
+ * **Joins are not applied** — same rationale as `.count()` and
757
+ * `.aggregate()`. Joined fields in are projection-only, so
758
+ * running a join inside a grouping pipeline would be wasteful and
759
+ * could trigger `DanglingReferenceError` in strict mode for a
760
+ * call whose intent is purely to bucket-and-reduce. Grouping by
761
+ * a joined field is explicitly out of scope for — file an
762
+ * issue if a real consumer needs it.
763
+ *
764
+ * **Filter clauses (`.filter(fn)`):** grouped queries still
765
+ * support filter clauses in the underlying plan — they run in
766
+ * the same candidate/filter pipeline that `.aggregate()` uses.
767
+ * The performance caveat is the same: filter clauses cost O(N)
768
+ * per record and can't be index-accelerated.
769
+ */
770
+ groupBy(field) {
771
+ const source = this.source;
772
+ const clauses = this.plan.clauses;
773
+ const executeRecords = () => {
774
+ const { candidates, remainingClauses } = candidateRecords(source, clauses);
775
+ return remainingClauses.length === 0 ? candidates : filterRecords(candidates, remainingClauses);
776
+ };
777
+ const upstreams = [];
778
+ if (source.subscribe) {
779
+ const subscribe = source.subscribe.bind(source);
780
+ upstreams.push({ subscribe: (cb) => subscribe(cb) });
781
+ }
782
+ const joinCtx = this.joinContext;
783
+ const dictLabelResolver = joinCtx?.resolveDictSource ? (() => {
784
+ const dictSource = joinCtx.resolveDictSource(field);
785
+ if (!dictSource) return void 0;
786
+ const snapshot = dictSource.snapshot();
787
+ const dictMap = /* @__PURE__ */ new Map();
788
+ for (const entry of snapshot) {
789
+ const k = entry["key"];
790
+ const labels = entry["labels"];
791
+ if (typeof k === "string" && labels && typeof labels === "object") {
792
+ dictMap.set(k, labels);
793
+ }
794
+ }
795
+ return async (key, locale, fallback) => {
796
+ const labels = dictMap.get(key);
797
+ if (!labels) return void 0;
798
+ if (labels[locale] !== void 0) return labels[locale];
799
+ const chain = Array.isArray(fallback) ? fallback : fallback ? [fallback] : [];
800
+ for (const fb of chain) {
801
+ if (fb === "any") {
802
+ const any = Object.values(labels)[0];
803
+ if (any !== void 0) return any;
804
+ } else if (labels[fb] !== void 0) {
805
+ return labels[fb];
806
+ }
807
+ }
808
+ return void 0;
809
+ };
810
+ })() : void 0;
811
+ return this.aggregateStrategy.groupBy(executeRecords, field, upstreams, dictLabelResolver);
812
+ }
813
+ /**
814
+ * Re-run the query whenever the source notifies of changes.
815
+ * Returns an unsubscribe function. The callback receives the latest result.
816
+ * Throws if the source does not support subscriptions.
817
+ *
818
+ * **For joined queries, prefer `.live()`** — `subscribe()`
819
+ * only re-fires on LEFT-side changes, so joined data can be
820
+ * stale if the right side mutates between emissions. `.live()`
821
+ * merges change streams from every join target.
822
+ */
823
+ subscribe(cb) {
824
+ if (!this.source.subscribe) {
825
+ throw new Error("Query source does not support subscriptions. Pass a source with a subscribe() method.");
826
+ }
827
+ cb(this.toArray());
828
+ return this.source.subscribe(() => cb(this.toArray()));
829
+ }
830
+ /**
831
+ * Reactive terminal — returns a `LiveQuery<T>` that re-runs the
832
+ * query and updates its `value` whenever any source feeding it
833
+ * mutates..
834
+ *
835
+ * For non-joined queries, `.live()` is a convenience over the
836
+ * existing `.subscribe()` callback shape: a hand-rolled reactive
837
+ * primitive with `value` / `error` fields and a `subscribe(cb)`
838
+ * notification channel. Frame-agnostic — Vue / React / Solid
839
+ * adapters wrap it in their own primitive.
840
+ *
841
+ * For joined queries, `.live()` additionally subscribes to every
842
+ * join target's change stream. Mutations on a right-side
843
+ * collection (insert / update / delete of a client referenced by
844
+ * an invoice) re-fire the live query and re-evaluate every
845
+ * dependent left row. Right-side targets are deduped by
846
+ * collection name, so a chain that joins the same target twice
847
+ * (e.g. billing client + shipping client → both 'clients') only
848
+ * subscribes once.
849
+ *
850
+ * **Ref-mode behavior on right-side disappearance** — matches the
851
+ * eager `.toArray()` contract from :
852
+ * - `strict` → re-run throws `DanglingReferenceError`. The
853
+ * LiveQuery catches the throw, stores it in `live.error`, and
854
+ * notifies listeners (the throw does NOT propagate out of
855
+ * the source's change handler — that would tear down the
856
+ * emitter). Consumers check `live.error` after each
857
+ * notification and render an error state in the UI.
858
+ * - `warn` → joined value flips to `null`; the existing
859
+ * warn-channel deduplication keeps repeated re-runs from
860
+ * spamming the console.
861
+ * - `cascade` → no special handling needed; the cascade-
862
+ * delete mechanism propagates the right-side delete into the
863
+ * left collection on the next tick, and the live query
864
+ * naturally re-fires with the orphaned left rows gone.
865
+ *
866
+ * Always call `live.stop()` when finished — it tears down every
867
+ * upstream subscription. The Vue layer's `onUnmounted` hook
868
+ * should call `stop()` automatically; raw consumers must do it
869
+ * themselves.
870
+ *
871
+ * **Limitations:**
872
+ * - No granular delta updates — the whole query re-runs on
873
+ * every change.
874
+ * - No microtask batching — bursty changes produce one re-run
875
+ * per change.
876
+ * - No re-planning under live mutations — the planner picks
877
+ * once at subscription time and reuses the same plan.
878
+ * - Streaming live joins are deferred.
879
+ */
880
+ live() {
881
+ const upstreams = [];
882
+ if (this.source.subscribe) {
883
+ const leftSubscribe = this.source.subscribe.bind(this.source);
884
+ upstreams.push({
885
+ subscribe: (cb) => leftSubscribe(cb)
886
+ });
887
+ }
888
+ if (this.plan.joins.length > 0 && this.joinContext) {
889
+ const subscribed = /* @__PURE__ */ new Set();
890
+ for (const leg of this.plan.joins) {
891
+ if (subscribed.has(leg.target)) continue;
892
+ subscribed.add(leg.target);
893
+ const rightSource = this.joinContext.resolveSource(leg.target);
894
+ if (rightSource?.subscribe) {
895
+ const rightSubscribe = rightSource.subscribe.bind(rightSource);
896
+ upstreams.push({
897
+ subscribe: (cb) => rightSubscribe(cb)
898
+ });
899
+ }
900
+ }
901
+ }
902
+ return buildLiveQuery(() => this.toArray(), upstreams);
903
+ }
904
  /**
   * Return the plan as a JSON-friendly object. FilterClause entries are
   * stripped (their `fn` cannot be serialized) and replaced with
   * { type: 'filter', fn: '[function]' } so devtools can still see them.
   */
  toPlan() {
    return serializePlan(this.plan);
  }
912
+ };
913
/**
 * Run a plan against a source: index-accelerated candidate selection,
 * residual clause filtering, then orderBy / offset / limit, in that
 * order. Joins are not handled here — callers apply them afterwards.
 */
function executePlanWithSource(source, plan) {
  const { candidates, remainingClauses } = candidateRecords(source, plan.clauses);
  // Copy when no filtering happens, so callers never alias the snapshot.
  let rows = remainingClauses.length === 0 ? [...candidates] : filterRecords(candidates, remainingClauses);
  if (plan.orderBy.length > 0) rows = sortRecords(rows, plan.orderBy);
  if (plan.offset > 0) rows = rows.slice(plan.offset);
  return plan.limit === undefined ? rows : rows.slice(0, plan.limit);
}
927
/**
 * Pick the candidate record set for a clause list. When the source
 * exposes indexes and id-lookup, the first index-covered equality/`in`
 * clause is satisfied via the index and removed from the clause list;
 * otherwise (or when no clause is indexable) fall back to a full
 * snapshot with every clause still pending.
 */
function candidateRecords(source, clauses) {
  const scanEverything = () => ({ candidates: source.snapshot(), remainingClauses: clauses });
  const indexes = source.getIndexes?.();
  if (!indexes || !source.lookupById || clauses.length === 0) return scanEverything();
  const lookupById = (id) => source.lookupById?.(id);
  for (let i = 0; i < clauses.length; i++) {
    const clause = clauses[i];
    if (clause.type !== "field" || !indexes.has(clause.field)) continue;
    let ids = null;
    if (clause.op === "==") {
      ids = indexes.lookupEqual(clause.field, clause.value);
    } else if (clause.op === "in" && Array.isArray(clause.value)) {
      ids = indexes.lookupIn(clause.field, clause.value);
    }
    if (ids === null) continue;
    // The indexed clause is fully satisfied; drop it from the residue.
    const remainingClauses = clauses.filter((_, j) => j !== i);
    return { candidates: materializeIds(ids, lookupById), remainingClauses };
  }
  return scanEverything();
}
956
/**
 * Resolve a set of record ids into records, silently skipping ids the
 * source no longer knows about (deleted between index read and lookup).
 */
function materializeIds(ids, lookupById) {
  return [...ids].map((id) => lookupById(id)).filter((record) => record !== undefined);
}
964
/**
 * Run a plan against an in-memory record array (no index support):
 * filter, then orderBy / offset / limit in that order.
 */
function executePlan(records, plan) {
  let rows = filterRecords(records, plan.clauses);
  if (plan.orderBy.length > 0) rows = sortRecords(rows, plan.orderBy);
  if (plan.offset > 0) rows = rows.slice(plan.offset);
  return plan.limit === undefined ? rows : rows.slice(0, plan.limit);
}
977
/**
 * Keep only the records matching every clause (AND semantics, with
 * per-record short-circuit). Always returns a fresh array, even when
 * there are no clauses.
 */
function filterRecords(records, clauses) {
  if (clauses.length === 0) return [...records];
  const matched = [];
  for (const record of records) {
    if (clauses.every((clause) => evaluateClause(record, clause))) {
      matched.push(record);
    }
  }
  return matched;
}
992
/**
 * Stable multi-key sort: later orderBy entries break ties left by
 * earlier ones. The input array is copied, never mutated.
 */
function sortRecords(records, orderBy) {
  const copy = [...records];
  copy.sort((left, right) => {
    for (const { field, direction } of orderBy) {
      const cmp = compareValues(readField(left, field), readField(right, field));
      if (cmp === 0) continue;
      return direction === "asc" ? cmp : -cmp;
    }
    return 0;
  });
  return copy;
}
1003
/**
 * Read a possibly dotted field path off a record. Any nullish link in
 * the chain (including a nullish record) yields undefined instead of
 * throwing.
 */
function readField(record, field) {
  let cursor = record;
  for (const segment of field.split(".")) {
    if (cursor === null || cursor === undefined) return undefined;
    cursor = cursor[segment];
  }
  return cursor;
}
1016
/**
 * Three-way comparison used by sortRecords. Nullish values sort last
 * under ascending order; numbers compare numerically, strings
 * lexically, Dates by timestamp; any other (or mixed-type) pair ties.
 */
function compareValues(a, b) {
  const aNullish = a === undefined || a === null;
  const bNullish = b === undefined || b === null;
  if (aNullish) return bNullish ? 0 : 1;
  if (bNullish) return -1;
  if (typeof a === "number" && typeof b === "number") return a - b;
  if (typeof a === "string" && typeof b === "string") {
    if (a < b) return -1;
    if (a > b) return 1;
    return 0;
  }
  if (a instanceof Date && b instanceof Date) return a.getTime() - b.getTime();
  return 0;
}
1024
/**
 * JSON-friendly view of a plan: clauses pass through serializeClause
 * (which strips un-serializable filter functions); everything else is
 * carried as-is.
 */
function serializePlan(plan) {
  const { orderBy, limit, offset, joins } = plan;
  return {
    clauses: plan.clauses.map(serializeClause),
    orderBy,
    limit,
    offset,
    joins
  };
}
1033
/**
 * Serialize one clause. Filter clauses get a '[function]' placeholder
 * for their fn; group clauses recurse; field clauses pass through
 * unchanged.
 */
function serializeClause(clause) {
  switch (clause.type) {
    case "filter":
      return { type: "filter", fn: "[function]" };
    case "group":
      return {
        type: "group",
        op: clause.op,
        clauses: clause.clauses.map(serializeClause)
      };
    default:
      return clause;
  }
}
1046
+
1047
+ // src/indexing/eager-indexes.ts
1048
var CollectionIndexes = class {
  // field name → { field, buckets: Map<stringified value, Set<id>> }
  indexes = /* @__PURE__ */ new Map();
  /** Declare an index on `field`. Idempotent — re-declaring is a no-op. */
  declare(field) {
    if (!this.indexes.has(field)) {
      this.indexes.set(field, { field, buckets: /* @__PURE__ */ new Map() });
    }
  }
  /** Whether `field` has a declared index. */
  has(field) {
    return this.indexes.has(field);
  }
  /** Declared field names, in declaration order. */
  fields() {
    return [...this.indexes.keys()];
  }
  /**
   * Rebuild every declared index from a full snapshot of records.
   * Called once per hydration; O(N × number of indexes).
   */
  build(records) {
    for (const idx of this.indexes.values()) {
      idx.buckets.clear();
      for (const { id, record } of records) {
        addToIndex(idx, id, record);
      }
    }
  }
  /**
   * Insert or update one record across all indexes. Pass the previous
   * record so stale buckets are cleared first (the update path), or
   * null for a fresh add. Called after the encrypted write succeeds.
   */
  upsert(id, newRecord, previousRecord) {
    if (this.indexes.size === 0) return;
    if (previousRecord !== null) this.remove(id, previousRecord);
    for (const idx of this.indexes.values()) {
      addToIndex(idx, id, newRecord);
    }
  }
  /** Remove one record from every index (delete path, and first half of upsert). */
  remove(id, record) {
    if (this.indexes.size === 0) return;
    for (const idx of this.indexes.values()) {
      removeFromIndex(idx, id, record);
    }
  }
  /** Drop all bucket data (collection invalidation). Declarations survive. */
  clear() {
    for (const idx of this.indexes.values()) {
      idx.buckets.clear();
    }
  }
  /**
   * Equality lookup → Set of matching record ids, or null when the
   * field is not indexed (caller falls back to a linear scan). The
   * returned Set aliases internal storage — callers must NOT mutate it.
   */
  lookupEqual(field, value) {
    const idx = this.indexes.get(field);
    if (!idx) return null;
    return idx.buckets.get(stringifyKey(value)) ?? EMPTY_SET;
  }
  /**
   * Set lookup → union of ids matching any of `values`, or null when
   * the field is not indexed.
   */
  lookupIn(field, values) {
    const idx = this.indexes.get(field);
    if (!idx) return null;
    const union = /* @__PURE__ */ new Set();
    for (const value of values) {
      const bucket = idx.buckets.get(stringifyKey(value));
      if (bucket) {
        for (const id of bucket) union.add(id);
      }
    }
    return union;
  }
};
1142
// Shared empty result for lookupEqual misses. Immutable by convention —
// callers receiving it must never mutate it (see lookupEqual docs).
var EMPTY_SET = /* @__PURE__ */ new Set();
1143
/**
 * Canonicalize an index value into a Map key string.
 *
 * Keys carry a one-character type tag so values of different types can
 * never share a bucket. Previously strings were used raw and other
 * scalars went through String()/toISOString(), so the boolean `true`
 * collided with the string "true", the number 1 with the string "1",
 * and a Date with any string equal to its ISO timestamp — making
 * index-accelerated lookups return records that a linear `===` scan
 * (evaluateClause) would reject.
 *
 * Behavior kept from the original:
 * - null and undefined share one sentinel bucket;
 * - Dates key by ISO timestamp (equal instants share a bucket);
 * - objects/arrays collapse to a single opaque sentinel — they are not
 *   meaningfully indexable by value.
 *
 * The format is internal: every producer/consumer (addToIndex,
 * removeFromIndex, lookupEqual, lookupIn) routes through this function
 * and buckets are rebuilt from snapshots, so no persisted data is
 * affected.
 */
function stringifyKey(value) {
  if (value === null || value === void 0) return "\0NULL\0";
  if (typeof value === "string") return "s\0" + value;
  if (typeof value === "number") return "n\0" + String(value);
  if (typeof value === "boolean") return "b\0" + String(value);
  if (value instanceof Date) return "d\0" + value.toISOString();
  return "\0OBJECT\0";
}
1150
/**
 * Add one record id to the bucket for its field value. Nullish values
 * are not indexed (queries for null fall back to a scan).
 */
function addToIndex(idx, id, record) {
  const value = readPath(record, idx.field);
  if (value === null || value === undefined) return;
  const key = stringifyKey(value);
  const bucket = idx.buckets.get(key);
  if (bucket) {
    bucket.add(id);
  } else {
    idx.buckets.set(key, /* @__PURE__ */ new Set([id]));
  }
}
1161
/**
 * Remove one record id from the bucket for its field value; empty
 * buckets are dropped so the key space doesn't grow unboundedly.
 */
function removeFromIndex(idx, id, record) {
  const value = readPath(record, idx.field);
  if (value === null || value === undefined) return;
  const key = stringifyKey(value);
  const bucket = idx.buckets.get(key);
  if (!bucket) return;
  bucket.delete(id);
  if (bucket.size === 0) {
    idx.buckets.delete(key);
  }
}
1170
+
1171
+ // src/aggregate/reducers.ts
1172
/**
 * Reducer factory: number of records. `seed` is accepted per the
 * reducer protocol but intentionally unused by the executor
 * (constraint #2 — reserved for partition-aware aggregation).
 */
function count(opts) {
  void opts?.seed;
  return {
    init: () => 0,
    step: (n) => n + 1,
    remove: (n) => n - 1,
    finalize: (n) => n
  };
}
1182
/**
 * Reducer factory: sum of a numeric field. Non-numeric / non-finite
 * values contribute 0 (see readNumber). `seed` is reserved
 * (constraint #2) and unused.
 */
function sum(field, opts) {
  void opts?.seed;
  return {
    init: () => 0,
    step: (acc, record) => acc + readNumber(record, field),
    remove: (acc, record) => acc - readNumber(record, field),
    finalize: (acc) => acc
  };
}
1192
/**
 * Reducer factory: arithmetic mean of a numeric field, carried as a
 * { sum, count } pair so remove() stays O(1). Empty input finalizes to
 * null. `seed` is reserved (constraint #2) and unused.
 */
function avg(field, opts) {
  void opts?.seed;
  // Shared update for step (+1) and remove (-1).
  const shift = (state, record, sign) => ({
    sum: state.sum + sign * readNumber(record, field),
    count: state.count + sign
  });
  return {
    init: () => ({ sum: 0, count: 0 }),
    step: (state, record) => shift(state, record, 1),
    remove: (state, record) => shift(state, record, -1),
    finalize: ({ sum: total, count: n }) => (n === 0 ? null : total / n)
  };
}
1208
/** Append a value to a min/max state without mutating the old state. */
function pushValue(state, value) {
  return { values: state.values.concat(value) };
}
1211
/**
 * Remove one occurrence of a value from a min/max state. Returns the
 * SAME state object when the value is absent (no-op), otherwise a new
 * state with the first occurrence dropped.
 */
function removeValue(state, value) {
  const at = state.values.indexOf(value);
  if (at === -1) return state;
  return {
    values: [...state.values.slice(0, at), ...state.values.slice(at + 1)]
  };
}
1218
/**
 * Reducer factory: minimum of a numeric field. Keeps the full value
 * list so remove() can retract any element; empty input finalizes to
 * null. `seed` is reserved (constraint #2) and unused.
 */
function min(field, opts) {
  void opts?.seed;
  return {
    init: () => ({ values: [] }),
    step: (state, record) => pushValue(state, readNumber(record, field)),
    remove: (state, record) => removeValue(state, readNumber(record, field)),
    finalize: ({ values }) => {
      if (values.length === 0) return null;
      let best = values[0];
      for (const v of values) {
        if (v < best) best = v;
      }
      return best;
    }
  };
}
1236
/**
 * Reducer factory: maximum of a numeric field. Keeps the full value
 * list so remove() can retract any element; empty input finalizes to
 * null. `seed` is reserved (constraint #2) and unused.
 */
function max(field, opts) {
  void opts?.seed;
  return {
    init: () => ({ values: [] }),
    step: (state, record) => pushValue(state, readNumber(record, field)),
    remove: (state, record) => removeValue(state, readNumber(record, field)),
    finalize: ({ values }) => {
      if (values.length === 0) return null;
      let best = values[0];
      for (const v of values) {
        if (v > best) best = v;
      }
      return best;
    }
  };
}
1254
/**
 * Read a field as a finite number; anything else (missing, NaN,
 * Infinity, non-number) contributes 0 so reducers never poison their
 * accumulators.
 */
function readNumber(record, field) {
  const raw = readPath(record, field);
  if (typeof raw !== "number") return 0;
  return Number.isFinite(raw) ? raw : 0;
}
1258
+
1259
+ // src/aggregate/aggregation.ts
1260
/**
 * One-shot reduction: init every reducer in the spec, step each record
 * through all of them, then finalize into a result object keyed like
 * the spec.
 */
function reduceRecords(records, spec) {
  const names = Object.keys(spec);
  const state = {};
  for (const name of names) {
    state[name] = spec[name].init();
  }
  for (const record of records) {
    for (const name of names) {
      state[name] = spec[name].step(state[name], record);
    }
  }
  const result = {};
  for (const name of names) {
    result[name] = spec[name].finalize(state[name]);
  }
  return result;
}
1276
+ var LiveAggregationImpl = class {
1277
+ constructor(recompute, upstreams) {
1278
+ this.recompute = recompute;
1279
+ try {
1280
+ this.value = recompute();
1281
+ this.error = void 0;
1282
+ } catch (err) {
1283
+ this.value = void 0;
1284
+ this.error = err;
1285
+ }
1286
+ for (const upstream of upstreams) {
1287
+ const unsub = upstream.subscribe(() => this.refresh());
1288
+ this.unsubscribes.push(unsub);
1289
+ }
1290
+ }
1291
+ recompute;
1292
+ value;
1293
+ error;
1294
+ listeners = /* @__PURE__ */ new Set();
1295
+ unsubscribes = [];
1296
+ stopped = false;
1297
+ refresh() {
1298
+ if (this.stopped) return;
1299
+ try {
1300
+ this.value = this.recompute();
1301
+ this.error = void 0;
1302
+ } catch (err) {
1303
+ this.error = err;
1304
+ }
1305
+ for (const listener of this.listeners) {
1306
+ try {
1307
+ listener();
1308
+ } catch (err) {
1309
+ console.warn("[noy-db] LiveAggregation listener threw:", err);
1310
+ }
1311
+ }
1312
+ }
1313
+ subscribe(cb) {
1314
+ if (this.stopped) {
1315
+ return () => {
1316
+ };
1317
+ }
1318
+ this.listeners.add(cb);
1319
+ return () => {
1320
+ this.listeners.delete(cb);
1321
+ };
1322
+ }
1323
+ stop() {
1324
+ if (this.stopped) return;
1325
+ this.stopped = true;
1326
+ for (const unsub of this.unsubscribes) {
1327
+ try {
1328
+ unsub();
1329
+ } catch (err) {
1330
+ console.warn("[noy-db] LiveAggregation upstream unsubscribe threw:", err);
1331
+ }
1332
+ }
1333
+ this.unsubscribes.length = 0;
1334
+ this.listeners.clear();
1335
+ }
1336
+ };
1337
var Aggregation = class {
  executeRecords;
  spec;
  upstreams;
  constructor(executeRecords, spec, upstreams) {
    this.executeRecords = executeRecords;
    this.spec = spec;
    this.upstreams = upstreams;
  }
  /**
   * Synchronous one-shot terminal: execute the query, reduce, and
   * return the spec-shaped result — e.g. a spec of
   * `{ total: sum('amount'), n: count() }` yields
   * `{ total: number, n: number }`.
   */
  run() {
    return reduceRecords(this.executeRecords(), this.spec);
  }
  /**
   * Reactive terminal: a `LiveAggregation<R>` whose value is computed
   * eagerly in the constructor and re-reduced on every upstream change
   * notification. Always call `live.stop()` when finished to release
   * the upstream subscriptions.
   *
   * Implementation note: each change triggers a full re-reduction.
   * Incremental O(1)-per-delta maintenance via the reducer protocol's
   * `remove()` is a planned follow-up — the protocol already supports
   * it, so this API won't change.
   */
  live() {
    return new LiveAggregationImpl(
      () => reduceRecords(this.executeRecords(), this.spec),
      this.upstreams
    );
  }
};
1378
/**
 * Factory so other modules can construct a LiveAggregation from a bare
 * recompute closure without referencing the implementation class.
 */
function buildLiveAggregation(recompute, upstreams) {
  return new LiveAggregationImpl(recompute, upstreams);
}
1381
+
1382
+ // src/aggregate/groupby.ts
1383
// Cardinality guardrails for .groupBy(): warn once per field at 10_000
// distinct groups; hard-fail (GroupCardinalityError) at 100_000.
var GROUPBY_WARN_CARDINALITY = 1e4;
var GROUPBY_MAX_CARDINALITY = 1e5;
// Fields that already produced a warning — keeps it to one per field.
var warnedCardinalityFields = /* @__PURE__ */ new Set();
1386
/**
 * Emit the one-shot high-cardinality warning for a groupBy field,
 * deduped per field via the module-level set.
 */
function warnCardinalityApproaching(field, observed) {
  if (warnedCardinalityFields.has(field)) return;
  warnedCardinalityFields.add(field);
  const percent = Math.round(observed / GROUPBY_MAX_CARDINALITY * 100);
  console.warn(
    `[noy-db] .groupBy("${field}") produced ${observed} distinct groups, ${percent}% of the ${GROUPBY_MAX_CARDINALITY}-group ceiling. Narrow the query with .where() before grouping, or switch to a lower-cardinality field.`
  );
}
1393
/** Test hook: forget which fields already triggered a cardinality warning. */
function resetGroupByWarnings() {
  warnedCardinalityFields.clear();
}
1396
var GroupedQuery = class {
  executeRecords;
  field;
  upstreams;
  dictLabelResolver;
  constructor(executeRecords, field, upstreams, dictLabelResolver) {
    this.executeRecords = executeRecords;
    this.field = field;
    this.upstreams = upstreams;
    this.dictLabelResolver = dictLabelResolver;
  }
  /**
   * Build a grouped aggregation. Returns a `GroupedAggregation` with
   * `.run()`, `.runAsync()`, and `.live()` terminals — the same shape
   * as the non-grouped `.aggregate()` wrapper, except the result is an
   * array (one row per bucket) instead of a single reduced object.
   */
  aggregate(spec) {
    const { executeRecords, field, upstreams, dictLabelResolver } = this;
    return new GroupedAggregation(executeRecords, field, spec, upstreams, dictLabelResolver);
  }
};
1423
/**
 * Partition records into buckets keyed by `field` (Map semantics: the
 * raw key value, so null and undefined stay distinct), run the reducer
 * spec per bucket, and emit one row per bucket in first-seen order.
 * Throws GroupCardinalityError when a new bucket would exceed the hard
 * ceiling; fires the one-shot warning near it.
 */
function groupAndReduce(records, field, spec) {
  const buckets = /* @__PURE__ */ new Map();
  for (const record of records) {
    const groupKey = readPath(record, field);
    const existing = buckets.get(groupKey);
    if (existing !== undefined) {
      existing.push(record);
      continue;
    }
    // Enforce the ceiling before admitting a new bucket.
    if (buckets.size >= GROUPBY_MAX_CARDINALITY) {
      throw new GroupCardinalityError(
        field,
        buckets.size + 1,
        GROUPBY_MAX_CARDINALITY
      );
    }
    buckets.set(groupKey, [record]);
  }
  if (buckets.size >= GROUPBY_WARN_CARDINALITY) {
    warnCardinalityApproaching(field, buckets.size);
  }
  const reducerNames = Object.keys(spec);
  const rows = [];
  for (const [groupKey, members] of buckets) {
    const state = {};
    for (const name of reducerNames) {
      state[name] = spec[name].init();
    }
    for (const member of members) {
      for (const name of reducerNames) {
        state[name] = spec[name].step(state[name], member);
      }
    }
    // Row = group key under the grouping field name + reducer outputs.
    const row = { [field]: groupKey };
    for (const name of reducerNames) {
      row[name] = spec[name].finalize(state[name]);
    }
    rows.push(row);
  }
  return rows;
}
1464
var GroupedAggregation = class {
  executeRecords;
  field;
  spec;
  upstreams;
  dictLabelResolver;
  constructor(executeRecords, field, spec, upstreams, dictLabelResolver) {
    this.executeRecords = executeRecords;
    this.field = field;
    this.spec = spec;
    this.upstreams = upstreams;
    this.dictLabelResolver = dictLabelResolver;
  }
  /** Execute the query, group, reduce — one result row per bucket. */
  run() {
    return groupAndReduce(this.executeRecords(), this.field, this.spec);
  }
  /**
   * Like run(), but when a `locale` is given and a dict label resolver
   * is attached (i.e. the grouping field is a dictKey), appends
   * `<field>Label` to each row. Rows whose group key is not a string or
   * has no dictionary entry get `<field>Label: undefined`. Without a
   * locale or resolver the rows are returned unchanged.
   */
  async runAsync(opts) {
    const rows = groupAndReduce(this.executeRecords(), this.field, this.spec);
    const resolve = this.dictLabelResolver;
    if (!opts?.locale || !resolve) return rows;
    const { locale, fallback } = opts;
    const labelKey = `${this.field}Label`;
    return Promise.all(
      rows.map(async (row) => {
        const groupKey = row[this.field];
        if (typeof groupKey !== "string") return row;
        return { ...row, [labelKey]: await resolve(groupKey, locale, fallback) };
      })
    );
  }
  /**
   * Reactive terminal: `LiveAggregation<R[]>` that re-runs the whole
   * group-and-reduce pipeline on every upstream change. Same
   * error-isolation and idempotent-stop contract as Aggregation.live()
   * — it delegates to the shared live implementation. Incremental
   * per-bucket maintenance (step inserts / remove deletes routed by
   * bucket key) is a future optimization the reducer protocol already
   * admits. Always call `live.stop()` when finished.
   */
  live() {
    return buildLiveAggregation(
      () => groupAndReduce(this.executeRecords(), this.field, this.spec),
      this.upstreams
    );
  }
};
1527
+
1528
// src/query/scan-builder.ts
// Default number of records requested per adapter page when a scan is
// built without an explicit pageSize (passed as `limit` to
// pageProvider.listPage by ScanBuilder's iterator).
var DEFAULT_SCAN_PAGE_SIZE = 100;
1530
var ScanBuilder = class _ScanBuilder {
  // Adapter-backed paginator: { listPage({ cursor?, limit }) }.
  pageProvider;
  // Records requested per page; defaults to DEFAULT_SCAN_PAGE_SIZE.
  pageSize;
  // AND-combined field/filter clauses evaluated per record.
  clauses;
  /**
   * Zero-or-more join legs to apply per record as the stream flows.
   * Each leg attaches the resolved right-side record (or null) under
   * its alias (streaming joins).
   *
   * Joins are evaluated AFTER clauses, so a `where()` filtered-out
   * record never triggers a right-side lookup. This is the same
   * ordering as `Query.toArray()` (clauses first, joins after) and
   * keeps the streaming path from doing wasted work.
   */
  joins;
  /**
   * Join resolution context. Required for `.join()` to translate a
   * field name into a target collection + ref mode and to resolve
   * the right-side `JoinableSource`. Optional because tests
   * construct ScanBuilder directly with synthetic page providers
   * that don't know about ref() — calling `.join()` without a
   * context throws with an actionable error.
   */
  joinContext;
  constructor(pageProvider, pageSize = DEFAULT_SCAN_PAGE_SIZE, clauses = [], joins = [], joinContext) {
    this.pageProvider = pageProvider;
    this.pageSize = pageSize;
    this.clauses = clauses;
    this.joins = joins;
    this.joinContext = joinContext;
  }
  /**
   * Add a field comparison. Runs per record as the scan stream
   * flows through, so non-matching records are dropped before they
   * reach `.aggregate()` or the iteration consumer. Multiple
   * `.where()` calls are AND-combined — same semantics as
   * `Query.where()`.
   *
   * Clauses cannot use the secondary-index fast path here because
   * the scan sources records from the adapter's paginator, not from
   * the in-memory cache where indexes live. Index-accelerated scans
   * are a future optimization — the current implementation
   * evaluates clauses per record in O(1) per clause.
   */
  where(field, op, value) {
    const clause = { type: "field", field, op, value };
    return new _ScanBuilder(
      this.pageProvider,
      this.pageSize,
      [...this.clauses, clause],
      this.joins,
      this.joinContext
    );
  }
  /**
   * Escape hatch: add an arbitrary predicate function. Same
   * non-serializable caveat as `Query.filter()` — filter clauses
   * don't round-trip through `toPlan()`. Prefer `.where()` when
   * possible.
   */
  filter(fn) {
    const clause = {
      type: "filter",
      fn
    };
    return new _ScanBuilder(
      this.pageProvider,
      this.pageSize,
      [...this.clauses, clause],
      this.joins,
      this.joinContext
    );
  }
  /**
   * Resolve a `ref()`-declared foreign key per record as the scan
   * stream flows, attaching the right-side record (or null) under
   * `opts.as` — streaming joins over `scan()`.
   *
   * ```ts
   * for await (const inv of invoices.scan().join('clientId', { as: 'client' })) {
   *   await processInvoice(inv) // inv.client is attached
   * }
   *
   * // Or terminate with .aggregate() for streaming joined aggregation
   * const { total } = await invoices.scan()
   *   .where('status', '==', 'open')
   *   .join('clientId', { as: 'client' })
   *   .aggregate({ total: sum('amount') })
   * ```
   *
   * **The key difference from eager `.join()`:** the LEFT
   * side streams page-by-page from the adapter and is never
   * materialized. Memory ceiling on the left is O(pageSize), not
   * O(rowCount). This is what makes streaming joins suitable for
   * collections that exceed the eager join's 50_000-row ceiling.
   *
   * **Right-side strategy** is auto-selected per leg:
   * - **Indexed** — right source exposes `lookupById`, so each
   *   left row costs O(1). This is the common path for
   *   Collection right sides, which back `lookupById` with a Map
   *   lookup over the in-memory cache. The right collection must
   *   be in eager mode (the same constraint as eager join's
   *   `querySourceForJoin`).
   * - **Hash** — right source has only `snapshot()`. Build a
   *   `Map<id, record>` once at iteration start, probe per left
   *   row. Same correctness, same per-row cost as the indexed
   *   path; the difference is the upfront cost of materializing
   *   the right side once.
   *
   * Both strategies hold the right side in memory for the duration
   * of the iteration. The "streaming" property applies to the LEFT
   * side only — true left-and-right streaming joins (where neither
   * side fits in memory) require a sort-merge join planner that's
   * out of scope here.
   *
   * **Ref-mode semantics** match eager `.join()` exactly:
   * - `strict` → throws `DanglingReferenceError` mid-stream
   *   when a left record points at a non-existent right id.
   *   The throw aborts the async iterator — consumers should
   *   wrap the `for await` in try/catch if they want to recover.
   * - `warn` → attaches `null` and emits a one-shot warning
   *   per unique dangling pair (deduped via the same warn
   *   channel as eager join).
   * - `cascade` → attaches `null` silently. A delete-time mode;
   *   dangling refs at read time are mid-flight or pre-existing
   *   orphans, not a DSL error.
   *
   * Left records with null/undefined FK values attach `null`
   * regardless of mode — same "no reference at all" policy as
   * eager join and write-time `enforceRefsOnPut`.
   *
   * **Multi-FK chaining** is supported via repeated `.join()`
   * calls: each leg resolves an independent ref. Each leg
   * independently picks its right-side strategy and applies its
   * own ref mode.
   *
   * **Joins ARE always applied** before an `.aggregate()`
   * terminal. Unlike `Query.aggregate()` (which skips joins
   * entirely as a projection-only short-circuit), the streaming
   * aggregation can't know whether the spec references a joined
   * field, so it always applies the join legs. Consumers who want
   * unjoined streaming aggregation should leave `.join()` off the
   * chain — the chain is composable for a reason.
   *
   * Forward-compat seam: every JoinLeg carries `partitionScope:
   * 'all'`, plumbed through but never read by the streaming
   * executor. Same seam as eager join.
   */
  join(field, opts) {
    if (!this.joinContext) {
      throw new Error(
        `ScanBuilder.join() requires a join context. Use collection.scan() to construct a join-capable scan instead of the ScanBuilder constructor directly (the direct constructor is only used for tests with synthetic page providers).`
      );
    }
    const descriptor = this.joinContext.resolveRef(field);
    if (!descriptor) {
      throw new Error(
        `ScanBuilder.join(): no ref() declared for field "${field}" on collection "${this.joinContext.leftCollection}". Add refs: { ${field}: ref('<target-collection>') } to the collection options, then retry.`
      );
    }
    const leg = {
      field,
      as: opts.as,
      target: descriptor.target,
      mode: descriptor.mode,
      strategy: void 0,
      maxRows: void 0,
      // Always 'all' for now; never read by the streaming
      // executor. Partition-aware scan joins will populate this
      // from where() predicates without changing the planner
      // shape.
      partitionScope: "all"
    };
    return new _ScanBuilder(
      this.pageProvider,
      this.pageSize,
      this.clauses,
      [...this.joins, leg],
      this.joinContext
    );
  }
  /**
   * Iterate the scan as an async iterable. Walks the page
   * provider's cursors forward until exhaustion, applying every
   * clause per record — only matching records are yielded.
   *
   * Backward-compatible with the previous async-generator `scan()`
   * return type for `for await … of` consumers.
   */
  async *[Symbol.asyncIterator]() {
    const joinResolvers = this.joins.length === 0 ? null : this.buildJoinResolvers();
    let page = await this.pageProvider.listPage({ limit: this.pageSize });
    while (true) {
      for (const record of page.items) {
        if (!this.recordMatches(record)) continue;
        if (joinResolvers === null) {
          yield record;
        } else {
          let attached = record;
          for (const resolver of joinResolvers) {
            attached = this.applyOneJoinStreaming(attached, resolver);
          }
          yield attached;
        }
      }
      // A strictly-null cursor terminates the walk. NOTE(review):
      // an `undefined` nextCursor would NOT terminate here — confirm
      // the page-provider contract always uses `null` for "no more".
      if (page.nextCursor === null) return;
      page = await this.pageProvider.listPage({
        cursor: page.nextCursor,
        limit: this.pageSize
      });
    }
  }
  /**
   * Per-leg right-side resolution state. Built once at iteration
   * start and reused for every left record. Two strategies:
   *
   * - `lookupById`: present when the right source exposes the
   *   hook directly (typical Collection right side). Per-row
   *   cost is O(1).
   * - `hashByPrimaryKey`: built from `snapshot()` when no
   *   lookupById. Per-row cost is O(1) after the upfront O(N)
   *   materialization. Same as eager join's hash strategy.
   *
   * `warnedKeys` is the per-leg dedup set for ref-mode 'warn'. We
   * key on `field→target:refId` so the same dangling pair only
   * warns once per iteration. The dedup is per-iteration, not
   * per-process — a long-running scan that re-iterates would warn
   * again, which is the desired behavior (the data may have
   * changed between iterations).
   */
  buildJoinResolvers() {
    if (!this.joinContext) {
      throw new Error(
        `ScanBuilder iterator: ${this.joins.length} join leg(s) present but no JoinContext attached. Use collection.scan() to construct a join-capable scan.`
      );
    }
    const resolvers = [];
    for (const leg of this.joins) {
      const source = this.joinContext.resolveSource(leg.target);
      if (!source) {
        throw new Error(
          `ScanBuilder.join() cannot resolve target collection "${leg.target}" (referenced from field "${leg.field}" on "${this.joinContext.leftCollection}"). Make sure the target collection has been opened via vault.collection() at least once before iterating the scan.`
        );
      }
      let lookupById = null;
      let hashByPrimaryKey = null;
      if (source.lookupById) {
        // Indexed strategy: delegate per-row lookups to the source.
        const fn = source.lookupById.bind(source);
        lookupById = (id) => fn(id);
      } else {
        // Hash strategy: materialize the right side once, keyed by
        // the coerced primary key.
        const map = /* @__PURE__ */ new Map();
        for (const record of source.snapshot()) {
          const rawId = readPath(record, "id");
          const key = coerceRefKey2(rawId);
          if (key !== null) map.set(key, record);
        }
        hashByPrimaryKey = map;
      }
      resolvers.push({
        leg,
        source,
        lookupById,
        hashByPrimaryKey,
        warnedKeys: /* @__PURE__ */ new Set()
      });
    }
    return resolvers;
  }
  /**
   * Resolve a single join leg for one left record and return the
   * left record with the joined field attached under
   * `leg.as`. Pure function over `(left, resolver)`; never
   * mutates the input.
   *
   * Ref-mode dispatch matches eager `applyJoins`:
   * - null/undefined FK → attach null silently (always allowed)
   * - dangling FK + strict → throw `DanglingReferenceError`
   * - dangling FK + warn → attach null, warn-once per pair
   * - dangling FK + cascade → attach null silently
   */
  applyOneJoinStreaming(left, resolver) {
    // Non-object left records (defensive) pass through untouched.
    if (left === null || typeof left !== "object") {
      return left;
    }
    const { leg } = resolver;
    const rawId = readPath(left, leg.field);
    const refKey = coerceRefKey2(rawId);
    let right = void 0;
    if (refKey !== null) {
      if (resolver.lookupById !== null) {
        right = resolver.lookupById(refKey);
      } else if (resolver.hashByPrimaryKey !== null) {
        right = resolver.hashByPrimaryKey.get(refKey);
      }
    }
    // Shallow copy so the caller's record is never mutated.
    const merged = {
      ...left
    };
    if (right === void 0) {
      if (refKey !== null && leg.mode === "strict") {
        throw new DanglingReferenceError({
          field: leg.field,
          target: leg.target,
          refId: refKey,
          message: `ScanBuilder.join() strict dangling: record references "${leg.target}:${refKey}" via field "${leg.field}", but no such record exists. Use ref() mode 'warn' or 'cascade' if dangling refs are acceptable, or run vault.checkIntegrity() to find and fix the orphans.`
        });
      }
      if (refKey !== null && leg.mode === "warn") {
        const dedupKey = `${leg.field}\u2192${leg.target}:${refKey}`;
        if (!resolver.warnedKeys.has(dedupKey)) {
          resolver.warnedKeys.add(dedupKey);
          console.warn(
            `[noy-db] ScanBuilder.join() encountered dangling ref in 'warn' mode: field "${leg.field}" \u2192 "${leg.target}:${refKey}" not found. Attaching null.`
          );
        }
      }
      merged[leg.as] = null;
    } else {
      merged[leg.as] = right;
    }
    return merged;
  }
  /**
   * Reduce the scan stream through a named set of reducers and
   * return the final aggregated shape.
   *
   * Memory is O(reducers): one mutable state slot per spec key.
   * Records flow through the pipeline one at a time via
   * `for await` and are discarded after their `step()` is applied
   * — never collected into an array. This is the distinguishing
   * property from `Query.aggregate()`, which materializes the full
   * match set first.
   *
   * Reuses the same reducer protocol as `Query.aggregate()`,
   * so `count()`, `sum(field)`, `avg(field)`, `min(field)`,
   * `max(field)` all work unchanged. The `{ seed }` parameter
   * plumbing is honored transparently — the reducer factories
   * currently ignore it, and the scan executor never touches the
   * per-reducer state construction.
   *
   * **Returns a Promise**, unlike `Query.aggregate().run()` which
   * is synchronous. The scan is inherently async because it walks
   * adapter pages, so the terminal has to be too. Consumers
   * destructure with await:
   *
   * ```ts
   * const { total, n } = await invoices.scan()
   *   .where('year', '==', 2025)
   *   .aggregate({ total: sum('amount'), n: count() })
   * ```
   *
   * **No `.live()` here.** `scan().aggregate().live()` would
   * require reconciling an unbounded streaming iteration with a
   * change-stream subscription — a design problem, not just a code
   * one. Consumers with huge collections and live needs should
   * narrow with `.where()` enough to fit in the 50k `query()`
   * limit and use `query().aggregate().live()` instead.
   */
  async aggregate(spec) {
    const keys = Object.keys(spec);
    const state = {};
    for (const key of keys) {
      state[key] = spec[key].init();
    }
    for await (const record of this) {
      for (const key of keys) {
        state[key] = spec[key].step(state[key], record);
      }
    }
    const result = {};
    for (const key of keys) {
      result[key] = spec[key].finalize(state[key]);
    }
    return result;
  }
  /**
   * Evaluate the clause list against a single record. Linear in
   * the clause count; short-circuits on first false. Clauses on a
   * scan are always re-evaluated per record — no index-accelerated
   * path, because the stream sources records from the adapter
   * paginator, not from the in-memory cache where indexes live.
   */
  recordMatches(record) {
    if (this.clauses.length === 0) return true;
    for (const clause of this.clauses) {
      if (!evaluateClause(record, clause)) return false;
    }
    return true;
  }
};
1923
// Normalize a foreign-key value to its canonical string form.
// Strings pass through, numbers and bigints stringify; everything
// else (null, undefined, booleans, objects, …) yields null,
// meaning "no usable reference key".
function coerceRefKey2(value) {
  switch (typeof value) {
    case "string":
      return value;
    case "number":
    case "bigint":
      return String(value);
    default:
      // Covers null (typeof "object"), undefined, booleans,
      // objects, symbols, and functions alike.
      return null;
  }
}
1929
// Annotate the CommonJS export names for ESM import in node:
// NOTE(review): the `0 &&` guard short-circuits, so this assignment is
// dead code at runtime. The literal presumably exists only so static
// analysis of the CJS output can discover the named exports — confirm
// against the bundler's annotation settings before removing.
0 && (module.exports = {
  Aggregation,
  CollectionIndexes,
  DEFAULT_JOIN_MAX_ROWS,
  GROUPBY_MAX_CARDINALITY,
  GROUPBY_WARN_CARDINALITY,
  GroupedAggregation,
  GroupedQuery,
  Query,
  ScanBuilder,
  applyJoins,
  avg,
  buildLiveAggregation,
  buildLiveQuery,
  count,
  evaluateClause,
  evaluateFieldClause,
  executePlan,
  groupAndReduce,
  max,
  min,
  readPath,
  reduceRecords,
  resetGroupByWarnings,
  resetJoinWarnings,
  sum
});
//# sourceMappingURL=index.cjs.map