@noy-db/hub 0.1.0-pre.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +197 -0
  3. package/dist/aggregate/index.cjs +476 -0
  4. package/dist/aggregate/index.cjs.map +1 -0
  5. package/dist/aggregate/index.d.cts +38 -0
  6. package/dist/aggregate/index.d.ts +38 -0
  7. package/dist/aggregate/index.js +53 -0
  8. package/dist/aggregate/index.js.map +1 -0
  9. package/dist/blobs/index.cjs +1480 -0
  10. package/dist/blobs/index.cjs.map +1 -0
  11. package/dist/blobs/index.d.cts +45 -0
  12. package/dist/blobs/index.d.ts +45 -0
  13. package/dist/blobs/index.js +48 -0
  14. package/dist/blobs/index.js.map +1 -0
  15. package/dist/bundle/index.cjs +436 -0
  16. package/dist/bundle/index.cjs.map +1 -0
  17. package/dist/bundle/index.d.cts +7 -0
  18. package/dist/bundle/index.d.ts +7 -0
  19. package/dist/bundle/index.js +40 -0
  20. package/dist/bundle/index.js.map +1 -0
  21. package/dist/chunk-2QR2PQTT.js +217 -0
  22. package/dist/chunk-2QR2PQTT.js.map +1 -0
  23. package/dist/chunk-4OWFYIDQ.js +79 -0
  24. package/dist/chunk-4OWFYIDQ.js.map +1 -0
  25. package/dist/chunk-5AATM2M2.js +90 -0
  26. package/dist/chunk-5AATM2M2.js.map +1 -0
  27. package/dist/chunk-ACLDOTNQ.js +543 -0
  28. package/dist/chunk-ACLDOTNQ.js.map +1 -0
  29. package/dist/chunk-BTDCBVJW.js +160 -0
  30. package/dist/chunk-BTDCBVJW.js.map +1 -0
  31. package/dist/chunk-CIMZBAZB.js +72 -0
  32. package/dist/chunk-CIMZBAZB.js.map +1 -0
  33. package/dist/chunk-E445ICYI.js +365 -0
  34. package/dist/chunk-E445ICYI.js.map +1 -0
  35. package/dist/chunk-EXQRC2L4.js +722 -0
  36. package/dist/chunk-EXQRC2L4.js.map +1 -0
  37. package/dist/chunk-FZU343FL.js +32 -0
  38. package/dist/chunk-FZU343FL.js.map +1 -0
  39. package/dist/chunk-GJILMRPO.js +354 -0
  40. package/dist/chunk-GJILMRPO.js.map +1 -0
  41. package/dist/chunk-GOUT6DND.js +1285 -0
  42. package/dist/chunk-GOUT6DND.js.map +1 -0
  43. package/dist/chunk-J66GRPNH.js +111 -0
  44. package/dist/chunk-J66GRPNH.js.map +1 -0
  45. package/dist/chunk-M2F2JAWB.js +464 -0
  46. package/dist/chunk-M2F2JAWB.js.map +1 -0
  47. package/dist/chunk-M5INGEFC.js +84 -0
  48. package/dist/chunk-M5INGEFC.js.map +1 -0
  49. package/dist/chunk-M62XNWRA.js +72 -0
  50. package/dist/chunk-M62XNWRA.js.map +1 -0
  51. package/dist/chunk-MR4424N3.js +275 -0
  52. package/dist/chunk-MR4424N3.js.map +1 -0
  53. package/dist/chunk-NPC4LFV5.js +132 -0
  54. package/dist/chunk-NPC4LFV5.js.map +1 -0
  55. package/dist/chunk-NXFEYLVG.js +311 -0
  56. package/dist/chunk-NXFEYLVG.js.map +1 -0
  57. package/dist/chunk-R36SIKES.js +79 -0
  58. package/dist/chunk-R36SIKES.js.map +1 -0
  59. package/dist/chunk-TDR6T5CJ.js +381 -0
  60. package/dist/chunk-TDR6T5CJ.js.map +1 -0
  61. package/dist/chunk-UF3BUNQZ.js +1 -0
  62. package/dist/chunk-UF3BUNQZ.js.map +1 -0
  63. package/dist/chunk-UQFSPSWG.js +1109 -0
  64. package/dist/chunk-UQFSPSWG.js.map +1 -0
  65. package/dist/chunk-USKYUS74.js +793 -0
  66. package/dist/chunk-USKYUS74.js.map +1 -0
  67. package/dist/chunk-XCL3WP6J.js +121 -0
  68. package/dist/chunk-XCL3WP6J.js.map +1 -0
  69. package/dist/chunk-XHFOENR2.js +680 -0
  70. package/dist/chunk-XHFOENR2.js.map +1 -0
  71. package/dist/chunk-ZFKD4QMV.js +430 -0
  72. package/dist/chunk-ZFKD4QMV.js.map +1 -0
  73. package/dist/chunk-ZLMV3TUA.js +490 -0
  74. package/dist/chunk-ZLMV3TUA.js.map +1 -0
  75. package/dist/chunk-ZRG4V3F5.js +17 -0
  76. package/dist/chunk-ZRG4V3F5.js.map +1 -0
  77. package/dist/consent/index.cjs +204 -0
  78. package/dist/consent/index.cjs.map +1 -0
  79. package/dist/consent/index.d.cts +24 -0
  80. package/dist/consent/index.d.ts +24 -0
  81. package/dist/consent/index.js +23 -0
  82. package/dist/consent/index.js.map +1 -0
  83. package/dist/crdt/index.cjs +152 -0
  84. package/dist/crdt/index.cjs.map +1 -0
  85. package/dist/crdt/index.d.cts +30 -0
  86. package/dist/crdt/index.d.ts +30 -0
  87. package/dist/crdt/index.js +24 -0
  88. package/dist/crdt/index.js.map +1 -0
  89. package/dist/crypto-IVKU7YTT.js +44 -0
  90. package/dist/crypto-IVKU7YTT.js.map +1 -0
  91. package/dist/delegation-XDJCBTI2.js +16 -0
  92. package/dist/delegation-XDJCBTI2.js.map +1 -0
  93. package/dist/dev-unlock-CeXic1xC.d.cts +263 -0
  94. package/dist/dev-unlock-KrKkcqD3.d.ts +263 -0
  95. package/dist/hash-9KO1BGxh.d.cts +63 -0
  96. package/dist/hash-ChfJjRjQ.d.ts +63 -0
  97. package/dist/history/index.cjs +1215 -0
  98. package/dist/history/index.cjs.map +1 -0
  99. package/dist/history/index.d.cts +62 -0
  100. package/dist/history/index.d.ts +62 -0
  101. package/dist/history/index.js +79 -0
  102. package/dist/history/index.js.map +1 -0
  103. package/dist/i18n/index.cjs +746 -0
  104. package/dist/i18n/index.cjs.map +1 -0
  105. package/dist/i18n/index.d.cts +38 -0
  106. package/dist/i18n/index.d.ts +38 -0
  107. package/dist/i18n/index.js +55 -0
  108. package/dist/i18n/index.js.map +1 -0
  109. package/dist/index-BRHBCmLt.d.ts +1940 -0
  110. package/dist/index-C8kQtmOk.d.ts +380 -0
  111. package/dist/index-DN-J-5wT.d.cts +1940 -0
  112. package/dist/index-DhjMjz7L.d.cts +380 -0
  113. package/dist/index.cjs +14756 -0
  114. package/dist/index.cjs.map +1 -0
  115. package/dist/index.d.cts +269 -0
  116. package/dist/index.d.ts +269 -0
  117. package/dist/index.js +6085 -0
  118. package/dist/index.js.map +1 -0
  119. package/dist/indexing/index.cjs +736 -0
  120. package/dist/indexing/index.cjs.map +1 -0
  121. package/dist/indexing/index.d.cts +36 -0
  122. package/dist/indexing/index.d.ts +36 -0
  123. package/dist/indexing/index.js +77 -0
  124. package/dist/indexing/index.js.map +1 -0
  125. package/dist/lazy-builder-BwEoBQZ9.d.ts +304 -0
  126. package/dist/lazy-builder-CZVLKh0Z.d.cts +304 -0
  127. package/dist/ledger-2NX4L7PN.js +33 -0
  128. package/dist/ledger-2NX4L7PN.js.map +1 -0
  129. package/dist/mime-magic-CBBSOkjm.d.cts +50 -0
  130. package/dist/mime-magic-CBBSOkjm.d.ts +50 -0
  131. package/dist/periods/index.cjs +1035 -0
  132. package/dist/periods/index.cjs.map +1 -0
  133. package/dist/periods/index.d.cts +21 -0
  134. package/dist/periods/index.d.ts +21 -0
  135. package/dist/periods/index.js +25 -0
  136. package/dist/periods/index.js.map +1 -0
  137. package/dist/predicate-SBHmi6D0.d.cts +161 -0
  138. package/dist/predicate-SBHmi6D0.d.ts +161 -0
  139. package/dist/query/index.cjs +1957 -0
  140. package/dist/query/index.cjs.map +1 -0
  141. package/dist/query/index.d.cts +3 -0
  142. package/dist/query/index.d.ts +3 -0
  143. package/dist/query/index.js +62 -0
  144. package/dist/query/index.js.map +1 -0
  145. package/dist/session/index.cjs +487 -0
  146. package/dist/session/index.cjs.map +1 -0
  147. package/dist/session/index.d.cts +45 -0
  148. package/dist/session/index.d.ts +45 -0
  149. package/dist/session/index.js +44 -0
  150. package/dist/session/index.js.map +1 -0
  151. package/dist/shadow/index.cjs +133 -0
  152. package/dist/shadow/index.cjs.map +1 -0
  153. package/dist/shadow/index.d.cts +16 -0
  154. package/dist/shadow/index.d.ts +16 -0
  155. package/dist/shadow/index.js +20 -0
  156. package/dist/shadow/index.js.map +1 -0
  157. package/dist/store/index.cjs +1069 -0
  158. package/dist/store/index.cjs.map +1 -0
  159. package/dist/store/index.d.cts +491 -0
  160. package/dist/store/index.d.ts +491 -0
  161. package/dist/store/index.js +34 -0
  162. package/dist/store/index.js.map +1 -0
  163. package/dist/strategy-BSxFXGzb.d.cts +110 -0
  164. package/dist/strategy-BSxFXGzb.d.ts +110 -0
  165. package/dist/strategy-D-SrOLCl.d.cts +548 -0
  166. package/dist/strategy-D-SrOLCl.d.ts +548 -0
  167. package/dist/sync/index.cjs +1062 -0
  168. package/dist/sync/index.cjs.map +1 -0
  169. package/dist/sync/index.d.cts +42 -0
  170. package/dist/sync/index.d.ts +42 -0
  171. package/dist/sync/index.js +28 -0
  172. package/dist/sync/index.js.map +1 -0
  173. package/dist/team/index.cjs +1233 -0
  174. package/dist/team/index.cjs.map +1 -0
  175. package/dist/team/index.d.cts +117 -0
  176. package/dist/team/index.d.ts +117 -0
  177. package/dist/team/index.js +39 -0
  178. package/dist/team/index.js.map +1 -0
  179. package/dist/tx/index.cjs +212 -0
  180. package/dist/tx/index.cjs.map +1 -0
  181. package/dist/tx/index.d.cts +20 -0
  182. package/dist/tx/index.d.ts +20 -0
  183. package/dist/tx/index.js +20 -0
  184. package/dist/tx/index.js.map +1 -0
  185. package/dist/types-BZpCZB8N.d.ts +7526 -0
  186. package/dist/types-Bfs0qr5F.d.cts +7526 -0
  187. package/dist/ulid-COREQ2RQ.js +9 -0
  188. package/dist/ulid-COREQ2RQ.js.map +1 -0
  189. package/dist/util/index.cjs +230 -0
  190. package/dist/util/index.cjs.map +1 -0
  191. package/dist/util/index.d.cts +77 -0
  192. package/dist/util/index.d.ts +77 -0
  193. package/dist/util/index.js +190 -0
  194. package/dist/util/index.js.map +1 -0
  195. package/package.json +244 -0
@@ -0,0 +1,381 @@
1
+ import {
2
+ readPath
3
+ } from "./chunk-M5INGEFC.js";
4
+ import {
5
+ GroupCardinalityError
6
+ } from "./chunk-ACLDOTNQ.js";
7
+
8
+ // src/aggregate/reducers.ts
9
/**
 * Reducer that tallies matching records. Field values are ignored —
 * the result is purely the number of records folded in.
 *
 * The `seed` option is accepted but deliberately unused by the
 * current executor (reserved for partition-aware aggregation).
 */
function count(opts) {
  // Touch the reserved option so linters don't flag it as dead.
  void opts?.seed;
  return {
    init() {
      return 0;
    },
    step(total) {
      return total + 1;
    },
    remove(total) {
      return total - 1;
    },
    finalize(total) {
      return total;
    }
  };
}
19
/**
 * Reducer that totals the numeric field `field` across matching
 * records. Values are read through `readNumber`, which coerces
 * non-finite/non-numeric values to 0 so one bad row never turns the
 * running total into NaN.
 */
function sum(field, opts) {
  void opts?.seed; // reserved for future partition carry-over
  return {
    init() {
      return 0;
    },
    step(total, record) {
      return total + readNumber(record, field);
    },
    remove(total, record) {
      return total - readNumber(record, field);
    },
    finalize(total) {
      return total;
    }
  };
}
29
/**
 * Reducer producing the arithmetic mean of `field`. Internal state is
 * a `{ sum, count }` pair so both halves can be maintained in O(1)
 * per step/remove; `finalize` performs the division. An empty result
 * set finalizes to `null` (not NaN) so downstream arithmetic is not
 * silently poisoned.
 */
function avg(field, opts) {
  void opts?.seed; // reserved for future partition carry-over
  return {
    init() {
      return { sum: 0, count: 0 };
    },
    step(acc, record) {
      return {
        sum: acc.sum + readNumber(record, field),
        count: acc.count + 1
      };
    },
    remove(acc, record) {
      return {
        sum: acc.sum - readNumber(record, field),
        count: acc.count - 1
      };
    },
    finalize(acc) {
      if (acc.count === 0) return null;
      return acc.sum / acc.count;
    }
  };
}
45
/**
 * Append one contributing value to a min/max state. Returns a fresh
 * state object — the input state is never mutated.
 */
function pushValue(state, value) {
  const values = state.values.concat(value);
  return { values };
}
48
/**
 * Drop one occurrence of `value` from a min/max state. Only the first
 * match is removed so duplicate contributions stay balanced with the
 * record count. Returns the original state object untouched when the
 * value is absent; otherwise a fresh state object.
 */
function removeValue(state, value) {
  const at = state.values.indexOf(value);
  if (at === -1) {
    return state;
  }
  const values = [
    ...state.values.slice(0, at),
    ...state.values.slice(at + 1)
  ];
  return { values };
}
55
/**
 * Reducer for the smallest numeric value of `field`. State holds the
 * full multiset of contributing values so `remove` stays correct even
 * when the current minimum leaves; `finalize` scans for the extremum
 * and yields `null` on an empty set.
 */
function min(field, opts) {
  void opts?.seed; // reserved for future partition carry-over
  return {
    init() {
      return { values: [] };
    },
    step(state, record) {
      return pushValue(state, readNumber(record, field));
    },
    remove(state, record) {
      return removeValue(state, readNumber(record, field));
    },
    finalize(state) {
      const { values } = state;
      if (values.length === 0) return null;
      let smallest = values[0];
      for (const candidate of values) {
        if (candidate < smallest) smallest = candidate;
      }
      return smallest;
    }
  };
}
73
/**
 * Reducer for the largest numeric value of `field`. Mirror image of
 * `min()` — same multiset state, same `null`-on-empty behavior, only
 * the comparison direction differs.
 */
function max(field, opts) {
  void opts?.seed; // reserved for future partition carry-over
  return {
    init() {
      return { values: [] };
    },
    step(state, record) {
      return pushValue(state, readNumber(record, field));
    },
    remove(state, record) {
      return removeValue(state, readNumber(record, field));
    },
    finalize(state) {
      const { values } = state;
      if (values.length === 0) return null;
      let largest = values[0];
      for (const candidate of values) {
        if (candidate > largest) largest = candidate;
      }
      return largest;
    }
  };
}
91
/**
 * Resolve `field` on `record` via `readPath` and coerce the result to
 * a finite number. Anything else (null, undefined, NaN, Infinity,
 * strings, objects) yields 0 so the numeric reducers never emit NaN
 * because of one malformed row.
 */
function readNumber(record, field) {
  const raw = readPath(record, field);
  if (typeof raw !== "number" || !Number.isFinite(raw)) {
    return 0;
  }
  return raw;
}
95
+
96
+ // src/aggregate/aggregation.ts
97
/**
 * Pure one-shot reduction over a record array: runs every reducer in
 * `spec` through its `init → step* → finalize` pipeline and returns
 * an object keyed by the spec's output field names.
 *
 * @param records - the matching record set to fold over
 * @param spec - named reducers, keyed by output field name
 * @returns the reduced shape, one entry per spec key
 */
function reduceRecords(records, spec) {
  // Hoist the key list once: recomputing Object.keys(spec) inside the
  // per-record loop allocated a fresh array for every record, and the
  // grouped path (groupAndReduce) already hoists it — this keeps both
  // paths consistent.
  const keys = Object.keys(spec);
  const state = {};
  for (const key of keys) {
    state[key] = spec[key].init();
  }
  for (const record of records) {
    for (const key of keys) {
      state[key] = spec[key].step(state[key], record);
    }
  }
  const result = {};
  for (const key of keys) {
    result[key] = spec[key].finalize(state[key]);
  }
  return result;
}
113
var LiveAggregationImpl = class {
  // Field initializers run before the constructor body, so these are
  // all in place when the constructor executes.
  recompute;
  value;
  error;
  listeners = /* @__PURE__ */ new Set();
  unsubscribes = [];
  stopped = false;
  /**
   * Eagerly compute the first value — a throw is captured into
   * `error` instead of propagating — then wire a refresh callback to
   * every upstream source.
   */
  constructor(recompute, upstreams) {
    this.recompute = recompute;
    try {
      this.value = recompute();
      this.error = undefined;
    } catch (e) {
      this.value = undefined;
      this.error = e;
    }
    for (const source of upstreams) {
      this.unsubscribes.push(source.subscribe(() => this.refresh()));
    }
  }
  /** Re-run the computation and notify listeners; no-op once stopped. */
  refresh() {
    if (this.stopped) return;
    try {
      this.value = this.recompute();
      this.error = undefined;
    } catch (e) {
      // Keep the last good value; surface the failure via `error`.
      this.error = e;
    }
    for (const notify of this.listeners) {
      try {
        notify();
      } catch (e) {
        // One bad listener must not starve the rest.
        console.warn("[noy-db] LiveAggregation listener threw:", e);
      }
    }
  }
  /** Register a change listener; returns its unsubscribe function. */
  subscribe(cb) {
    if (this.stopped) {
      return () => {
      };
    }
    this.listeners.add(cb);
    return () => {
      this.listeners.delete(cb);
    };
  }
  /** Tear down upstream subscriptions and drop listeners. Idempotent. */
  stop() {
    if (this.stopped) return;
    this.stopped = true;
    for (const unsub of this.unsubscribes) {
      try {
        unsub();
      } catch (e) {
        console.warn("[noy-db] LiveAggregation upstream unsubscribe threw:", e);
      }
    }
    this.unsubscribes.length = 0;
    this.listeners.clear();
  }
};
174
var Aggregation = class {
  executeRecords;
  spec;
  upstreams;
  /**
   * @param executeRecords - closure producing the current matching record set
   * @param spec - named reducers, keyed by output field name
   * @param upstreams - sources whose change notifications drive live mode
   */
  constructor(executeRecords, spec, upstreams) {
    this.executeRecords = executeRecords;
    this.spec = spec;
    this.upstreams = upstreams;
  }
  /**
   * Synchronous one-shot: execute the query and reduce the results.
   * The returned shape mirrors the spec — `{ total: sum('amount'),
   * n: count() }` yields `{ total: number, n: number }`.
   */
  run() {
    return reduceRecords(this.executeRecords(), this.spec);
  }
  /**
   * Reactive variant: a `LiveAggregation` whose value is recomputed
   * (full re-reduction) whenever any upstream notifies of a change.
   * The initial value is computed eagerly in the constructor, so
   * `live.value` is readable immediately.
   *
   * Always call `live.stop()` when finished to tear down upstream
   * subscriptions (e.g. in Vue's `onUnmounted`).
   *
   * Implementation note: every change triggers a full re-run today;
   * the reducer protocol's `remove()` admits O(1) incremental
   * maintenance as a future optimization without changing this API.
   */
  live() {
    return new LiveAggregationImpl(
      () => reduceRecords(this.executeRecords(), this.spec),
      this.upstreams
    );
  }
};
215
/**
 * Factory wrapper around `LiveAggregationImpl` so callers outside
 * this module can build a live aggregation from a recompute closure
 * plus upstream sources without referencing the class directly.
 */
function buildLiveAggregation(recompute, upstreams) {
  const live = new LiveAggregationImpl(recompute, upstreams);
  return live;
}
218
+
219
+ // src/aggregate/groupby.ts
220
// Distinct-group count at which a one-time console warning fires.
var GROUPBY_WARN_CARDINALITY = 1e4;
// Hard ceiling on distinct groups; exceeding it throws GroupCardinalityError.
var GROUPBY_MAX_CARDINALITY = 1e5;
// Fields already warned about, so each field warns at most once per process.
var warnedCardinalityFields = /* @__PURE__ */ new Set();
223
/**
 * Emit a one-time warning when a groupBy field's distinct-group count
 * approaches the hard ceiling. Later calls for the same field are
 * swallowed (via `warnedCardinalityFields`) so a hot live query does
 * not spam the console.
 */
function warnCardinalityApproaching(field, observed) {
  if (warnedCardinalityFields.has(field)) return;
  warnedCardinalityFields.add(field);
  const pct = Math.round(observed / GROUPBY_MAX_CARDINALITY * 100);
  console.warn(
    `[noy-db] .groupBy("${field}") produced ${observed} distinct groups, ${pct}% of the ${GROUPBY_MAX_CARDINALITY}-group ceiling. Narrow the query with .where() before grouping, or switch to a lower-cardinality field.`
  );
}
230
/**
 * Clear the per-field warn-once memory so cardinality warnings can
 * fire again — presumably intended for test isolation between cases;
 * confirm against callers.
 */
function resetGroupByWarnings() {
  warnedCardinalityFields.clear();
}
233
var GroupedQuery = class {
  executeRecords;
  field;
  upstreams;
  dictLabelResolver;
  /**
   * @param executeRecords - closure producing the current matching record set
   * @param field - the grouping field passed to `readPath`
   * @param upstreams - sources whose change notifications drive live mode
   * @param dictLabelResolver - optional resolver used by `runAsync` label lookup
   */
  constructor(executeRecords, field, upstreams, dictLabelResolver) {
    this.executeRecords = executeRecords;
    this.field = field;
    this.upstreams = upstreams;
    this.dictLabelResolver = dictLabelResolver;
  }
  /**
   * Attach an aggregation spec to this grouping. Returns a
   * `GroupedAggregation` whose terminals (`run()`, `runAsync()`,
   * `live()`) mirror the non-grouped `.aggregate()` wrapper, but
   * yield an array — one row per bucket — instead of a single
   * reduced object.
   */
  aggregate(spec) {
    return new GroupedAggregation(
      this.executeRecords,
      this.field,
      spec,
      this.upstreams,
      this.dictLabelResolver
    );
  }
};
260
/**
 * Partition `records` into buckets by the value `readPath` yields for
 * `field`, then run the reducer spec over each bucket. Returns one
 * row per bucket: the group key stored under `field`, plus one entry
 * per spec key.
 *
 * Throws `GroupCardinalityError` when the distinct-group count would
 * exceed `GROUPBY_MAX_CARDINALITY`; logs a one-time warning once the
 * count reaches `GROUPBY_WARN_CARDINALITY`.
 */
function groupAndReduce(records, field, spec) {
  const buckets = new Map();
  for (const record of records) {
    const groupKey = readPath(record, field);
    const existing = buckets.get(groupKey);
    if (existing !== void 0) {
      existing.push(record);
      continue;
    }
    // New bucket — enforce the hard cardinality ceiling before adding.
    if (buckets.size >= GROUPBY_MAX_CARDINALITY) {
      throw new GroupCardinalityError(
        field,
        buckets.size + 1,
        GROUPBY_MAX_CARDINALITY
      );
    }
    buckets.set(groupKey, [record]);
  }
  if (buckets.size >= GROUPBY_WARN_CARDINALITY) {
    warnCardinalityApproaching(field, buckets.size);
  }
  const keys = Object.keys(spec);
  const rows = [];
  for (const [groupKey, members] of buckets) {
    // init → step* → finalize per bucket, same pipeline as the
    // non-grouped reduction.
    const state = {};
    for (const key of keys) {
      state[key] = spec[key].init();
    }
    for (const record of members) {
      for (const key of keys) {
        state[key] = spec[key].step(state[key], record);
      }
    }
    const row = { [field]: groupKey };
    for (const key of keys) {
      row[key] = spec[key].finalize(state[key]);
    }
    rows.push(row);
  }
  return rows;
}
301
var GroupedAggregation = class {
  executeRecords;
  field;
  spec;
  upstreams;
  dictLabelResolver;
  /**
   * @param executeRecords - closure producing the current matching record set
   * @param field - the grouping field
   * @param spec - named reducers applied per bucket
   * @param upstreams - sources whose change notifications drive live mode
   * @param dictLabelResolver - optional `(key, locale, fallback)` resolver
   */
  constructor(executeRecords, field, spec, upstreams, dictLabelResolver) {
    this.executeRecords = executeRecords;
    this.field = field;
    this.spec = spec;
    this.upstreams = upstreams;
    this.dictLabelResolver = dictLabelResolver;
  }
  /** Execute the query, bucket by the grouping field, reduce, return rows. */
  run() {
    return groupAndReduce(this.executeRecords(), this.field, this.spec);
  }
  /**
   * Like `run()`, but when `opts.locale` is supplied and this
   * grouping has a dictionary label resolver, each row additionally
   * gets a `<field>Label` entry resolved from its group key. Without
   * a locale (or resolver) the rows are returned unchanged,
   * identical to `.run()`.
   *
   * Rows whose group key is not a string pass through untouched;
   * keys the resolver cannot find yield `<field>Label: undefined`.
   */
  async runAsync(opts) {
    const rows = groupAndReduce(this.executeRecords(), this.field, this.spec);
    const resolve = this.dictLabelResolver;
    if (!opts?.locale || !resolve) return rows;
    const locale = opts.locale;
    const fallback = opts.fallback;
    const labelKey = `${this.field}Label`;
    const labeled = rows.map(async (row) => {
      const groupKey = row[this.field];
      if (typeof groupKey !== "string") return row;
      const label = await resolve(groupKey, locale, fallback);
      return { ...row, [labelKey]: label };
    });
    return Promise.all(labeled);
  }
  /**
   * Reactive variant: a `LiveAggregation` whose value is the full
   * grouped row array, recomputed from scratch on every upstream
   * change (same error-isolation and idempotent-stop contract as
   * `Aggregation.live()` — it delegates to the same implementation
   * via `buildLiveAggregation`).
   *
   * Per-bucket incremental maintenance via the reducer protocol's
   * `remove()` is a future optimization; today every change re-runs
   * the whole pipeline. Always call `live.stop()` when finished.
   */
  live() {
    return buildLiveAggregation(
      () => groupAndReduce(this.executeRecords(), this.field, this.spec),
      this.upstreams
    );
  }
};
364
+
365
+ export {
366
+ count,
367
+ sum,
368
+ avg,
369
+ min,
370
+ max,
371
+ reduceRecords,
372
+ Aggregation,
373
+ buildLiveAggregation,
374
+ GROUPBY_WARN_CARDINALITY,
375
+ GROUPBY_MAX_CARDINALITY,
376
+ resetGroupByWarnings,
377
+ GroupedQuery,
378
+ groupAndReduce,
379
+ GroupedAggregation
380
+ };
381
+ //# sourceMappingURL=chunk-TDR6T5CJ.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/aggregate/reducers.ts","../src/aggregate/aggregation.ts","../src/aggregate/groupby.ts"],"sourcesContent":["/**\n * Aggregation reducers for the query DSL.\n *\n * the reducer protocol plus five built-in factories\n * (`count`, `sum`, `avg`, `min`, `max`) consumed by `Query.aggregate()`\n * and, in the future, `Scan.aggregate()`. Every factory accepts\n * an optional `{ seed }` parameter that is plumbed through the\n * protocol but unused by the executor — that's the load-bearing\n * half of constraint #2. When partition-aware aggregation\n * lands, the seed carries the previous partition's running total into\n * the next partition without requiring a protocol change.\n *\n * Reducers are intentionally generic over their internal state type\n * `S` so compound reducers (avg keeps `{sum, count}`, min/max keep a\n * value bag) can model internal bookkeeping without leaking the\n * implementation through the accumulator's public shape. `finalize`\n * collapses `S` back into the user-visible `R`.\n *\n * Reducers are pure data — `init` / `step` / `finalize` / optional\n * `remove` are stateless functions that receive and return `S`. This\n * is the shape that admits O(1) incremental maintenance in a future\n * optimization (delta-aware `LiveAggregation` applies `step` or\n * `remove` per delta), without blocking the simpler \"full re-run on\n * source change\" that ships.\n */\n\nimport { readPath } from '../query/predicate.js'\n\n/**\n * A single reducer: factory-produced, ready to plug into an\n * `.aggregate()` spec.\n *\n * Type parameters:\n * - `R` — user-visible result type (what the aggregation returns\n * for this slot, e.g. `number` for `sum()`)\n * - `S` — internal state type, defaults to `R` for simple reducers\n * that don't need compound bookkeeping\n *\n * A reducer is stateless: every method is pure over `S`. 
`init()` is\n * called once per aggregation run to build the initial state; `step()`\n * folds a record into the state; `remove()` (optional) un-folds a\n * record, enabling incremental live maintenance; `finalize()` reads\n * the final answer out of the state at the end of the run.\n */\nexport interface Reducer<R, S = R> {\n /** Build the initial state for a fresh aggregation run. */\n init(): S\n /** Fold a record into the state. Returns the new state. */\n step(state: S, record: unknown): S\n /**\n * Un-fold a record from the state. Returns the new state.\n *\n * Optional — reducers without `remove` cannot be maintained\n * incrementally and must be re-run from scratch when the underlying\n * record set changes. `sum`, `count`, `avg` implement `remove` in\n * O(1); `min` and `max` implement it in O(N) worst case (when the\n * extremum itself is removed and the next extremum must be\n * recomputed from the remaining contributing values).\n */\n remove?(state: S, record: unknown): S\n /** Collapse the internal state into the user-visible result. */\n finalize(state: S): R\n}\n\n/**\n * Common options accepted by every reducer factory.\n *\n * `seed` — optional initial value for the internal state. **Unused by\n * the executor**, plumbed through the protocol for constraint\n * #2 (partition-aware aggregation seam). In, partitioned\n * aggregations will pass the previous partition's carry as `seed` so\n * a long time series can be rolled forward one partition at a time\n * without re-aggregating closed partitions.\n *\n * always uses `init()` with the factory's zero value, regardless\n * of whether `seed` was passed. Do not remove the parameter — that's\n * the whole point of having it exist now.\n */\nexport interface ReducerOptions<TSeed = unknown> {\n /** constraint #2 — seed is plumbed through but unused in. 
*/\n readonly seed?: TSeed\n}\n\n// ---------------------------------------------------------------------------\n// Factories\n// ---------------------------------------------------------------------------\n\n/**\n * Count the number of records that match the query. Ignores field\n * values entirely — the count is over the number of records, not over\n * the number of non-null field values in any column.\n */\nexport function count(opts?: ReducerOptions<number>): Reducer<number> {\n // Seed captured on the closure but unused at execution time in\n //. The reference in _seed keeps lint happy.\n const _seed = opts?.seed\n void _seed\n return {\n init: () => 0,\n step: (state) => state + 1,\n remove: (state) => state - 1,\n finalize: (state) => state,\n }\n}\n\n/**\n * Sum a numeric field across all matching records. Non-number values\n * at the field path are coerced to 0 — consumers who want a different\n * behavior (throw, skip, treat as NaN) should filter upstream via\n * `.where()` or write a custom reducer.\n */\nexport function sum(\n field: string,\n opts?: ReducerOptions<number>,\n): Reducer<number> {\n const _seed = opts?.seed\n void _seed\n return {\n init: () => 0,\n step: (state, record) => state + readNumber(record, field),\n remove: (state, record) => state - readNumber(record, field),\n finalize: (state) => state,\n }\n}\n\n/**\n * Arithmetic mean of a numeric field across all matching records.\n *\n * Returns `null` for an empty result set (zero records is not a\n * well-defined denominator — returning NaN would poison downstream\n * arithmetic, and throwing would force every consumer to wrap in\n * try/catch just to handle \"no matches\"). Consumers who want an\n * explicit zero should coalesce with `?? 0`.\n *\n * Internal state is `{sum, count}` so the running average can be\n * maintained incrementally — on each delta, both fields update in\n * O(1) and `finalize` divides. 
Directly storing `avg` as state would\n * not admit incremental removal without also tracking count.\n */\nexport function avg(\n field: string,\n opts?: ReducerOptions<{ sum: number; count: number }>,\n): Reducer<number | null, { sum: number; count: number }> {\n const _seed = opts?.seed\n void _seed\n return {\n init: () => ({ sum: 0, count: 0 }),\n step: (state, record) => ({\n sum: state.sum + readNumber(record, field),\n count: state.count + 1,\n }),\n remove: (state, record) => ({\n sum: state.sum - readNumber(record, field),\n count: state.count - 1,\n }),\n finalize: (state) => (state.count === 0 ? null : state.sum / state.count),\n }\n}\n\ninterface MinMaxState {\n /**\n * Multiset of contributing field values. Stored as a plain array\n * because we need to support `remove` and a plain array gives us\n * O(1) push + O(N) worst-case removal — which matches the\n * documented min/max removal complexity. A sorted structure would\n * let us drop the O(N) rescan but adds complexity that doesn't\n * need; consumers hitting the O(N) ceiling should file an issue.\n */\n readonly values: number[]\n}\n\nfunction pushValue(state: MinMaxState, value: number): MinMaxState {\n return { values: [...state.values, value] }\n}\n\nfunction removeValue(state: MinMaxState, value: number): MinMaxState {\n // Remove the first matching value — duplicates are fine, we only\n // need to drop one instance per `remove()` call so the multiset\n // count stays consistent with the record count.\n const idx = state.values.indexOf(value)\n if (idx < 0) return state\n const next = state.values.slice()\n next.splice(idx, 1)\n return { values: next }\n}\n\n/**\n * Smallest numeric value of a field across all matching records.\n * Returns `null` for an empty result set. 
See `avg()` for the\n * reasoning on `null` vs NaN vs throwing.\n *\n * Incremental complexity: O(1) for `step`, O(N) worst case for\n * `remove` when the current minimum is removed (the state holds the\n * full multiset of contributing values and `finalize` scans for the\n * new minimum). Consumers with very large result sets and frequent\n * removals of the current extremum should either accept the cost or\n * wait for a future optimization.\n */\nexport function min(\n field: string,\n opts?: ReducerOptions<number>,\n): Reducer<number | null, MinMaxState> {\n const _seed = opts?.seed\n void _seed\n return {\n init: () => ({ values: [] }),\n step: (state, record) => pushValue(state, readNumber(record, field)),\n remove: (state, record) => removeValue(state, readNumber(record, field)),\n finalize: (state) => {\n if (state.values.length === 0) return null\n let out = state.values[0]!\n for (let i = 1; i < state.values.length; i++) {\n const v = state.values[i]!\n if (v < out) out = v\n }\n return out\n },\n }\n}\n\n/**\n * Largest numeric value of a field across all matching records.\n * Mirror of `min()` — see that doc for semantics, null-on-empty\n * behavior, and the O(N) removal caveat.\n */\nexport function max(\n field: string,\n opts?: ReducerOptions<number>,\n): Reducer<number | null, MinMaxState> {\n const _seed = opts?.seed\n void _seed\n return {\n init: () => ({ values: [] }),\n step: (state, record) => pushValue(state, readNumber(record, field)),\n remove: (state, record) => removeValue(state, readNumber(record, field)),\n finalize: (state) => {\n if (state.values.length === 0) return null\n let out = state.values[0]!\n for (let i = 1; i < state.values.length; i++) {\n const v = state.values[i]!\n if (v > out) out = v\n }\n return out\n },\n }\n}\n\n// ---------------------------------------------------------------------------\n// Helpers\n// ---------------------------------------------------------------------------\n\n/**\n * Read a numeric field 
from a record. Non-number values (null,\n * undefined, strings, objects) coerce to 0 so sum/avg/min/max don't\n * produce NaN on one bad row. Consumers who want strict typing should\n * validate upstream with Standard Schema, which NOYDB already runs on\n * every `put()`.\n */\nfunction readNumber(record: unknown, field: string): number {\n const value = readPath(record, field)\n return typeof value === 'number' && Number.isFinite(value) ? value : 0\n}\n","/**\n * Aggregate execution — the runtime behind `Query.aggregate()`.\n *\n * takes an `AggregateSpec` (a record of named reducers\n * built from `reducers.ts`) and runs every reducer over the records\n * produced by the underlying query. Two terminal surfaces:\n *\n * - `.run(): R` — synchronous one-shot reduction. Matches the\n * existing `Query.toArray()` / `.first()` / `.count()` style.\n * - `.live(): LiveAggregation<R>` — reactive primitive that\n * re-runs the reduction whenever the query's source notifies of\n * a change. uses naive full re-run; incremental delta\n * maintenance is admitted by the reducer protocol (`remove()`)\n * but not wired to the executor yet — a follow-up optimization\n * can switch from full re-run to delta-based without breaking\n * the public API. Consumers get correct, reactive values today.\n *\n * The `Aggregation<R>` wrapper is deliberately tiny — it exists so\n * `.aggregate(spec)` can be chained with either `.run()` or `.live()`\n * without the builder needing two separate terminal methods. It\n * holds the closure over the query execution (produces the current\n * matching record set) and the spec, and stitches them together in\n * either mode.\n *\n * This file depends ONLY on `reducers.ts` — it has no knowledge of\n * the `Query` class. Tests can therefore exercise the reduction\n * surface with plain record arrays, without spinning up a Collection.\n */\n\nimport type { Reducer } from './reducers.js'\n\n/**\n * A named set of reducers, keyed by output field name. 
Each key\n * becomes a field on the aggregated result.\n *\n * ```ts\n * const spec = {\n * total: sum('amount'),\n * n: count(),\n * avgAmount: avg('amount'),\n * }\n * ```\n */\nexport type AggregateSpec = Readonly<Record<string, Reducer<unknown, unknown>>>\n\n/**\n * Map an `AggregateSpec` to its reduced result shape — each key\n * carries the finalized result type from its reducer. A spec built\n * from `{ total: sum('amount'), n: count() }` yields a result of\n * `{ total: number, n: number }`.\n *\n * This uses a mapped type with a conditional to extract `R` from\n * each `Reducer<R, _>`. The `infer` captures the user-visible result\n * type, discarding the internal state type `S`.\n */\nexport type AggregateResult<Spec extends AggregateSpec> = {\n [K in keyof Spec]: Spec[K] extends Reducer<infer R, unknown> ? R : never\n}\n\n/**\n * Pure reduction over a record array. Runs every reducer's\n * `init → step* → finalize` pipeline exactly once over the records.\n *\n * Called by `Aggregation.run()` and by the live-mode refresh path.\n * Exported for tests and for future `scan().aggregate()` reuse\n * — the streaming path will call the same reducer protocol with a\n * per-page loop instead of a single array.\n */\nexport function reduceRecords<Spec extends AggregateSpec>(\n records: readonly unknown[],\n spec: Spec,\n): AggregateResult<Spec> {\n // Per-slot state, keyed by the spec's output field name.\n const state: Record<string, unknown> = {}\n for (const key of Object.keys(spec)) {\n state[key] = spec[key]!.init()\n }\n for (const record of records) {\n for (const key of Object.keys(spec)) {\n state[key] = spec[key]!.step(state[key], record)\n }\n }\n const result: Record<string, unknown> = {}\n for (const key of Object.keys(spec)) {\n result[key] = spec[key]!.finalize(state[key])\n }\n return result as AggregateResult<Spec>\n}\n\n/**\n * A minimal reactive primitive for aggregation results.\n *\n * Same spirit as the `LiveQuery` in : frame-agnostic, a 
plain\n * object with `value` / `error` fields and a `subscribe(cb)`\n * notification channel that Vue / React / Solid adapters wrap in\n * their own primitive. Intentionally NOT a Promise — aggregations\n * have a well-defined \"current value\" at every instant, and the\n * reactive consumer wants to read that value synchronously.\n *\n * Error semantics mirror `LiveQuery`: if a re-run throws, the\n * previous successful `value` is preserved and the error is stored\n * in `error` so consumers can render an error state without losing\n * the last-known-good result. The throw does NOT propagate out of\n * the source's change handler (which would tear down the upstream\n * emitter).\n *\n * `stop()` tears down the upstream subscription. It is idempotent —\n * calling it multiple times is safe — and subscribe calls after\n * stop are no-ops (they immediately return a no-op unsubscribe).\n * Always call `stop()` when done; Vue's `onUnmounted` is the\n * canonical place. Raw consumers must do it themselves.\n */\nexport interface LiveAggregation<R> {\n /** Current reduced value. Undefined only if the first compute threw. */\n readonly value: R | undefined\n /** Last execution error, if any. Cleared on the next successful run. */\n readonly error: unknown\n /** Notify on every recomputation (success or error). Returns unsubscribe. */\n subscribe(cb: () => void): () => void\n /** Tear down the upstream subscription. Idempotent. */\n stop(): void\n}\n\n/**\n * Upstream change-notification hook for live aggregation.\n *\n * Matches the shape that `QuerySource.subscribe` already uses — a\n * single method that accepts a callback and returns an unsubscribe\n * function. The `Aggregation` wrapper collects upstreams from the\n * query's source and wires them into a single re-run trigger.\n */\nexport interface AggregationUpstream {\n subscribe(cb: () => void): () => void\n}\n\n/**\n * Internal implementation of `LiveAggregation`. 
Not exported —\n * consumers get the interface only. The class wraps a `recompute`\n * closure (which runs the full reduction and returns the new value)\n * and a list of upstreams (sources whose changes should trigger a\n * re-run).\n *\n * Error isolation: if an individual listener callback throws, the\n * other listeners still fire and the error is logged to the warn\n * channel. This matches `LiveQuery` from and keeps one misbehaving\n * consumer from tearing down the whole live aggregation.\n */\nclass LiveAggregationImpl<R> implements LiveAggregation<R> {\n public value: R | undefined\n public error: unknown\n private readonly listeners = new Set<() => void>()\n private readonly unsubscribes: Array<() => void> = []\n private stopped = false\n\n constructor(\n private readonly recompute: () => R,\n upstreams: readonly AggregationUpstream[],\n ) {\n // Initial computation — surface any error through the `error`\n // field rather than letting the constructor throw, so consumers\n // can always construct a LiveAggregation and check its state\n // afterwards. Throwing from a constructor would force every\n // caller to wrap in try/catch, which is the opposite of the\n // \"reactive value with error state\" ergonomics we want.\n try {\n this.value = recompute()\n this.error = undefined\n } catch (err) {\n this.value = undefined\n this.error = err\n }\n\n // Wire up upstream subscriptions. Each one triggers a full\n // recomputation; we don't attempt incremental updates in.\n for (const upstream of upstreams) {\n const unsub = upstream.subscribe(() => this.refresh())\n this.unsubscribes.push(unsub)\n }\n }\n\n private refresh(): void {\n if (this.stopped) return\n try {\n this.value = this.recompute()\n this.error = undefined\n } catch (err) {\n // Preserve the previous successful value — consumers render an\n // error state using `error` without losing the last-known-good\n // number. 
This matches LiveQuery's error-preservation contract.\n this.error = err\n }\n for (const listener of this.listeners) {\n try {\n listener()\n } catch (err) {\n // Isolate listener errors so one bad consumer can't tear\n // down every other subscriber on the same aggregation.\n console.warn('[noy-db] LiveAggregation listener threw:', err)\n }\n }\n }\n\n subscribe(cb: () => void): () => void {\n if (this.stopped) {\n // No-op after stop. Returning a harmless unsubscribe lets\n // consumers use the same teardown pattern unconditionally.\n return () => {}\n }\n this.listeners.add(cb)\n return () => {\n this.listeners.delete(cb)\n }\n }\n\n stop(): void {\n if (this.stopped) return\n this.stopped = true\n for (const unsub of this.unsubscribes) {\n try {\n unsub()\n } catch (err) {\n console.warn('[noy-db] LiveAggregation upstream unsubscribe threw:', err)\n }\n }\n this.unsubscribes.length = 0\n this.listeners.clear()\n }\n}\n\n/**\n * Chainable wrapper returned by `Query.aggregate(spec)`. Holds the\n * execute-records closure and the spec; terminal methods (`run`,\n * `live`) stitch them together in either mode.\n *\n * Why a wrapper instead of two terminal methods on `Query` directly?\n *\n * The `.aggregate(spec)` call is where the spec is bound — both\n * `.run()` and `.live()` need the same spec, and the consumer's\n * fluent style is `query.where(...).aggregate(spec).run()` or\n * `.aggregate(spec).live()`. 
Wrapping lets the spec be named once\n * and reused for either terminal, and keeps the `Query` class\n * from growing a pair of near-duplicate method overloads\n * (`aggregateRun` / `aggregateLive`) that would be harder to\n * discover.\n */\nexport class Aggregation<R> {\n constructor(\n private readonly executeRecords: () => readonly unknown[],\n private readonly spec: AggregateSpec,\n private readonly upstreams: readonly AggregationUpstream[],\n ) {}\n\n /**\n * Execute the query and reduce the results synchronously.\n * Returns the reduced shape matching the spec — e.g. a spec of\n * `{ total: sum('amount'), n: count() }` returns\n * `{ total: number, n: number }`.\n */\n run(): R {\n return reduceRecords(this.executeRecords(), this.spec) as unknown as R\n }\n\n /**\n * Build a reactive `LiveAggregation<R>` that re-runs the reduction\n * whenever any upstream source notifies of a change. The initial\n * value is computed eagerly in the constructor, so consumers can\n * read `live.value` immediately after calling `.live()`.\n *\n * Always call `live.stop()` when finished — it tears down the\n * upstream subscriptions. Vue's `onUnmounted` is the canonical\n * place.\n *\n * **Implementation note:** every upstream change triggers a full\n * re-reduction. Incremental maintenance (O(1) per delta for\n * sum/count/avg via the reducer protocol's `remove()` method) is a\n * planned follow-up optimization — the protocol already supports\n * it, but the executor doesn't drive it yet. Consumers get\n * correct, reactive values today; future PRs can switch to\n * delta-based maintenance without changing this API.\n */\n live(): LiveAggregation<R> {\n const recompute = (): R =>\n reduceRecords(this.executeRecords(), this.spec) as unknown as R\n return new LiveAggregationImpl<R>(recompute, this.upstreams)\n }\n}\n\n/**\n * Build a `LiveAggregation<V>` from a recompute closure and a list\n * of upstreams. 
Exposed so sibling files in the query DSL\n * (currently `groupby.ts`) can reuse the reactive primitive\n * without reaching into `LiveAggregationImpl` directly. This keeps\n * the implementation class private while still allowing planned\n * composition with `.groupBy().aggregate().live()`.\n */\nexport function buildLiveAggregation<V>(\n recompute: () => V,\n upstreams: readonly AggregationUpstream[],\n): LiveAggregation<V> {\n return new LiveAggregationImpl<V>(recompute, upstreams)\n}\n","/**\n * Query DSL `.groupBy()` —.\n *\n * Chains after `.where()` / `.filter()` / `.or()` / `.and()` on a\n * Query and before a reducer spec, so consumers can compute\n * per-bucket aggregates without folding in userland:\n *\n * ```ts\n * const byClient = invoices.query()\n * .where('status', '==', 'open')\n * .groupBy('clientId')\n * .aggregate({ total: sum('amount'), n: count() })\n * .run()\n * // → [ { clientId: 'c1', total: 5250, n: 3 }, … ]\n * ```\n *\n * Execution pipeline:\n *\n * 1. Run the query's where/filter clauses (same candidate /\n * filter pipeline as `.aggregate()` directly on Query).\n * 2. Partition the matching records into buckets keyed by\n * `readPath(record, field)`. JS `Map` preserves insertion\n * order, so the first-seen key for a bucket determines its\n * position in the result array — consumers who want a\n * specific ordering should `.sort()` downstream.\n * 3. Enforce cardinality: warn once per field at 10% of the cap\n * (10_000 buckets), throw `GroupCardinalityError` at 100% of\n * the cap (100_000 buckets).\n * 4. For each bucket, build a per-group reducer state and\n * step every record in the bucket through it.\n * 5. Emit one result row per bucket, shaped as\n * `{ [field]: key, ...reduced }`.\n *\n * **Null / undefined keys:** `Map` distinguishes `null` from\n * `undefined`, so records with a missing group field get their own\n * bucket, and records with an explicit `null` value get a separate\n * bucket from that. 
Consumers who want them merged can coalesce\n * upstream with `.filter()`.\n *\n * **Live mode:** `.groupBy().aggregate().live()` re-runs the full\n * grouping pipeline on every source change. Per-bucket incremental\n * delta maintenance is a future optimization — the reducer\n * protocol's `remove()` hook admits it, but ships naive\n * re-grouping for simplicity.\n *\n * **Type-level stable-key narrowing:** when\n * `dictKey` lands, `groupBy<DictField>()` will narrow the group key\n * type to the stable dictionary key rather than the resolved locale\n * label. That prevents grouping by the locale-resolved label,\n * which would produce different buckets per reader. types the\n * key as `unknown` at the result shape; the dictKey narrowing\n * layers on top without an API break.\n *\n * Partition-awareness seam: when partitioned collections land,\n * per-partition grouping will need to merge sub-results across\n * partitions. The reducer protocol's `{ seed }` parameter\n * (already plumbed through in `reducers.ts`) is the mechanism —\n * groupBy doesn't need its own seam for the moment, because it\n * delegates to the reducer protocol for all per-bucket state.\n */\n\nimport { readPath } from '../query/predicate.js'\nimport type {\n AggregateSpec,\n AggregateResult,\n AggregationUpstream,\n LiveAggregation,\n} from './aggregation.js'\nimport { buildLiveAggregation } from './aggregation.js'\nimport { GroupCardinalityError } from '../errors.js'\n\n/**\n * Cardinality thresholds for `.groupBy()`. The warn threshold gives\n * consumers a heads-up before the hard error; the cap is a fixed\n * constant in (not overridable). 
A `{ maxGroups }` override\n * can be added later without a break if a real consumer asks.\n */\nexport const GROUPBY_WARN_CARDINALITY = 10_000\nexport const GROUPBY_MAX_CARDINALITY = 100_000\n\n/**\n * One-shot warning dedup per-field — reactive dashboards\n * re-executing the same grouped query should produce the warning\n * once, not once per re-fire. Keyed on the grouping field name\n * because \"this field has high cardinality on your current data\"\n * is a field-level property, not a per-query one.\n */\nconst warnedCardinalityFields = new Set<string>()\nfunction warnCardinalityApproaching(field: string, observed: number): void {\n if (warnedCardinalityFields.has(field)) return\n warnedCardinalityFields.add(field)\n console.warn(\n `[noy-db] .groupBy(\"${field}\") produced ${observed} distinct groups, ` +\n `${Math.round((observed / GROUPBY_MAX_CARDINALITY) * 100)}% of the ` +\n `${GROUPBY_MAX_CARDINALITY}-group ceiling. Narrow the query with ` +\n `.where() before grouping, or switch to a lower-cardinality field.`,\n )\n}\n\n/**\n * Test-only: clear the per-field cardinality warning dedup between\n * tests. Production code never calls this — matching the\n * `resetJoinWarnings` pattern in `join.ts`.\n */\nexport function resetGroupByWarnings(): void {\n warnedCardinalityFields.clear()\n}\n\n/**\n * Result row shape for a grouped aggregation. Each row carries the\n * group key value under the grouping field name plus every reducer\n * output from the spec.\n *\n * types the group key as `unknown` at the result shape — the\n * runtime read via `readPath` can return any value, and narrowing\n * to a specific type would require the caller to assert at the\n * call site. `dictKey` narrowing layers on top of this by\n * adding an overload that constrains `F` when the grouping field\n * is a `dictKey`.\n */\nexport type GroupedRow<F extends string, R> = { [K in F]: unknown } & R\n\n/**\n * Chainable wrapper returned by `Query.groupBy(field)`. 
Terminates\n * with `.aggregate(spec)` which returns a `GroupedAggregation`.\n *\n * Kept minimal — the only operation on a grouped query is\n * aggregation. Ordering, limiting, and further filtering belong on\n * the underlying `Query` before `.groupBy()` is called; applying\n * them post-group would be a different operation (`having` /\n * `groupOrderBy`), out of scope for.\n */\nexport class GroupedQuery<T, F extends string> {\n constructor(\n private readonly executeRecords: () => readonly unknown[],\n private readonly field: F,\n private readonly upstreams: readonly AggregationUpstream[],\n /**\n * Optional dict label resolver attached by the query builder when\n * the grouping field is a dictKey.\n */\n private readonly dictLabelResolver?: (\n key: string,\n locale: string,\n fallback?: string | readonly string[],\n ) => Promise<string | undefined>,\n ) {\n // T is phantom on the wrapper so consumers can still see the\n // source row type on hover. Reference it to keep lint quiet.\n void undefined as T | undefined\n }\n\n /**\n * Build a grouped aggregation. Returns a `GroupedAggregation`\n * with `.run()`, `.runAsync()`, and `.live()` terminals — same shape\n * as the non-grouped `.aggregate()` wrapper, just with an array\n * result (one row per bucket) instead of a single reduced object.\n */\n aggregate<Spec extends AggregateSpec>(\n spec: Spec,\n ): GroupedAggregation<GroupedRow<F, AggregateResult<Spec>>> {\n return new GroupedAggregation<GroupedRow<F, AggregateResult<Spec>>>(\n this.executeRecords,\n this.field,\n spec,\n this.upstreams,\n this.dictLabelResolver,\n )\n }\n}\n\n/**\n * Execute the group-and-reduce pipeline. Pure function over a\n * record array and a spec — shared by `GroupedAggregation.run()`\n * and the live-mode refresh path. 
Exported for tests and for any\n * future `scan().groupBy().aggregate()` reuse.\n *\n * Enforces the cardinality cap incrementally during the partition\n * loop, so a runaway grouping throws at the moment the 100_001st\n * bucket would be created — the consumer doesn't have to wait for\n * the full partition to materialize before the error fires.\n */\nexport function groupAndReduce<R>(\n records: readonly unknown[],\n field: string,\n spec: AggregateSpec,\n): R[] {\n // Map preserves insertion order natively (ES2015), so first-seen\n // keys determine output ordering without a parallel order array.\n const buckets = new Map<unknown, unknown[]>()\n for (const record of records) {\n const key = readPath(record, field)\n let bucket = buckets.get(key)\n if (bucket === undefined) {\n if (buckets.size >= GROUPBY_MAX_CARDINALITY) {\n throw new GroupCardinalityError(\n field,\n buckets.size + 1,\n GROUPBY_MAX_CARDINALITY,\n )\n }\n bucket = []\n buckets.set(key, bucket)\n }\n bucket.push(record)\n }\n\n if (buckets.size >= GROUPBY_WARN_CARDINALITY) {\n warnCardinalityApproaching(field, buckets.size)\n }\n\n // Reduce each bucket through the spec. Same init/step/finalize\n // pipeline as `reduceRecords` in aggregate.ts, but one state per\n // bucket. 
Inlining the loop here keeps the per-bucket path tight\n // — calling `reduceRecords` per bucket would recompute\n // `Object.keys(spec)` once per bucket unnecessarily.\n const keys = Object.keys(spec)\n const out: R[] = []\n for (const [groupKey, bucketRecords] of buckets) {\n const state: Record<string, unknown> = {}\n for (const key of keys) {\n state[key] = spec[key]!.init()\n }\n for (const record of bucketRecords) {\n for (const key of keys) {\n state[key] = spec[key]!.step(state[key], record)\n }\n }\n const row: Record<string, unknown> = { [field]: groupKey }\n for (const key of keys) {\n row[key] = spec[key]!.finalize(state[key])\n }\n out.push(row as unknown as R)\n }\n return out\n}\n\n/**\n * Grouped aggregation wrapper — the `.groupBy(field).aggregate(spec)`\n * terminal. Shape mirrors `Aggregation<R>` from aggregate.ts: two\n * terminals (`.run()` and `.live()`), spec bound at construction\n * time, upstreams collected for live mode.\n *\n * The generic `R` is the per-row result shape (i.e. a single\n * grouped row), and the terminals return `R[]` — one row per\n * bucket.\n */\nexport class GroupedAggregation<R> {\n constructor(\n private readonly executeRecords: () => readonly unknown[],\n private readonly field: string,\n private readonly spec: AggregateSpec,\n private readonly upstreams: readonly AggregationUpstream[],\n /**\n * Optional dict label resolver for `<field>Label` projection\n *. Present when the grouping field is a dictKey.\n */\n private readonly dictLabelResolver?: (\n key: string,\n locale: string,\n fallback?: string | readonly string[],\n ) => Promise<string | undefined>,\n ) {}\n\n /** Execute the query, group, reduce, and return an array of rows. */\n run(): R[] {\n return groupAndReduce<R>(this.executeRecords(), this.field, this.spec)\n }\n\n /**\n * Execute the query, group, reduce, and resolve `<field>Label` for\n * each result row when the grouping field is a `dictKey` and a\n * `locale` is provided. 
Returns `R[]` synchronously when\n * no locale is specified (identical to `.run()`).\n *\n * The `<field>Label` field is appended to each row. Rows whose group\n * key has no dictionary entry get `<field>Label: undefined`.\n */\n async runAsync(opts?: {\n locale?: string\n fallback?: string | readonly string[]\n }): Promise<R[]> {\n const rows = groupAndReduce<R>(this.executeRecords(), this.field, this.spec)\n if (!opts?.locale || !this.dictLabelResolver) return rows\n\n const resolve = this.dictLabelResolver\n const locale = opts.locale\n const fallback = opts.fallback\n const labelKey = `${this.field}Label`\n\n return Promise.all(\n rows.map(async (row) => {\n const key = (row as Record<string, unknown>)[this.field]\n if (typeof key !== 'string') return row\n const label = await resolve(key, locale, fallback)\n return { ...(row as Record<string, unknown>), [labelKey]: label } as unknown as R\n }),\n )\n }\n\n /**\n * Build a reactive `LiveAggregation<R[]>` that re-runs the full\n * group-and-reduce pipeline whenever any upstream source notifies\n * of a change. Same error-isolation and idempotent-stop contract\n * as `Aggregation.live()` — the implementation delegates to the\n * same `LiveAggregationImpl` class by threading a fresh\n * recompute closure through the existing constructor.\n *\n * uses naive full re-run on every change. 
Incremental\n * per-bucket maintenance (apply `step` on inserted records,\n * `remove` on deleted records, route by bucket key) is a future\n * optimization — the reducer protocol admits it, but wiring\n * delta-aware source subscriptions is a separate PR.\n *\n * Always call `live.stop()` when finished.\n */\n live(): LiveAggregation<R[]> {\n const recompute = (): R[] =>\n groupAndReduce<R>(this.executeRecords(), this.field, this.spec)\n return buildLiveAggregation<R[]>(recompute, this.upstreams)\n }\n}\n"],"mappings":";;;;;;;;AA4FO,SAAS,MAAM,MAAgD;AAGpE,QAAM,QAAQ,MAAM;AACpB,OAAK;AACL,SAAO;AAAA,IACL,MAAM,MAAM;AAAA,IACZ,MAAM,CAAC,UAAU,QAAQ;AAAA,IACzB,QAAQ,CAAC,UAAU,QAAQ;AAAA,IAC3B,UAAU,CAAC,UAAU;AAAA,EACvB;AACF;AAQO,SAAS,IACd,OACA,MACiB;AACjB,QAAM,QAAQ,MAAM;AACpB,OAAK;AACL,SAAO;AAAA,IACL,MAAM,MAAM;AAAA,IACZ,MAAM,CAAC,OAAO,WAAW,QAAQ,WAAW,QAAQ,KAAK;AAAA,IACzD,QAAQ,CAAC,OAAO,WAAW,QAAQ,WAAW,QAAQ,KAAK;AAAA,IAC3D,UAAU,CAAC,UAAU;AAAA,EACvB;AACF;AAgBO,SAAS,IACd,OACA,MACwD;AACxD,QAAM,QAAQ,MAAM;AACpB,OAAK;AACL,SAAO;AAAA,IACL,MAAM,OAAO,EAAE,KAAK,GAAG,OAAO,EAAE;AAAA,IAChC,MAAM,CAAC,OAAO,YAAY;AAAA,MACxB,KAAK,MAAM,MAAM,WAAW,QAAQ,KAAK;AAAA,MACzC,OAAO,MAAM,QAAQ;AAAA,IACvB;AAAA,IACA,QAAQ,CAAC,OAAO,YAAY;AAAA,MAC1B,KAAK,MAAM,MAAM,WAAW,QAAQ,KAAK;AAAA,MACzC,OAAO,MAAM,QAAQ;AAAA,IACvB;AAAA,IACA,UAAU,CAAC,UAAW,MAAM,UAAU,IAAI,OAAO,MAAM,MAAM,MAAM;AAAA,EACrE;AACF;AAcA,SAAS,UAAU,OAAoB,OAA4B;AACjE,SAAO,EAAE,QAAQ,CAAC,GAAG,MAAM,QAAQ,KAAK,EAAE;AAC5C;AAEA,SAAS,YAAY,OAAoB,OAA4B;AAInE,QAAM,MAAM,MAAM,OAAO,QAAQ,KAAK;AACtC,MAAI,MAAM,EAAG,QAAO;AACpB,QAAM,OAAO,MAAM,OAAO,MAAM;AAChC,OAAK,OAAO,KAAK,CAAC;AAClB,SAAO,EAAE,QAAQ,KAAK;AACxB;AAcO,SAAS,IACd,OACA,MACqC;AACrC,QAAM,QAAQ,MAAM;AACpB,OAAK;AACL,SAAO;AAAA,IACL,MAAM,OAAO,EAAE,QAAQ,CAAC,EAAE;AAAA,IAC1B,MAAM,CAAC,OAAO,WAAW,UAAU,OAAO,WAAW,QAAQ,KAAK,CAAC;AAAA,IACnE,QAAQ,CAAC,OAAO,WAAW,YAAY,OAAO,WAAW,QAAQ,KAAK,CAAC;AAAA,IACvE,UAAU,CAAC,UAAU;AACnB,UAAI,MAAM,OAAO,WAAW,EAAG,QAAO;AACtC,UAAI,MAAM,MAAM,OAAO,CAAC;AACxB,eAAS,IAAI,GAAG,IAAI,MAAM,OAAO,QAAQ,KAAK;AAC5C,cAAM
,IAAI,MAAM,OAAO,CAAC;AACxB,YAAI,IAAI,IAAK,OAAM;AAAA,MACrB;AACA,aAAO;AAAA,IACT;AAAA,EACF;AACF;AAOO,SAAS,IACd,OACA,MACqC;AACrC,QAAM,QAAQ,MAAM;AACpB,OAAK;AACL,SAAO;AAAA,IACL,MAAM,OAAO,EAAE,QAAQ,CAAC,EAAE;AAAA,IAC1B,MAAM,CAAC,OAAO,WAAW,UAAU,OAAO,WAAW,QAAQ,KAAK,CAAC;AAAA,IACnE,QAAQ,CAAC,OAAO,WAAW,YAAY,OAAO,WAAW,QAAQ,KAAK,CAAC;AAAA,IACvE,UAAU,CAAC,UAAU;AACnB,UAAI,MAAM,OAAO,WAAW,EAAG,QAAO;AACtC,UAAI,MAAM,MAAM,OAAO,CAAC;AACxB,eAAS,IAAI,GAAG,IAAI,MAAM,OAAO,QAAQ,KAAK;AAC5C,cAAM,IAAI,MAAM,OAAO,CAAC;AACxB,YAAI,IAAI,IAAK,OAAM;AAAA,MACrB;AACA,aAAO;AAAA,IACT;AAAA,EACF;AACF;AAaA,SAAS,WAAW,QAAiB,OAAuB;AAC1D,QAAM,QAAQ,SAAS,QAAQ,KAAK;AACpC,SAAO,OAAO,UAAU,YAAY,OAAO,SAAS,KAAK,IAAI,QAAQ;AACvE;;;ACjMO,SAAS,cACd,SACA,MACuB;AAEvB,QAAM,QAAiC,CAAC;AACxC,aAAW,OAAO,OAAO,KAAK,IAAI,GAAG;AACnC,UAAM,GAAG,IAAI,KAAK,GAAG,EAAG,KAAK;AAAA,EAC/B;AACA,aAAW,UAAU,SAAS;AAC5B,eAAW,OAAO,OAAO,KAAK,IAAI,GAAG;AACnC,YAAM,GAAG,IAAI,KAAK,GAAG,EAAG,KAAK,MAAM,GAAG,GAAG,MAAM;AAAA,IACjD;AAAA,EACF;AACA,QAAM,SAAkC,CAAC;AACzC,aAAW,OAAO,OAAO,KAAK,IAAI,GAAG;AACnC,WAAO,GAAG,IAAI,KAAK,GAAG,EAAG,SAAS,MAAM,GAAG,CAAC;AAAA,EAC9C;AACA,SAAO;AACT;AA4DA,IAAM,sBAAN,MAA2D;AAAA,EAOzD,YACmB,WACjB,WACA;AAFiB;AASjB,QAAI;AACF,WAAK,QAAQ,UAAU;AACvB,WAAK,QAAQ;AAAA,IACf,SAAS,KAAK;AACZ,WAAK,QAAQ;AACb,WAAK,QAAQ;AAAA,IACf;AAIA,eAAW,YAAY,WAAW;AAChC,YAAM,QAAQ,SAAS,UAAU,MAAM,KAAK,QAAQ,CAAC;AACrD,WAAK,aAAa,KAAK,KAAK;AAAA,IAC9B;AAAA,EACF;AAAA,EAvBmB;AAAA,EAPZ;AAAA,EACA;AAAA,EACU,YAAY,oBAAI,IAAgB;AAAA,EAChC,eAAkC,CAAC;AAAA,EAC5C,UAAU;AAAA,EA4BV,UAAgB;AACtB,QAAI,KAAK,QAAS;AAClB,QAAI;AACF,WAAK,QAAQ,KAAK,UAAU;AAC5B,WAAK,QAAQ;AAAA,IACf,SAAS,KAAK;AAIZ,WAAK,QAAQ;AAAA,IACf;AACA,eAAW,YAAY,KAAK,WAAW;AACrC,UAAI;AACF,iBAAS;AAAA,MACX,SAAS,KAAK;AAGZ,gBAAQ,KAAK,4CAA4C,GAAG;AAAA,MAC9D;AAAA,IACF;AAAA,EACF;AAAA,EAEA,UAAU,IAA4B;AACpC,QAAI,KAAK,SAAS;AAGhB,aAAO,MAAM;AAAA,MAAC;AAAA,IAChB;AACA,SAAK,UAAU,IAAI,EAAE;AACrB,WAAO,MAAM;AACX,WAAK,UAAU,OAAO,EAAE;AAAA,IAC1B;AAAA,EACF;AAAA,EAEA,OAAa;AACX,QAAI,KAAK,QAAS;AAClB,SAAK,UAAU;AACf,eAAW,SAAS,KAAK,cAAc;AACrC,UAAI;AACF,cAAM;AAAA,M
ACR,SAAS,KAAK;AACZ,gBAAQ,KAAK,wDAAwD,GAAG;AAAA,MAC1E;AAAA,IACF;AACA,SAAK,aAAa,SAAS;AAC3B,SAAK,UAAU,MAAM;AAAA,EACvB;AACF;AAkBO,IAAM,cAAN,MAAqB;AAAA,EAC1B,YACmB,gBACA,MACA,WACjB;AAHiB;AACA;AACA;AAAA,EAChB;AAAA,EAHgB;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASnB,MAAS;AACP,WAAO,cAAc,KAAK,eAAe,GAAG,KAAK,IAAI;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,OAA2B;AACzB,UAAM,YAAY,MAChB,cAAc,KAAK,eAAe,GAAG,KAAK,IAAI;AAChD,WAAO,IAAI,oBAAuB,WAAW,KAAK,SAAS;AAAA,EAC7D;AACF;AAUO,SAAS,qBACd,WACA,WACoB;AACpB,SAAO,IAAI,oBAAuB,WAAW,SAAS;AACxD;;;AC/NO,IAAM,2BAA2B;AACjC,IAAM,0BAA0B;AASvC,IAAM,0BAA0B,oBAAI,IAAY;AAChD,SAAS,2BAA2B,OAAe,UAAwB;AACzE,MAAI,wBAAwB,IAAI,KAAK,EAAG;AACxC,0BAAwB,IAAI,KAAK;AACjC,UAAQ;AAAA,IACN,sBAAsB,KAAK,eAAe,QAAQ,qBAC7C,KAAK,MAAO,WAAW,0BAA2B,GAAG,CAAC,YACtD,uBAAuB;AAAA,EAE9B;AACF;AAOO,SAAS,uBAA6B;AAC3C,0BAAwB,MAAM;AAChC;AA0BO,IAAM,eAAN,MAAwC;AAAA,EAC7C,YACmB,gBACA,OACA,WAKA,mBAKjB;AAZiB;AACA;AACA;AAKA;AAAA,EASnB;AAAA,EAhBmB;AAAA,EACA;AAAA,EACA;AAAA,EAKA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBnB,UACE,MAC0D;AAC1D,WAAO,IAAI;AAAA,MACT,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL,KAAK;AAAA,IACP;AAAA,EACF;AACF;AAaO,SAAS,eACd,SACA,OACA,MACK;AAGL,QAAM,UAAU,oBAAI,IAAwB;AAC5C,aAAW,UAAU,SAAS;AAC5B,UAAM,MAAM,SAAS,QAAQ,KAAK;AAClC,QAAI,SAAS,QAAQ,IAAI,GAAG;AAC5B,QAAI,WAAW,QAAW;AACxB,UAAI,QAAQ,QAAQ,yBAAyB;AAC3C,cAAM,IAAI;AAAA,UACR;AAAA,UACA,QAAQ,OAAO;AAAA,UACf;AAAA,QACF;AAAA,MACF;AACA,eAAS,CAAC;AACV,cAAQ,IAAI,KAAK,MAAM;AAAA,IACzB;AACA,WAAO,KAAK,MAAM;AAAA,EACpB;AAEA,MAAI,QAAQ,QAAQ,0BAA0B;AAC5C,+BAA2B,OAAO,QAAQ,IAAI;AAAA,EAChD;AAOA,QAAM,OAAO,OAAO,KAAK,IAAI;AAC7B,QAAM,MAAW,CAAC;AAClB,aAAW,CAAC,UAAU,aAAa,KAAK,SAAS;AAC/C,UAAM,QAAiC,CAAC;AACxC,eAAW,OAAO,MAAM;AACtB,YAAM,GAAG,IAAI,KAAK,GAAG,EAAG,KAAK;AAAA,IAC/B;AACA,eAAW,UAAU,eAAe;AAClC,iBAAW,OAAO,MAAM;AACtB,cAAM,GAAG,IAAI,KAAK,GAAG,EAAG,KAAK,MAAM,GAAG,GAAG,MAAM;AAAA,MACjD;AAAA,IACF;AACA,UAAM,MAA+B,EAAE,CAAC,KAAK,GAAG,SAAS;AACzD,eAAW,OAAO,MAAM;AACtB
,UAAI,GAAG,IAAI,KAAK,GAAG,EAAG,SAAS,MAAM,GAAG,CAAC;AAAA,IAC3C;AACA,QAAI,KAAK,GAAmB;AAAA,EAC9B;AACA,SAAO;AACT;AAYO,IAAM,qBAAN,MAA4B;AAAA,EACjC,YACmB,gBACA,OACA,MACA,WAKA,mBAKjB;AAbiB;AACA;AACA;AACA;AAKA;AAAA,EAKhB;AAAA,EAbgB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAKA;AAAA;AAAA,EAQnB,MAAW;AACT,WAAO,eAAkB,KAAK,eAAe,GAAG,KAAK,OAAO,KAAK,IAAI;AAAA,EACvE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,SAAS,MAGE;AACf,UAAM,OAAO,eAAkB,KAAK,eAAe,GAAG,KAAK,OAAO,KAAK,IAAI;AAC3E,QAAI,CAAC,MAAM,UAAU,CAAC,KAAK,kBAAmB,QAAO;AAErD,UAAM,UAAU,KAAK;AACrB,UAAM,SAAS,KAAK;AACpB,UAAM,WAAW,KAAK;AACtB,UAAM,WAAW,GAAG,KAAK,KAAK;AAE9B,WAAO,QAAQ;AAAA,MACb,KAAK,IAAI,OAAO,QAAQ;AACtB,cAAM,MAAO,IAAgC,KAAK,KAAK;AACvD,YAAI,OAAO,QAAQ,SAAU,QAAO;AACpC,cAAM,QAAQ,MAAM,QAAQ,KAAK,QAAQ,QAAQ;AACjD,eAAO,EAAE,GAAI,KAAiC,CAAC,QAAQ,GAAG,MAAM;AAAA,MAClE,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,OAA6B;AAC3B,UAAM,YAAY,MAChB,eAAkB,KAAK,eAAe,GAAG,KAAK,OAAO,KAAK,IAAI;AAChE,WAAO,qBAA0B,WAAW,KAAK,SAAS;AAAA,EAC5D;AACF;","names":[]}
@@ -0,0 +1 @@
1
+ //# sourceMappingURL=chunk-UF3BUNQZ.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}