@noy-db/hub 0.1.0-pre.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/LICENSE +21 -0
  2. package/README.md +197 -0
  3. package/dist/aggregate/index.cjs +476 -0
  4. package/dist/aggregate/index.cjs.map +1 -0
  5. package/dist/aggregate/index.d.cts +38 -0
  6. package/dist/aggregate/index.d.ts +38 -0
  7. package/dist/aggregate/index.js +53 -0
  8. package/dist/aggregate/index.js.map +1 -0
  9. package/dist/blobs/index.cjs +1480 -0
  10. package/dist/blobs/index.cjs.map +1 -0
  11. package/dist/blobs/index.d.cts +45 -0
  12. package/dist/blobs/index.d.ts +45 -0
  13. package/dist/blobs/index.js +48 -0
  14. package/dist/blobs/index.js.map +1 -0
  15. package/dist/bundle/index.cjs +436 -0
  16. package/dist/bundle/index.cjs.map +1 -0
  17. package/dist/bundle/index.d.cts +7 -0
  18. package/dist/bundle/index.d.ts +7 -0
  19. package/dist/bundle/index.js +40 -0
  20. package/dist/bundle/index.js.map +1 -0
  21. package/dist/chunk-2QR2PQTT.js +217 -0
  22. package/dist/chunk-2QR2PQTT.js.map +1 -0
  23. package/dist/chunk-4OWFYIDQ.js +79 -0
  24. package/dist/chunk-4OWFYIDQ.js.map +1 -0
  25. package/dist/chunk-5AATM2M2.js +90 -0
  26. package/dist/chunk-5AATM2M2.js.map +1 -0
  27. package/dist/chunk-ACLDOTNQ.js +543 -0
  28. package/dist/chunk-ACLDOTNQ.js.map +1 -0
  29. package/dist/chunk-BTDCBVJW.js +160 -0
  30. package/dist/chunk-BTDCBVJW.js.map +1 -0
  31. package/dist/chunk-CIMZBAZB.js +72 -0
  32. package/dist/chunk-CIMZBAZB.js.map +1 -0
  33. package/dist/chunk-E445ICYI.js +365 -0
  34. package/dist/chunk-E445ICYI.js.map +1 -0
  35. package/dist/chunk-EXQRC2L4.js +722 -0
  36. package/dist/chunk-EXQRC2L4.js.map +1 -0
  37. package/dist/chunk-FZU343FL.js +32 -0
  38. package/dist/chunk-FZU343FL.js.map +1 -0
  39. package/dist/chunk-GJILMRPO.js +354 -0
  40. package/dist/chunk-GJILMRPO.js.map +1 -0
  41. package/dist/chunk-GOUT6DND.js +1285 -0
  42. package/dist/chunk-GOUT6DND.js.map +1 -0
  43. package/dist/chunk-J66GRPNH.js +111 -0
  44. package/dist/chunk-J66GRPNH.js.map +1 -0
  45. package/dist/chunk-M2F2JAWB.js +464 -0
  46. package/dist/chunk-M2F2JAWB.js.map +1 -0
  47. package/dist/chunk-M5INGEFC.js +84 -0
  48. package/dist/chunk-M5INGEFC.js.map +1 -0
  49. package/dist/chunk-M62XNWRA.js +72 -0
  50. package/dist/chunk-M62XNWRA.js.map +1 -0
  51. package/dist/chunk-MR4424N3.js +275 -0
  52. package/dist/chunk-MR4424N3.js.map +1 -0
  53. package/dist/chunk-NPC4LFV5.js +132 -0
  54. package/dist/chunk-NPC4LFV5.js.map +1 -0
  55. package/dist/chunk-NXFEYLVG.js +311 -0
  56. package/dist/chunk-NXFEYLVG.js.map +1 -0
  57. package/dist/chunk-R36SIKES.js +79 -0
  58. package/dist/chunk-R36SIKES.js.map +1 -0
  59. package/dist/chunk-TDR6T5CJ.js +381 -0
  60. package/dist/chunk-TDR6T5CJ.js.map +1 -0
  61. package/dist/chunk-UF3BUNQZ.js +1 -0
  62. package/dist/chunk-UF3BUNQZ.js.map +1 -0
  63. package/dist/chunk-UQFSPSWG.js +1109 -0
  64. package/dist/chunk-UQFSPSWG.js.map +1 -0
  65. package/dist/chunk-USKYUS74.js +793 -0
  66. package/dist/chunk-USKYUS74.js.map +1 -0
  67. package/dist/chunk-XCL3WP6J.js +121 -0
  68. package/dist/chunk-XCL3WP6J.js.map +1 -0
  69. package/dist/chunk-XHFOENR2.js +680 -0
  70. package/dist/chunk-XHFOENR2.js.map +1 -0
  71. package/dist/chunk-ZFKD4QMV.js +430 -0
  72. package/dist/chunk-ZFKD4QMV.js.map +1 -0
  73. package/dist/chunk-ZLMV3TUA.js +490 -0
  74. package/dist/chunk-ZLMV3TUA.js.map +1 -0
  75. package/dist/chunk-ZRG4V3F5.js +17 -0
  76. package/dist/chunk-ZRG4V3F5.js.map +1 -0
  77. package/dist/consent/index.cjs +204 -0
  78. package/dist/consent/index.cjs.map +1 -0
  79. package/dist/consent/index.d.cts +24 -0
  80. package/dist/consent/index.d.ts +24 -0
  81. package/dist/consent/index.js +23 -0
  82. package/dist/consent/index.js.map +1 -0
  83. package/dist/crdt/index.cjs +152 -0
  84. package/dist/crdt/index.cjs.map +1 -0
  85. package/dist/crdt/index.d.cts +30 -0
  86. package/dist/crdt/index.d.ts +30 -0
  87. package/dist/crdt/index.js +24 -0
  88. package/dist/crdt/index.js.map +1 -0
  89. package/dist/crypto-IVKU7YTT.js +44 -0
  90. package/dist/crypto-IVKU7YTT.js.map +1 -0
  91. package/dist/delegation-XDJCBTI2.js +16 -0
  92. package/dist/delegation-XDJCBTI2.js.map +1 -0
  93. package/dist/dev-unlock-CeXic1xC.d.cts +263 -0
  94. package/dist/dev-unlock-KrKkcqD3.d.ts +263 -0
  95. package/dist/hash-9KO1BGxh.d.cts +63 -0
  96. package/dist/hash-ChfJjRjQ.d.ts +63 -0
  97. package/dist/history/index.cjs +1215 -0
  98. package/dist/history/index.cjs.map +1 -0
  99. package/dist/history/index.d.cts +62 -0
  100. package/dist/history/index.d.ts +62 -0
  101. package/dist/history/index.js +79 -0
  102. package/dist/history/index.js.map +1 -0
  103. package/dist/i18n/index.cjs +746 -0
  104. package/dist/i18n/index.cjs.map +1 -0
  105. package/dist/i18n/index.d.cts +38 -0
  106. package/dist/i18n/index.d.ts +38 -0
  107. package/dist/i18n/index.js +55 -0
  108. package/dist/i18n/index.js.map +1 -0
  109. package/dist/index-BRHBCmLt.d.ts +1940 -0
  110. package/dist/index-C8kQtmOk.d.ts +380 -0
  111. package/dist/index-DN-J-5wT.d.cts +1940 -0
  112. package/dist/index-DhjMjz7L.d.cts +380 -0
  113. package/dist/index.cjs +14756 -0
  114. package/dist/index.cjs.map +1 -0
  115. package/dist/index.d.cts +269 -0
  116. package/dist/index.d.ts +269 -0
  117. package/dist/index.js +6085 -0
  118. package/dist/index.js.map +1 -0
  119. package/dist/indexing/index.cjs +736 -0
  120. package/dist/indexing/index.cjs.map +1 -0
  121. package/dist/indexing/index.d.cts +36 -0
  122. package/dist/indexing/index.d.ts +36 -0
  123. package/dist/indexing/index.js +77 -0
  124. package/dist/indexing/index.js.map +1 -0
  125. package/dist/lazy-builder-BwEoBQZ9.d.ts +304 -0
  126. package/dist/lazy-builder-CZVLKh0Z.d.cts +304 -0
  127. package/dist/ledger-2NX4L7PN.js +33 -0
  128. package/dist/ledger-2NX4L7PN.js.map +1 -0
  129. package/dist/mime-magic-CBBSOkjm.d.cts +50 -0
  130. package/dist/mime-magic-CBBSOkjm.d.ts +50 -0
  131. package/dist/periods/index.cjs +1035 -0
  132. package/dist/periods/index.cjs.map +1 -0
  133. package/dist/periods/index.d.cts +21 -0
  134. package/dist/periods/index.d.ts +21 -0
  135. package/dist/periods/index.js +25 -0
  136. package/dist/periods/index.js.map +1 -0
  137. package/dist/predicate-SBHmi6D0.d.cts +161 -0
  138. package/dist/predicate-SBHmi6D0.d.ts +161 -0
  139. package/dist/query/index.cjs +1957 -0
  140. package/dist/query/index.cjs.map +1 -0
  141. package/dist/query/index.d.cts +3 -0
  142. package/dist/query/index.d.ts +3 -0
  143. package/dist/query/index.js +62 -0
  144. package/dist/query/index.js.map +1 -0
  145. package/dist/session/index.cjs +487 -0
  146. package/dist/session/index.cjs.map +1 -0
  147. package/dist/session/index.d.cts +45 -0
  148. package/dist/session/index.d.ts +45 -0
  149. package/dist/session/index.js +44 -0
  150. package/dist/session/index.js.map +1 -0
  151. package/dist/shadow/index.cjs +133 -0
  152. package/dist/shadow/index.cjs.map +1 -0
  153. package/dist/shadow/index.d.cts +16 -0
  154. package/dist/shadow/index.d.ts +16 -0
  155. package/dist/shadow/index.js +20 -0
  156. package/dist/shadow/index.js.map +1 -0
  157. package/dist/store/index.cjs +1069 -0
  158. package/dist/store/index.cjs.map +1 -0
  159. package/dist/store/index.d.cts +491 -0
  160. package/dist/store/index.d.ts +491 -0
  161. package/dist/store/index.js +34 -0
  162. package/dist/store/index.js.map +1 -0
  163. package/dist/strategy-BSxFXGzb.d.cts +110 -0
  164. package/dist/strategy-BSxFXGzb.d.ts +110 -0
  165. package/dist/strategy-D-SrOLCl.d.cts +548 -0
  166. package/dist/strategy-D-SrOLCl.d.ts +548 -0
  167. package/dist/sync/index.cjs +1062 -0
  168. package/dist/sync/index.cjs.map +1 -0
  169. package/dist/sync/index.d.cts +42 -0
  170. package/dist/sync/index.d.ts +42 -0
  171. package/dist/sync/index.js +28 -0
  172. package/dist/sync/index.js.map +1 -0
  173. package/dist/team/index.cjs +1233 -0
  174. package/dist/team/index.cjs.map +1 -0
  175. package/dist/team/index.d.cts +117 -0
  176. package/dist/team/index.d.ts +117 -0
  177. package/dist/team/index.js +39 -0
  178. package/dist/team/index.js.map +1 -0
  179. package/dist/tx/index.cjs +212 -0
  180. package/dist/tx/index.cjs.map +1 -0
  181. package/dist/tx/index.d.cts +20 -0
  182. package/dist/tx/index.d.ts +20 -0
  183. package/dist/tx/index.js +20 -0
  184. package/dist/tx/index.js.map +1 -0
  185. package/dist/types-BZpCZB8N.d.ts +7526 -0
  186. package/dist/types-Bfs0qr5F.d.cts +7526 -0
  187. package/dist/ulid-COREQ2RQ.js +9 -0
  188. package/dist/ulid-COREQ2RQ.js.map +1 -0
  189. package/dist/util/index.cjs +230 -0
  190. package/dist/util/index.cjs.map +1 -0
  191. package/dist/util/index.d.cts +77 -0
  192. package/dist/util/index.d.ts +77 -0
  193. package/dist/util/index.js +190 -0
  194. package/dist/util/index.js.map +1 -0
  195. package/package.json +244 -0
@@ -0,0 +1,110 @@
1
+ /**
2
+ * CRDT state types, merge logic, and build helpers.
3
+ * per-collection CRDT mode: 'lww-map' | 'rga' | 'yjs'
4
+ *
5
+ * The encrypted envelope wraps the CRDT state (not the resolved snapshot).
6
+ * Adapters only ever see ciphertext. `collection.get(id)` returns the
7
+ * resolved snapshot; `collection.getRaw(id)` returns the full CRDT state.
8
+ */
9
+ /** Per-collection CRDT mode. */
10
+ type CrdtMode = 'lww-map' | 'rga' | 'yjs';
11
+ /**
12
+ * Per-field last-write-wins registers.
13
+ * Each field carries its latest value and the ISO timestamp of the last write.
14
+ * Merge: for each field, keep the entry with the lexicographically higher `ts`.
15
+ */
16
+ interface LwwMapState {
17
+ readonly _crdt: 'lww-map';
18
+ readonly fields: Record<string, {
19
+ readonly v: unknown;
20
+ readonly ts: string;
21
+ }>;
22
+ }
23
+ /**
24
+ * Simplified Replicated Growable Array.
25
+ * Items are assigned stable NID (noy-db id) strings on first insertion.
26
+ * Deleted items are tracked as tombstones so concurrent removals commute.
27
+ *
28
+ * The resolved snapshot is the ordered list of non-tombstoned `v` values.
29
+ */
30
+ interface RgaState {
31
+ readonly _crdt: 'rga';
32
+ readonly items: ReadonlyArray<{
33
+ readonly nid: string;
34
+ readonly v: unknown;
35
+ }>;
36
+ readonly tombstones: readonly string[];
37
+ }
38
+ /**
39
+ * Yjs binary state marker. `update` is base64(Y.encodeStateAsUpdate()).
40
+ * Core stores and retrieves the blob opaquely. `@noy-db/yjs` is responsible
41
+ * for encoding, decoding, and merging via `Y.mergeUpdates`.
42
+ * Core falls back to last-write-wins (higher `_v`) for conflict resolution.
43
+ */
44
+ interface YjsState {
45
+ readonly _crdt: 'yjs';
46
+ /** base64-encoded Y.encodeStateAsUpdate() bytes. */
47
+ readonly update: string;
48
+ }
49
+ type CrdtState = LwwMapState | RgaState | YjsState;
50
+ /**
51
+ * Resolve a CRDT state into the end-user record snapshot.
52
+ *
53
+ * - `lww-map` → `Record<string, unknown>` (field values extracted from registers)
54
+ * - `rga` → `unknown[]` (non-tombstoned items in insertion order)
55
+ * - `yjs` → `string` (base64 update blob; use @noy-db/yjs for a Y.Doc)
56
+ */
57
+ declare function resolveCrdtSnapshot(state: CrdtState): unknown;
58
+ /**
59
+ * Merge two CRDT states produced by concurrent writes.
60
+ * Called by the collection-level conflict resolver registered with SyncEngine.
61
+ *
62
+ * For `yjs`: core cannot merge Yjs without importing the `yjs` package.
63
+ * The caller must handle that case by falling back to the higher-`_v` envelope.
64
+ */
65
+ declare function mergeCrdtStates(a: CrdtState, b: CrdtState): CrdtState;
66
+ /**
67
+ * Build (or update) an lww-map state from a new record.
68
+ *
69
+ * All fields in the new record win at timestamp `now`.
70
+ * Fields present in the existing state but absent from the new record
71
+ * are preserved (they were written by another device).
72
+ */
73
+ declare function buildLwwMapState(record: Record<string, unknown>, existing: LwwMapState | undefined, now: string): LwwMapState;
74
+ /**
75
+ * Build (or update) an RGA state from a new array.
76
+ *
77
+ * Existing items are matched to new elements by deep-equality of their `v`.
78
+ * Unmatched existing items are tombstoned. New elements that have no existing
79
+ * match get a fresh NID via `generateNid()`.
80
+ */
81
+ declare function buildRgaState(arr: unknown[], existing: RgaState | undefined, generateNid: () => string): RgaState;
82
+
83
+ /**
84
+ * Strategy seam between core Collection and the optional CRDT
85
+ * subsystem. Core imports `CrdtStrategy` as a TYPE-ONLY symbol and
86
+ * `NO_CRDT` as a minimal runtime stub.
87
+ *
88
+ * The state-construction / merge / snapshot-resolution helpers —
89
+ * `buildLwwMapState`, `buildRgaState`, `mergeCrdtStates`,
90
+ * `resolveCrdtSnapshot` — are only reachable from `withCrdt()` in
91
+ * `./active.ts`, which is only exported through the `@noy-db/hub/crdt`
92
+ * subpath. Consumers without CRDT mode configured never pull the
93
+ * ~221 LOC into their bundle.
94
+ *
95
+ * @internal
96
+ */
97
+
98
+ /**
99
+ * Seam interface. `@internal`.
100
+ *
101
+ * @internal
102
+ */
103
+ interface CrdtStrategy {
104
+ buildLwwMapState(record: Record<string, unknown>, previous: LwwMapState | undefined, now: string): LwwMapState;
105
+ buildRgaState(items: readonly unknown[], previous: RgaState | undefined, idGen: () => string): RgaState;
106
+ mergeCrdtStates(local: CrdtState, remote: CrdtState): CrdtState;
107
+ resolveCrdtSnapshot(state: CrdtState): unknown;
108
+ }
109
+
110
+ export { type CrdtStrategy as C, type LwwMapState as L, type RgaState as R, type YjsState as Y, type CrdtMode as a, type CrdtState as b, buildLwwMapState as c, buildRgaState as d, mergeCrdtStates as m, resolveCrdtSnapshot as r };
@@ -0,0 +1,548 @@
1
+ /**
2
+ * Aggregation reducers for the query DSL.
3
+ *
4
+ * the reducer protocol plus five built-in factories
5
+ * (`count`, `sum`, `avg`, `min`, `max`) consumed by `Query.aggregate()`
6
+ * and, in the future, `Scan.aggregate()`. Every factory accepts
7
+ * an optional `{ seed }` parameter that is plumbed through the
8
+ * protocol but unused by the executor — that's the load-bearing
9
+ * half of constraint #2. When partition-aware aggregation
10
+ * lands, the seed carries the previous partition's running total into
11
+ * the next partition without requiring a protocol change.
12
+ *
13
+ * Reducers are intentionally generic over their internal state type
14
+ * `S` so compound reducers (avg keeps `{sum, count}`, min/max keep a
15
+ * value bag) can model internal bookkeeping without leaking the
16
+ * implementation through the accumulator's public shape. `finalize`
17
+ * collapses `S` back into the user-visible `R`.
18
+ *
19
+ * Reducers are pure data — `init` / `step` / `finalize` / optional
20
+ * `remove` are stateless functions that receive and return `S`. This
21
+ * is the shape that admits O(1) incremental maintenance in a future
22
+ * optimization (delta-aware `LiveAggregation` applies `step` or
23
+ * `remove` per delta), without blocking the simpler "full re-run on
24
+ * source change" that ships.
25
+ */
26
+ /**
27
+ * A single reducer: factory-produced, ready to plug into an
28
+ * `.aggregate()` spec.
29
+ *
30
+ * Type parameters:
31
+ * - `R` — user-visible result type (what the aggregation returns
32
+ * for this slot, e.g. `number` for `sum()`)
33
+ * - `S` — internal state type, defaults to `R` for simple reducers
34
+ * that don't need compound bookkeeping
35
+ *
36
+ * A reducer is stateless: every method is pure over `S`. `init()` is
37
+ * called once per aggregation run to build the initial state; `step()`
38
+ * folds a record into the state; `remove()` (optional) un-folds a
39
+ * record, enabling incremental live maintenance; `finalize()` reads
40
+ * the final answer out of the state at the end of the run.
41
+ */
42
+ interface Reducer<R, S = R> {
43
+ /** Build the initial state for a fresh aggregation run. */
44
+ init(): S;
45
+ /** Fold a record into the state. Returns the new state. */
46
+ step(state: S, record: unknown): S;
47
+ /**
48
+ * Un-fold a record from the state. Returns the new state.
49
+ *
50
+ * Optional — reducers without `remove` cannot be maintained
51
+ * incrementally and must be re-run from scratch when the underlying
52
+ * record set changes. `sum`, `count`, `avg` implement `remove` in
53
+ * O(1); `min` and `max` implement it in O(N) worst case (when the
54
+ * extremum itself is removed and the next extremum must be
55
+ * recomputed from the remaining contributing values).
56
+ */
57
+ remove?(state: S, record: unknown): S;
58
+ /** Collapse the internal state into the user-visible result. */
59
+ finalize(state: S): R;
60
+ }
61
+ /**
62
+ * Common options accepted by every reducer factory.
63
+ *
64
+ * `seed` — optional initial value for the internal state. **Unused by
65
+ * the executor**, plumbed through the protocol for constraint
66
+ * #2 (partition-aware aggregation seam). In, partitioned
67
+ * aggregations will pass the previous partition's carry as `seed` so
68
+ * a long time series can be rolled forward one partition at a time
69
+ * without re-aggregating closed partitions.
70
+ *
71
+ * always uses `init()` with the factory's zero value, regardless
72
+ * of whether `seed` was passed. Do not remove the parameter — that's
73
+ * the whole point of having it exist now.
74
+ */
75
+ interface ReducerOptions<TSeed = unknown> {
76
+ /** constraint #2 — seed is plumbed through but unused in. */
77
+ readonly seed?: TSeed;
78
+ }
79
+ /**
80
+ * Count the number of records that match the query. Ignores field
81
+ * values entirely — the count is over the number of records, not over
82
+ * the number of non-null field values in any column.
83
+ */
84
+ declare function count(opts?: ReducerOptions<number>): Reducer<number>;
85
+ /**
86
+ * Sum a numeric field across all matching records. Non-number values
87
+ * at the field path are coerced to 0 — consumers who want a different
88
+ * behavior (throw, skip, treat as NaN) should filter upstream via
89
+ * `.where()` or write a custom reducer.
90
+ */
91
+ declare function sum(field: string, opts?: ReducerOptions<number>): Reducer<number>;
92
+ /**
93
+ * Arithmetic mean of a numeric field across all matching records.
94
+ *
95
+ * Returns `null` for an empty result set (zero records is not a
96
+ * well-defined denominator — returning NaN would poison downstream
97
+ * arithmetic, and throwing would force every consumer to wrap in
98
+ * try/catch just to handle "no matches"). Consumers who want an
99
+ * explicit zero should coalesce with `?? 0`.
100
+ *
101
+ * Internal state is `{sum, count}` so the running average can be
102
+ * maintained incrementally — on each delta, both fields update in
103
+ * O(1) and `finalize` divides. Directly storing `avg` as state would
104
+ * not admit incremental removal without also tracking count.
105
+ */
106
+ declare function avg(field: string, opts?: ReducerOptions<{
107
+ sum: number;
108
+ count: number;
109
+ }>): Reducer<number | null, {
110
+ sum: number;
111
+ count: number;
112
+ }>;
113
+ interface MinMaxState {
114
+ /**
115
+ * Multiset of contributing field values. Stored as a plain array
116
+ * because we need to support `remove` and a plain array gives us
117
+ * O(1) push + O(N) worst-case removal — which matches the
118
+ * documented min/max removal complexity. A sorted structure would
119
+ * let us drop the O(N) rescan but adds complexity that doesn't
120
+ * need; consumers hitting the O(N) ceiling should file an issue.
121
+ */
122
+ readonly values: number[];
123
+ }
124
+ /**
125
+ * Smallest numeric value of a field across all matching records.
126
+ * Returns `null` for an empty result set. See `avg()` for the
127
+ * reasoning on `null` vs NaN vs throwing.
128
+ *
129
+ * Incremental complexity: O(1) for `step`, O(N) worst case for
130
+ * `remove` when the current minimum is removed (the state holds the
131
+ * full multiset of contributing values and `finalize` scans for the
132
+ * new minimum). Consumers with very large result sets and frequent
133
+ * removals of the current extremum should either accept the cost or
134
+ * wait for a future optimization.
135
+ */
136
+ declare function min(field: string, opts?: ReducerOptions<number>): Reducer<number | null, MinMaxState>;
137
+ /**
138
+ * Largest numeric value of a field across all matching records.
139
+ * Mirror of `min()` — see that doc for semantics, null-on-empty
140
+ * behavior, and the O(N) removal caveat.
141
+ */
142
+ declare function max(field: string, opts?: ReducerOptions<number>): Reducer<number | null, MinMaxState>;
143
+
144
+ /**
145
+ * Aggregate execution — the runtime behind `Query.aggregate()`.
146
+ *
147
+  * This module takes an `AggregateSpec` (a record of named reducers
148
+ * built from `reducers.ts`) and runs every reducer over the records
149
+ * produced by the underlying query. Two terminal surfaces:
150
+ *
151
+ * - `.run(): R` — synchronous one-shot reduction. Matches the
152
+ * existing `Query.toArray()` / `.first()` / `.count()` style.
153
+ * - `.live(): LiveAggregation<R>` — reactive primitive that
154
+ * re-runs the reduction whenever the query's source notifies of
155
+  * a change. For now this uses a naive full re-run; incremental delta
156
+ * maintenance is admitted by the reducer protocol (`remove()`)
157
+ * but not wired to the executor yet — a follow-up optimization
158
+ * can switch from full re-run to delta-based without breaking
159
+ * the public API. Consumers get correct, reactive values today.
160
+ *
161
+ * The `Aggregation<R>` wrapper is deliberately tiny — it exists so
162
+ * `.aggregate(spec)` can be chained with either `.run()` or `.live()`
163
+ * without the builder needing two separate terminal methods. It
164
+ * holds the closure over the query execution (produces the current
165
+ * matching record set) and the spec, and stitches them together in
166
+ * either mode.
167
+ *
168
+ * This file depends ONLY on `reducers.ts` — it has no knowledge of
169
+ * the `Query` class. Tests can therefore exercise the reduction
170
+ * surface with plain record arrays, without spinning up a Collection.
171
+ */
172
+
173
+ /**
174
+ * A named set of reducers, keyed by output field name. Each key
175
+ * becomes a field on the aggregated result.
176
+ *
177
+ * ```ts
178
+ * const spec = {
179
+ * total: sum('amount'),
180
+ * n: count(),
181
+ * avgAmount: avg('amount'),
182
+ * }
183
+ * ```
184
+ */
185
+ type AggregateSpec = Readonly<Record<string, Reducer<unknown, unknown>>>;
186
+ /**
187
+ * Map an `AggregateSpec` to its reduced result shape — each key
188
+ * carries the finalized result type from its reducer. A spec built
189
+ * from `{ total: sum('amount'), n: count() }` yields a result of
190
+ * `{ total: number, n: number }`.
191
+ *
192
+ * This uses a mapped type with a conditional to extract `R` from
193
+ * each `Reducer<R, _>`. The `infer` captures the user-visible result
194
+ * type, discarding the internal state type `S`.
195
+ */
196
+ type AggregateResult<Spec extends AggregateSpec> = {
197
+ [K in keyof Spec]: Spec[K] extends Reducer<infer R, unknown> ? R : never;
198
+ };
199
+ /**
200
+ * Pure reduction over a record array. Runs every reducer's
201
+ * `init → step* → finalize` pipeline exactly once over the records.
202
+ *
203
+ * Called by `Aggregation.run()` and by the live-mode refresh path.
204
+ * Exported for tests and for future `scan().aggregate()` reuse
205
+ * — the streaming path will call the same reducer protocol with a
206
+ * per-page loop instead of a single array.
207
+ */
208
+ declare function reduceRecords<Spec extends AggregateSpec>(records: readonly unknown[], spec: Spec): AggregateResult<Spec>;
209
+ /**
210
+ * A minimal reactive primitive for aggregation results.
211
+ *
212
+ * Same spirit as the `LiveQuery` in : frame-agnostic, a plain
213
+ * object with `value` / `error` fields and a `subscribe(cb)`
214
+ * notification channel that Vue / React / Solid adapters wrap in
215
+ * their own primitive. Intentionally NOT a Promise — aggregations
216
+ * have a well-defined "current value" at every instant, and the
217
+ * reactive consumer wants to read that value synchronously.
218
+ *
219
+ * Error semantics mirror `LiveQuery`: if a re-run throws, the
220
+ * previous successful `value` is preserved and the error is stored
221
+ * in `error` so consumers can render an error state without losing
222
+ * the last-known-good result. The throw does NOT propagate out of
223
+ * the source's change handler (which would tear down the upstream
224
+ * emitter).
225
+ *
226
+ * `stop()` tears down the upstream subscription. It is idempotent —
227
+ * calling it multiple times is safe — and subscribe calls after
228
+ * stop are no-ops (they immediately return a no-op unsubscribe).
229
+ * Always call `stop()` when done; Vue's `onUnmounted` is the
230
+ * canonical place. Raw consumers must do it themselves.
231
+ */
232
+ interface LiveAggregation<R> {
233
+ /** Current reduced value. Undefined only if the first compute threw. */
234
+ readonly value: R | undefined;
235
+ /** Last execution error, if any. Cleared on the next successful run. */
236
+ readonly error: unknown;
237
+ /** Notify on every recomputation (success or error). Returns unsubscribe. */
238
+ subscribe(cb: () => void): () => void;
239
+ /** Tear down the upstream subscription. Idempotent. */
240
+ stop(): void;
241
+ }
242
+ /**
243
+ * Upstream change-notification hook for live aggregation.
244
+ *
245
+ * Matches the shape that `QuerySource.subscribe` already uses — a
246
+ * single method that accepts a callback and returns an unsubscribe
247
+ * function. The `Aggregation` wrapper collects upstreams from the
248
+ * query's source and wires them into a single re-run trigger.
249
+ */
250
+ interface AggregationUpstream {
251
+ subscribe(cb: () => void): () => void;
252
+ }
253
+ /**
254
+ * Chainable wrapper returned by `Query.aggregate(spec)`. Holds the
255
+ * execute-records closure and the spec; terminal methods (`run`,
256
+ * `live`) stitch them together in either mode.
257
+ *
258
+ * Why a wrapper instead of two terminal methods on `Query` directly?
259
+ *
260
+ * The `.aggregate(spec)` call is where the spec is bound — both
261
+ * `.run()` and `.live()` need the same spec, and the consumer's
262
+ * fluent style is `query.where(...).aggregate(spec).run()` or
263
+ * `.aggregate(spec).live()`. Wrapping lets the spec be named once
264
+ * and reused for either terminal, and keeps the `Query` class
265
+ * from growing a pair of near-duplicate method overloads
266
+ * (`aggregateRun` / `aggregateLive`) that would be harder to
267
+ * discover.
268
+ */
269
+ declare class Aggregation<R> {
270
+ private readonly executeRecords;
271
+ private readonly spec;
272
+ private readonly upstreams;
273
+ constructor(executeRecords: () => readonly unknown[], spec: AggregateSpec, upstreams: readonly AggregationUpstream[]);
274
+ /**
275
+ * Execute the query and reduce the results synchronously.
276
+ * Returns the reduced shape matching the spec — e.g. a spec of
277
+ * `{ total: sum('amount'), n: count() }` returns
278
+ * `{ total: number, n: number }`.
279
+ */
280
+ run(): R;
281
+ /**
282
+ * Build a reactive `LiveAggregation<R>` that re-runs the reduction
283
+ * whenever any upstream source notifies of a change. The initial
284
+ * value is computed eagerly in the constructor, so consumers can
285
+ * read `live.value` immediately after calling `.live()`.
286
+ *
287
+ * Always call `live.stop()` when finished — it tears down the
288
+ * upstream subscriptions. Vue's `onUnmounted` is the canonical
289
+ * place.
290
+ *
291
+ * **Implementation note:** every upstream change triggers a full
292
+ * re-reduction. Incremental maintenance (O(1) per delta for
293
+ * sum/count/avg via the reducer protocol's `remove()` method) is a
294
+ * planned follow-up optimization — the protocol already supports
295
+ * it, but the executor doesn't drive it yet. Consumers get
296
+ * correct, reactive values today; future PRs can switch to
297
+ * delta-based maintenance without changing this API.
298
+ */
299
+ live(): LiveAggregation<R>;
300
+ }
301
+ /**
302
+ * Build a `LiveAggregation<V>` from a recompute closure and a list
303
+ * of upstreams. Exposed so sibling files in the query DSL
304
+ * (currently `groupby.ts`) can reuse the reactive primitive
305
+ * without reaching into `LiveAggregationImpl` directly. This keeps
306
+ * the implementation class private while still allowing planned
307
+ * composition with `.groupBy().aggregate().live()`.
308
+ */
309
+ declare function buildLiveAggregation<V>(recompute: () => V, upstreams: readonly AggregationUpstream[]): LiveAggregation<V>;
310
+
311
+ /**
312
+  * Query DSL `.groupBy()` — grouped aggregation.
313
+ *
314
+ * Chains after `.where()` / `.filter()` / `.or()` / `.and()` on a
315
+ * Query and before a reducer spec, so consumers can compute
316
+ * per-bucket aggregates without folding in userland:
317
+ *
318
+ * ```ts
319
+ * const byClient = invoices.query()
320
+ * .where('status', '==', 'open')
321
+ * .groupBy('clientId')
322
+ * .aggregate({ total: sum('amount'), n: count() })
323
+ * .run()
324
+ * // → [ { clientId: 'c1', total: 5250, n: 3 }, … ]
325
+ * ```
326
+ *
327
+ * Execution pipeline:
328
+ *
329
+ * 1. Run the query's where/filter clauses (same candidate /
330
+ * filter pipeline as `.aggregate()` directly on Query).
331
+ * 2. Partition the matching records into buckets keyed by
332
+ * `readPath(record, field)`. JS `Map` preserves insertion
333
+ * order, so the first-seen key for a bucket determines its
334
+ * position in the result array — consumers who want a
335
+ * specific ordering should `.sort()` downstream.
336
+ * 3. Enforce cardinality: warn once per field at 10% of the cap
337
+ * (10_000 buckets), throw `GroupCardinalityError` at 100% of
338
+ * the cap (100_000 buckets).
339
+ * 4. For each bucket, build a per-group reducer state and
340
+ * step every record in the bucket through it.
341
+ * 5. Emit one result row per bucket, shaped as
342
+ * `{ [field]: key, ...reduced }`.
343
+ *
344
+ * **Null / undefined keys:** `Map` distinguishes `null` from
345
+ * `undefined`, so records with a missing group field get their own
346
+ * bucket, and records with an explicit `null` value get a separate
347
+ * bucket from that. Consumers who want them merged can coalesce
348
+ * upstream with `.filter()`.
349
+ *
350
+ * **Live mode:** `.groupBy().aggregate().live()` re-runs the full
351
+ * grouping pipeline on every source change. Per-bucket incremental
352
+ * delta maintenance is a future optimization — the reducer
353
+ * protocol's `remove()` hook admits it, but ships naive
354
+ * re-grouping for simplicity.
355
+ *
356
+ * **Type-level stable-key narrowing:** when
357
+ * `dictKey` lands, `groupBy<DictField>()` will narrow the group key
358
+ * type to the stable dictionary key rather than the resolved locale
359
+ * label. That prevents grouping by the locale-resolved label,
360
+ * which would produce different buckets per reader. types the
361
+ * key as `unknown` at the result shape; the dictKey narrowing
362
+ * layers on top without an API break.
363
+ *
364
+ * Partition-awareness seam: when partitioned collections land,
365
+ * per-partition grouping will need to merge sub-results across
366
+ * partitions. The reducer protocol's `{ seed }` parameter
367
+ * (already plumbed through in `reducers.ts`) is the mechanism —
368
+ * groupBy doesn't need its own seam for the moment, because it
369
+ * delegates to the reducer protocol for all per-bucket state.
370
+ */
371
+
372
+ /**
373
+ * Cardinality thresholds for `.groupBy()`. The warn threshold gives
374
+ * consumers a heads-up before the hard error; the cap is a fixed
375
+ * constant (not overridable). A `{ maxGroups }` override
376
+ * can be added later without a break if a real consumer asks.
377
+ */
378
+ declare const GROUPBY_WARN_CARDINALITY = 10000;
379
+ declare const GROUPBY_MAX_CARDINALITY = 100000;
380
+ /**
381
+ * Test-only: clear the per-field cardinality warning dedup between
382
+ * tests. Production code never calls this — matching the
383
+ * `resetJoinWarnings` pattern in `join.ts`.
384
+ * @internal
+ */
385
+ declare function resetGroupByWarnings(): void;
386
+ /**
387
+ * Result row shape for a grouped aggregation. Each row carries the
388
+ * group key value under the grouping field name plus every reducer
389
+ * output from the spec.
390
+ *
391
+ * The group key is typed as `unknown` at the result shape — the
392
+ * runtime read via `readPath` can return any value, and narrowing
393
+ * to a specific type would require the caller to assert at the
394
+ * call site. `dictKey` narrowing layers on top of this by
395
+ * adding an overload that constrains `F` when the grouping field
396
+ * is a `dictKey`.
397
+ */
398
+ type GroupedRow<F extends string, R> = {
399
+ [K in F]: unknown;
400
+ } & R;
401
+ /**
402
+ * Chainable wrapper returned by `Query.groupBy(field)`. Terminates
403
+ * with `.aggregate(spec)` which returns a `GroupedAggregation`.
404
+ *
405
+ * Kept minimal — the only operation on a grouped query is
406
+ * aggregation. Ordering, limiting, and further filtering belong on
407
+ * the underlying `Query` before `.groupBy()` is called; applying
408
+ * them post-group would be a different operation (`having` /
409
+ * `groupOrderBy`), out of scope for now.
410
+ */
411
+ declare class GroupedQuery<T, F extends string> {
412
+ private readonly executeRecords;
413
+ private readonly field;
414
+ private readonly upstreams;
415
+ /**
416
+ * Optional dict label resolver attached by the query builder when
417
+ * the grouping field is a dictKey.
418
+ */
419
+ private readonly dictLabelResolver?;
420
+ constructor(executeRecords: () => readonly unknown[], field: F, upstreams: readonly AggregationUpstream[],
421
+ /**
422
+ * Optional dict label resolver attached by the query builder when
423
+ * the grouping field is a dictKey.
424
+ */
425
+ dictLabelResolver?: ((key: string, locale: string, fallback?: string | readonly string[]) => Promise<string | undefined>) | undefined);
426
+ /**
427
+ * Build a grouped aggregation. Returns a `GroupedAggregation`
428
+ * with `.run()`, `.runAsync()`, and `.live()` terminals — same shape
429
+ * as the non-grouped `.aggregate()` wrapper, just with an array
430
+ * result (one row per bucket) instead of a single reduced object.
431
+ */
432
+ aggregate<Spec extends AggregateSpec>(spec: Spec): GroupedAggregation<GroupedRow<F, AggregateResult<Spec>>>;
433
+ }
434
+ /**
435
+ * Execute the group-and-reduce pipeline. Pure function over a
436
+ * record array and a spec — shared by `GroupedAggregation.run()`
437
+ * and the live-mode refresh path. Exported for tests and for any
438
+ * future `scan().groupBy().aggregate()` reuse.
439
+ *
440
+ * Enforces the cardinality cap incrementally during the partition
441
+ * loop, so a runaway grouping throws at the moment the 100_001st
442
+ * bucket would be created — the consumer doesn't have to wait for
443
+ * the full partition to materialize before the error fires.
444
+ *
+ * @param records Materialized record set to partition into buckets.
+ * @param field Name of the grouping field on each record.
+ * @param spec Reducer spec applied independently to every bucket.
+ * @returns One result row per bucket.
+ * @throws `GroupCardinalityError` once the bucket cap is exceeded.
+ */
445
+ declare function groupAndReduce<R>(records: readonly unknown[], field: string, spec: AggregateSpec): R[];
446
+ /**
447
+ * Grouped aggregation wrapper — the `.groupBy(field).aggregate(spec)`
448
+ * terminal. Shape mirrors `Aggregation<R>` from aggregate.ts: two
449
+ * terminals (`.run()` and `.live()`), spec bound at construction
450
+ * time, upstreams collected for live mode.
451
+ *
452
+ * The generic `R` is the per-row result shape (i.e. a single
453
+ * grouped row), and the terminals return `R[]` — one row per
454
+ * bucket.
455
+ */
456
+ declare class GroupedAggregation<R> {
457
+ private readonly executeRecords;
458
+ private readonly field;
459
+ private readonly spec;
460
+ private readonly upstreams;
461
+ /**
462
+ * Optional dict label resolver for `<field>Label` projection.
463
+ * Present when the grouping field is a dictKey.
464
+ */
465
+ private readonly dictLabelResolver?;
466
+ constructor(executeRecords: () => readonly unknown[], field: string, spec: AggregateSpec, upstreams: readonly AggregationUpstream[],
467
+ /**
468
+ * Optional dict label resolver for `<field>Label` projection.
469
+ * Present when the grouping field is a dictKey.
470
+ */
471
+ dictLabelResolver?: ((key: string, locale: string, fallback?: string | readonly string[]) => Promise<string | undefined>) | undefined);
472
+ /** Execute the query, group, reduce, and return an array of rows. */
473
+ run(): R[];
474
+ /**
475
+ * Execute the query, group, reduce, and resolve `<field>Label` for
476
+ * each result row when the grouping field is a `dictKey` and a
477
+ * `locale` is provided. When no locale is specified the promise
478
+ * resolves with rows identical to `.run()`.
479
+ *
480
+ * The `<field>Label` field is appended to each row. Rows whose group
481
+ * key has no dictionary entry get `<field>Label: undefined`.
482
+ */
483
+ runAsync(opts?: {
484
+ locale?: string;
485
+ fallback?: string | readonly string[];
486
+ }): Promise<R[]>;
487
+ /**
488
+ * Build a reactive `LiveAggregation<R[]>` that re-runs the full
489
+ * group-and-reduce pipeline whenever any upstream source notifies
490
+ * of a change. Same error-isolation and idempotent-stop contract
491
+ * as `Aggregation.live()` — the implementation delegates to the
492
+ * same `LiveAggregationImpl` class by threading a fresh
493
+ * recompute closure through the existing constructor.
494
+ *
495
+ * For now this uses naive full re-run on every change. Incremental
496
+ * per-bucket maintenance (apply `step` on inserted records,
497
+ * `remove` on deleted records, route by bucket key) is a future
498
+ * optimization — the reducer protocol admits it, but wiring
499
+ * delta-aware source subscriptions is a separate PR.
500
+ *
501
+ * Always call `live.stop()` when finished.
502
+ */
503
+ live(): LiveAggregation<R[]>;
504
+ }
505
+
506
+ /**
507
+ * Strategy seam between the core Query / ScanBuilder chain and the
508
+ * optional aggregate / groupBy subsystem. Core imports
509
+ * `AggregateStrategy` as a TYPE-ONLY symbol and `NO_AGGREGATE` as a
510
+ * tiny runtime stub.
511
+ *
512
+ * The heavy machinery — `Aggregation`, `GroupedQuery`, the
513
+ * reducer-step logic — is only reachable from `withAggregate()` in
514
+ * `./active.ts`, which is only exported through the
515
+ * `@noy-db/hub/aggregate` subpath. Consumers that don't import the
516
+ * subpath ship none of the ~886 LOC.
517
+ *
518
+ * @internal
519
+ */
520
+
521
+ /**
522
+ * Seam interface. `@internal` — will promote to public only when the
523
+ * aggregate subsystem is extracted into its own package.
524
+ *
525
+ * @internal
526
+ */
527
+ interface AggregateStrategy {
528
+ /**
529
+ * Build an `Aggregation` for `Query.aggregate(spec)`. `executeRecords`
530
+ * is a closure that produces the matching record set when the
531
+ * aggregation runs. NO_AGGREGATE throws; the active strategy
532
+ * constructs a real `Aggregation`.
533
+ */
534
+ aggregate<Spec extends AggregateSpec>(executeRecords: () => readonly unknown[], spec: Spec, upstreams: readonly AggregationUpstream[]): Aggregation<AggregateResult<Spec>>;
535
+ /**
536
+ * Build a `GroupedQuery<T, F>` for `Query.groupBy(field)`. Same
537
+ * closure / upstream inputs as `aggregate` plus the group key field.
538
+ */
539
+ groupBy<T, F extends string>(executeRecords: () => readonly unknown[], field: F, upstreams: readonly AggregationUpstream[], dictLabelResolver?: (key: string, locale: string, fallback?: string | readonly string[]) => Promise<string | undefined>): GroupedQuery<T, F>;
540
+ /**
541
+ * Terminal streaming aggregator for `ScanBuilder.aggregate(spec)`.
542
+ * Takes an async iterable of decrypted records + the spec and
543
+ * returns the reduced result.
544
+ */
545
+ scanAggregate<Spec extends AggregateSpec>(iter: AsyncIterable<unknown>, spec: Spec): Promise<AggregateResult<Spec>>;
546
+ }
547
+
548
+ /**
+ * Flat re-export map with single-letter aliases.
+ * NOTE(review): the aliasing pattern suggests this is generated
+ * d.ts-rollup output — confirm before hand-editing; prefer editing
+ * the source modules instead.
+ */
+ export { type AggregateStrategy as A, GROUPBY_MAX_CARDINALITY as G, type LiveAggregation as L, type Reducer as R, type AggregateResult as a, type AggregateSpec as b, Aggregation as c, type AggregationUpstream as d, GROUPBY_WARN_CARDINALITY as e, GroupedAggregation as f, GroupedQuery as g, type GroupedRow as h, type ReducerOptions as i, avg as j, buildLiveAggregation as k, count as l, groupAndReduce as m, max as n, min as o, resetGroupByWarnings as p, reduceRecords as r, sum as s };