@noy-db/hub 0.1.0-pre.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +197 -0
- package/dist/aggregate/index.cjs +476 -0
- package/dist/aggregate/index.cjs.map +1 -0
- package/dist/aggregate/index.d.cts +38 -0
- package/dist/aggregate/index.d.ts +38 -0
- package/dist/aggregate/index.js +53 -0
- package/dist/aggregate/index.js.map +1 -0
- package/dist/blobs/index.cjs +1480 -0
- package/dist/blobs/index.cjs.map +1 -0
- package/dist/blobs/index.d.cts +45 -0
- package/dist/blobs/index.d.ts +45 -0
- package/dist/blobs/index.js +48 -0
- package/dist/blobs/index.js.map +1 -0
- package/dist/bundle/index.cjs +436 -0
- package/dist/bundle/index.cjs.map +1 -0
- package/dist/bundle/index.d.cts +7 -0
- package/dist/bundle/index.d.ts +7 -0
- package/dist/bundle/index.js +40 -0
- package/dist/bundle/index.js.map +1 -0
- package/dist/chunk-2QR2PQTT.js +217 -0
- package/dist/chunk-2QR2PQTT.js.map +1 -0
- package/dist/chunk-4OWFYIDQ.js +79 -0
- package/dist/chunk-4OWFYIDQ.js.map +1 -0
- package/dist/chunk-5AATM2M2.js +90 -0
- package/dist/chunk-5AATM2M2.js.map +1 -0
- package/dist/chunk-ACLDOTNQ.js +543 -0
- package/dist/chunk-ACLDOTNQ.js.map +1 -0
- package/dist/chunk-BTDCBVJW.js +160 -0
- package/dist/chunk-BTDCBVJW.js.map +1 -0
- package/dist/chunk-CIMZBAZB.js +72 -0
- package/dist/chunk-CIMZBAZB.js.map +1 -0
- package/dist/chunk-E445ICYI.js +365 -0
- package/dist/chunk-E445ICYI.js.map +1 -0
- package/dist/chunk-EXQRC2L4.js +722 -0
- package/dist/chunk-EXQRC2L4.js.map +1 -0
- package/dist/chunk-FZU343FL.js +32 -0
- package/dist/chunk-FZU343FL.js.map +1 -0
- package/dist/chunk-GJILMRPO.js +354 -0
- package/dist/chunk-GJILMRPO.js.map +1 -0
- package/dist/chunk-GOUT6DND.js +1285 -0
- package/dist/chunk-GOUT6DND.js.map +1 -0
- package/dist/chunk-J66GRPNH.js +111 -0
- package/dist/chunk-J66GRPNH.js.map +1 -0
- package/dist/chunk-M2F2JAWB.js +464 -0
- package/dist/chunk-M2F2JAWB.js.map +1 -0
- package/dist/chunk-M5INGEFC.js +84 -0
- package/dist/chunk-M5INGEFC.js.map +1 -0
- package/dist/chunk-M62XNWRA.js +72 -0
- package/dist/chunk-M62XNWRA.js.map +1 -0
- package/dist/chunk-MR4424N3.js +275 -0
- package/dist/chunk-MR4424N3.js.map +1 -0
- package/dist/chunk-NPC4LFV5.js +132 -0
- package/dist/chunk-NPC4LFV5.js.map +1 -0
- package/dist/chunk-NXFEYLVG.js +311 -0
- package/dist/chunk-NXFEYLVG.js.map +1 -0
- package/dist/chunk-R36SIKES.js +79 -0
- package/dist/chunk-R36SIKES.js.map +1 -0
- package/dist/chunk-TDR6T5CJ.js +381 -0
- package/dist/chunk-TDR6T5CJ.js.map +1 -0
- package/dist/chunk-UF3BUNQZ.js +1 -0
- package/dist/chunk-UF3BUNQZ.js.map +1 -0
- package/dist/chunk-UQFSPSWG.js +1109 -0
- package/dist/chunk-UQFSPSWG.js.map +1 -0
- package/dist/chunk-USKYUS74.js +793 -0
- package/dist/chunk-USKYUS74.js.map +1 -0
- package/dist/chunk-XCL3WP6J.js +121 -0
- package/dist/chunk-XCL3WP6J.js.map +1 -0
- package/dist/chunk-XHFOENR2.js +680 -0
- package/dist/chunk-XHFOENR2.js.map +1 -0
- package/dist/chunk-ZFKD4QMV.js +430 -0
- package/dist/chunk-ZFKD4QMV.js.map +1 -0
- package/dist/chunk-ZLMV3TUA.js +490 -0
- package/dist/chunk-ZLMV3TUA.js.map +1 -0
- package/dist/chunk-ZRG4V3F5.js +17 -0
- package/dist/chunk-ZRG4V3F5.js.map +1 -0
- package/dist/consent/index.cjs +204 -0
- package/dist/consent/index.cjs.map +1 -0
- package/dist/consent/index.d.cts +24 -0
- package/dist/consent/index.d.ts +24 -0
- package/dist/consent/index.js +23 -0
- package/dist/consent/index.js.map +1 -0
- package/dist/crdt/index.cjs +152 -0
- package/dist/crdt/index.cjs.map +1 -0
- package/dist/crdt/index.d.cts +30 -0
- package/dist/crdt/index.d.ts +30 -0
- package/dist/crdt/index.js +24 -0
- package/dist/crdt/index.js.map +1 -0
- package/dist/crypto-IVKU7YTT.js +44 -0
- package/dist/crypto-IVKU7YTT.js.map +1 -0
- package/dist/delegation-XDJCBTI2.js +16 -0
- package/dist/delegation-XDJCBTI2.js.map +1 -0
- package/dist/dev-unlock-CeXic1xC.d.cts +263 -0
- package/dist/dev-unlock-KrKkcqD3.d.ts +263 -0
- package/dist/hash-9KO1BGxh.d.cts +63 -0
- package/dist/hash-ChfJjRjQ.d.ts +63 -0
- package/dist/history/index.cjs +1215 -0
- package/dist/history/index.cjs.map +1 -0
- package/dist/history/index.d.cts +62 -0
- package/dist/history/index.d.ts +62 -0
- package/dist/history/index.js +79 -0
- package/dist/history/index.js.map +1 -0
- package/dist/i18n/index.cjs +746 -0
- package/dist/i18n/index.cjs.map +1 -0
- package/dist/i18n/index.d.cts +38 -0
- package/dist/i18n/index.d.ts +38 -0
- package/dist/i18n/index.js +55 -0
- package/dist/i18n/index.js.map +1 -0
- package/dist/index-BRHBCmLt.d.ts +1940 -0
- package/dist/index-C8kQtmOk.d.ts +380 -0
- package/dist/index-DN-J-5wT.d.cts +1940 -0
- package/dist/index-DhjMjz7L.d.cts +380 -0
- package/dist/index.cjs +14756 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +269 -0
- package/dist/index.d.ts +269 -0
- package/dist/index.js +6085 -0
- package/dist/index.js.map +1 -0
- package/dist/indexing/index.cjs +736 -0
- package/dist/indexing/index.cjs.map +1 -0
- package/dist/indexing/index.d.cts +36 -0
- package/dist/indexing/index.d.ts +36 -0
- package/dist/indexing/index.js +77 -0
- package/dist/indexing/index.js.map +1 -0
- package/dist/lazy-builder-BwEoBQZ9.d.ts +304 -0
- package/dist/lazy-builder-CZVLKh0Z.d.cts +304 -0
- package/dist/ledger-2NX4L7PN.js +33 -0
- package/dist/ledger-2NX4L7PN.js.map +1 -0
- package/dist/mime-magic-CBBSOkjm.d.cts +50 -0
- package/dist/mime-magic-CBBSOkjm.d.ts +50 -0
- package/dist/periods/index.cjs +1035 -0
- package/dist/periods/index.cjs.map +1 -0
- package/dist/periods/index.d.cts +21 -0
- package/dist/periods/index.d.ts +21 -0
- package/dist/periods/index.js +25 -0
- package/dist/periods/index.js.map +1 -0
- package/dist/predicate-SBHmi6D0.d.cts +161 -0
- package/dist/predicate-SBHmi6D0.d.ts +161 -0
- package/dist/query/index.cjs +1957 -0
- package/dist/query/index.cjs.map +1 -0
- package/dist/query/index.d.cts +3 -0
- package/dist/query/index.d.ts +3 -0
- package/dist/query/index.js +62 -0
- package/dist/query/index.js.map +1 -0
- package/dist/session/index.cjs +487 -0
- package/dist/session/index.cjs.map +1 -0
- package/dist/session/index.d.cts +45 -0
- package/dist/session/index.d.ts +45 -0
- package/dist/session/index.js +44 -0
- package/dist/session/index.js.map +1 -0
- package/dist/shadow/index.cjs +133 -0
- package/dist/shadow/index.cjs.map +1 -0
- package/dist/shadow/index.d.cts +16 -0
- package/dist/shadow/index.d.ts +16 -0
- package/dist/shadow/index.js +20 -0
- package/dist/shadow/index.js.map +1 -0
- package/dist/store/index.cjs +1069 -0
- package/dist/store/index.cjs.map +1 -0
- package/dist/store/index.d.cts +491 -0
- package/dist/store/index.d.ts +491 -0
- package/dist/store/index.js +34 -0
- package/dist/store/index.js.map +1 -0
- package/dist/strategy-BSxFXGzb.d.cts +110 -0
- package/dist/strategy-BSxFXGzb.d.ts +110 -0
- package/dist/strategy-D-SrOLCl.d.cts +548 -0
- package/dist/strategy-D-SrOLCl.d.ts +548 -0
- package/dist/sync/index.cjs +1062 -0
- package/dist/sync/index.cjs.map +1 -0
- package/dist/sync/index.d.cts +42 -0
- package/dist/sync/index.d.ts +42 -0
- package/dist/sync/index.js +28 -0
- package/dist/sync/index.js.map +1 -0
- package/dist/team/index.cjs +1233 -0
- package/dist/team/index.cjs.map +1 -0
- package/dist/team/index.d.cts +117 -0
- package/dist/team/index.d.ts +117 -0
- package/dist/team/index.js +39 -0
- package/dist/team/index.js.map +1 -0
- package/dist/tx/index.cjs +212 -0
- package/dist/tx/index.cjs.map +1 -0
- package/dist/tx/index.d.cts +20 -0
- package/dist/tx/index.d.ts +20 -0
- package/dist/tx/index.js +20 -0
- package/dist/tx/index.js.map +1 -0
- package/dist/types-BZpCZB8N.d.ts +7526 -0
- package/dist/types-Bfs0qr5F.d.cts +7526 -0
- package/dist/ulid-COREQ2RQ.js +9 -0
- package/dist/ulid-COREQ2RQ.js.map +1 -0
- package/dist/util/index.cjs +230 -0
- package/dist/util/index.cjs.map +1 -0
- package/dist/util/index.d.cts +77 -0
- package/dist/util/index.d.ts +77 -0
- package/dist/util/index.js +190 -0
- package/dist/util/index.js.map +1 -0
- package/package.json +244 -0
|
@@ -0,0 +1,548 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Aggregation reducers for the query DSL.
|
|
3
|
+
*
|
|
4
|
+
* the reducer protocol plus five built-in factories
|
|
5
|
+
* (`count`, `sum`, `avg`, `min`, `max`) consumed by `Query.aggregate()`
|
|
6
|
+
* and, in the future, `Scan.aggregate()`. Every factory accepts
|
|
7
|
+
* an optional `{ seed }` parameter that is plumbed through the
|
|
8
|
+
* protocol but unused by the executor — that's the load-bearing
|
|
9
|
+
* half of constraint #2. When partition-aware aggregation
|
|
10
|
+
* lands, the seed carries the previous partition's running total into
|
|
11
|
+
* the next partition without requiring a protocol change.
|
|
12
|
+
*
|
|
13
|
+
* Reducers are intentionally generic over their internal state type
|
|
14
|
+
* `S` so compound reducers (avg keeps `{sum, count}`, min/max keep a
|
|
15
|
+
* value bag) can model internal bookkeeping without leaking the
|
|
16
|
+
* implementation through the accumulator's public shape. `finalize`
|
|
17
|
+
* collapses `S` back into the user-visible `R`.
|
|
18
|
+
*
|
|
19
|
+
* Reducers are pure data — `init` / `step` / `finalize` / optional
|
|
20
|
+
* `remove` are stateless functions that receive and return `S`. This
|
|
21
|
+
* is the shape that admits O(1) incremental maintenance in a future
|
|
22
|
+
* optimization (delta-aware `LiveAggregation` applies `step` or
|
|
23
|
+
* `remove` per delta), without blocking the simpler "full re-run on
|
|
24
|
+
* source change" that ships.
|
|
25
|
+
*/
|
|
26
|
+
/**
|
|
27
|
+
* A single reducer: factory-produced, ready to plug into an
|
|
28
|
+
* `.aggregate()` spec.
|
|
29
|
+
*
|
|
30
|
+
* Type parameters:
|
|
31
|
+
* - `R` — user-visible result type (what the aggregation returns
|
|
32
|
+
* for this slot, e.g. `number` for `sum()`)
|
|
33
|
+
* - `S` — internal state type, defaults to `R` for simple reducers
|
|
34
|
+
* that don't need compound bookkeeping
|
|
35
|
+
*
|
|
36
|
+
* A reducer is stateless: every method is pure over `S`. `init()` is
|
|
37
|
+
* called once per aggregation run to build the initial state; `step()`
|
|
38
|
+
* folds a record into the state; `remove()` (optional) un-folds a
|
|
39
|
+
* record, enabling incremental live maintenance; `finalize()` reads
|
|
40
|
+
* the final answer out of the state at the end of the run.
|
|
41
|
+
*/
|
|
42
|
+
interface Reducer<R, S = R> {
|
|
43
|
+
/** Build the initial state for a fresh aggregation run. */
|
|
44
|
+
init(): S;
|
|
45
|
+
/** Fold a record into the state. Returns the new state. */
|
|
46
|
+
step(state: S, record: unknown): S;
|
|
47
|
+
/**
|
|
48
|
+
* Un-fold a record from the state. Returns the new state.
|
|
49
|
+
*
|
|
50
|
+
* Optional — reducers without `remove` cannot be maintained
|
|
51
|
+
* incrementally and must be re-run from scratch when the underlying
|
|
52
|
+
* record set changes. `sum`, `count`, `avg` implement `remove` in
|
|
53
|
+
* O(1); `min` and `max` implement it in O(N) worst case (when the
|
|
54
|
+
* extremum itself is removed and the next extremum must be
|
|
55
|
+
* recomputed from the remaining contributing values).
|
|
56
|
+
*/
|
|
57
|
+
remove?(state: S, record: unknown): S;
|
|
58
|
+
/** Collapse the internal state into the user-visible result. */
|
|
59
|
+
finalize(state: S): R;
|
|
60
|
+
}
|
|
61
|
+
/**
|
|
62
|
+
* Common options accepted by every reducer factory.
|
|
63
|
+
*
|
|
64
|
+
* `seed` — optional initial value for the internal state. **Unused by
|
|
65
|
+
* the executor**, plumbed through the protocol for constraint
|
|
66
|
+
* #2 (partition-aware aggregation seam). In, partitioned
|
|
67
|
+
* aggregations will pass the previous partition's carry as `seed` so
|
|
68
|
+
* a long time series can be rolled forward one partition at a time
|
|
69
|
+
* without re-aggregating closed partitions.
|
|
70
|
+
*
|
|
71
|
+
* always uses `init()` with the factory's zero value, regardless
|
|
72
|
+
* of whether `seed` was passed. Do not remove the parameter — that's
|
|
73
|
+
* the whole point of having it exist now.
|
|
74
|
+
*/
|
|
75
|
+
interface ReducerOptions<TSeed = unknown> {
|
|
76
|
+
/** constraint #2 — seed is plumbed through but unused in. */
|
|
77
|
+
readonly seed?: TSeed;
|
|
78
|
+
}
|
|
79
|
+
/**
|
|
80
|
+
* Count the number of records that match the query. Ignores field
|
|
81
|
+
* values entirely — the count is over the number of records, not over
|
|
82
|
+
* the number of non-null field values in any column.
|
|
83
|
+
*/
|
|
84
|
+
declare function count(opts?: ReducerOptions<number>): Reducer<number>;
|
|
85
|
+
/**
|
|
86
|
+
* Sum a numeric field across all matching records. Non-number values
|
|
87
|
+
* at the field path are coerced to 0 — consumers who want a different
|
|
88
|
+
* behavior (throw, skip, treat as NaN) should filter upstream via
|
|
89
|
+
* `.where()` or write a custom reducer.
|
|
90
|
+
*/
|
|
91
|
+
declare function sum(field: string, opts?: ReducerOptions<number>): Reducer<number>;
|
|
92
|
+
/**
|
|
93
|
+
* Arithmetic mean of a numeric field across all matching records.
|
|
94
|
+
*
|
|
95
|
+
* Returns `null` for an empty result set (zero records is not a
|
|
96
|
+
* well-defined denominator — returning NaN would poison downstream
|
|
97
|
+
* arithmetic, and throwing would force every consumer to wrap in
|
|
98
|
+
* try/catch just to handle "no matches"). Consumers who want an
|
|
99
|
+
* explicit zero should coalesce with `?? 0`.
|
|
100
|
+
*
|
|
101
|
+
* Internal state is `{sum, count}` so the running average can be
|
|
102
|
+
* maintained incrementally — on each delta, both fields update in
|
|
103
|
+
* O(1) and `finalize` divides. Directly storing `avg` as state would
|
|
104
|
+
* not admit incremental removal without also tracking count.
|
|
105
|
+
*/
|
|
106
|
+
declare function avg(field: string, opts?: ReducerOptions<{
|
|
107
|
+
sum: number;
|
|
108
|
+
count: number;
|
|
109
|
+
}>): Reducer<number | null, {
|
|
110
|
+
sum: number;
|
|
111
|
+
count: number;
|
|
112
|
+
}>;
|
|
113
|
+
interface MinMaxState {
|
|
114
|
+
/**
|
|
115
|
+
* Multiset of contributing field values. Stored as a plain array
|
|
116
|
+
* because we need to support `remove` and a plain array gives us
|
|
117
|
+
* O(1) push + O(N) worst-case removal — which matches the
|
|
118
|
+
* documented min/max removal complexity. A sorted structure would
|
|
119
|
+
* let us drop the O(N) rescan but adds complexity that doesn't
|
|
120
|
+
* need; consumers hitting the O(N) ceiling should file an issue.
|
|
121
|
+
*/
|
|
122
|
+
readonly values: number[];
|
|
123
|
+
}
|
|
124
|
+
/**
|
|
125
|
+
* Smallest numeric value of a field across all matching records.
|
|
126
|
+
* Returns `null` for an empty result set. See `avg()` for the
|
|
127
|
+
* reasoning on `null` vs NaN vs throwing.
|
|
128
|
+
*
|
|
129
|
+
* Incremental complexity: O(1) for `step`, O(N) worst case for
|
|
130
|
+
* `remove` when the current minimum is removed (the state holds the
|
|
131
|
+
* full multiset of contributing values and `finalize` scans for the
|
|
132
|
+
* new minimum). Consumers with very large result sets and frequent
|
|
133
|
+
* removals of the current extremum should either accept the cost or
|
|
134
|
+
* wait for a future optimization.
|
|
135
|
+
*/
|
|
136
|
+
declare function min(field: string, opts?: ReducerOptions<number>): Reducer<number | null, MinMaxState>;
|
|
137
|
+
/**
|
|
138
|
+
* Largest numeric value of a field across all matching records.
|
|
139
|
+
* Mirror of `min()` — see that doc for semantics, null-on-empty
|
|
140
|
+
* behavior, and the O(N) removal caveat.
|
|
141
|
+
*/
|
|
142
|
+
declare function max(field: string, opts?: ReducerOptions<number>): Reducer<number | null, MinMaxState>;
|
|
143
|
+
|
|
144
|
+
/**
|
|
145
|
+
* Aggregate execution — the runtime behind `Query.aggregate()`.
|
|
146
|
+
*
|
|
147
|
+
* takes an `AggregateSpec` (a record of named reducers
|
|
148
|
+
* built from `reducers.ts`) and runs every reducer over the records
|
|
149
|
+
* produced by the underlying query. Two terminal surfaces:
|
|
150
|
+
*
|
|
151
|
+
* - `.run(): R` — synchronous one-shot reduction. Matches the
|
|
152
|
+
* existing `Query.toArray()` / `.first()` / `.count()` style.
|
|
153
|
+
* - `.live(): LiveAggregation<R>` — reactive primitive that
|
|
154
|
+
* re-runs the reduction whenever the query's source notifies of
|
|
155
|
+
* a change. uses naive full re-run; incremental delta
|
|
156
|
+
* maintenance is admitted by the reducer protocol (`remove()`)
|
|
157
|
+
* but not wired to the executor yet — a follow-up optimization
|
|
158
|
+
* can switch from full re-run to delta-based without breaking
|
|
159
|
+
* the public API. Consumers get correct, reactive values today.
|
|
160
|
+
*
|
|
161
|
+
* The `Aggregation<R>` wrapper is deliberately tiny — it exists so
|
|
162
|
+
* `.aggregate(spec)` can be chained with either `.run()` or `.live()`
|
|
163
|
+
* without the builder needing two separate terminal methods. It
|
|
164
|
+
* holds the closure over the query execution (produces the current
|
|
165
|
+
* matching record set) and the spec, and stitches them together in
|
|
166
|
+
* either mode.
|
|
167
|
+
*
|
|
168
|
+
* This file depends ONLY on `reducers.ts` — it has no knowledge of
|
|
169
|
+
* the `Query` class. Tests can therefore exercise the reduction
|
|
170
|
+
* surface with plain record arrays, without spinning up a Collection.
|
|
171
|
+
*/
|
|
172
|
+
|
|
173
|
+
/**
|
|
174
|
+
* A named set of reducers, keyed by output field name. Each key
|
|
175
|
+
* becomes a field on the aggregated result.
|
|
176
|
+
*
|
|
177
|
+
* ```ts
|
|
178
|
+
* const spec = {
|
|
179
|
+
* total: sum('amount'),
|
|
180
|
+
* n: count(),
|
|
181
|
+
* avgAmount: avg('amount'),
|
|
182
|
+
* }
|
|
183
|
+
* ```
|
|
184
|
+
*/
|
|
185
|
+
type AggregateSpec = Readonly<Record<string, Reducer<unknown, unknown>>>;
|
|
186
|
+
/**
|
|
187
|
+
* Map an `AggregateSpec` to its reduced result shape — each key
|
|
188
|
+
* carries the finalized result type from its reducer. A spec built
|
|
189
|
+
* from `{ total: sum('amount'), n: count() }` yields a result of
|
|
190
|
+
* `{ total: number, n: number }`.
|
|
191
|
+
*
|
|
192
|
+
* This uses a mapped type with a conditional to extract `R` from
|
|
193
|
+
* each `Reducer<R, _>`. The `infer` captures the user-visible result
|
|
194
|
+
* type, discarding the internal state type `S`.
|
|
195
|
+
*/
|
|
196
|
+
type AggregateResult<Spec extends AggregateSpec> = {
|
|
197
|
+
[K in keyof Spec]: Spec[K] extends Reducer<infer R, unknown> ? R : never;
|
|
198
|
+
};
|
|
199
|
+
/**
|
|
200
|
+
* Pure reduction over a record array. Runs every reducer's
|
|
201
|
+
* `init → step* → finalize` pipeline exactly once over the records.
|
|
202
|
+
*
|
|
203
|
+
* Called by `Aggregation.run()` and by the live-mode refresh path.
|
|
204
|
+
* Exported for tests and for future `scan().aggregate()` reuse
|
|
205
|
+
* — the streaming path will call the same reducer protocol with a
|
|
206
|
+
* per-page loop instead of a single array.
|
|
207
|
+
*/
|
|
208
|
+
declare function reduceRecords<Spec extends AggregateSpec>(records: readonly unknown[], spec: Spec): AggregateResult<Spec>;
|
|
209
|
+
/**
|
|
210
|
+
* A minimal reactive primitive for aggregation results.
|
|
211
|
+
*
|
|
212
|
+
* Same spirit as the `LiveQuery` in : frame-agnostic, a plain
|
|
213
|
+
* object with `value` / `error` fields and a `subscribe(cb)`
|
|
214
|
+
* notification channel that Vue / React / Solid adapters wrap in
|
|
215
|
+
* their own primitive. Intentionally NOT a Promise — aggregations
|
|
216
|
+
* have a well-defined "current value" at every instant, and the
|
|
217
|
+
* reactive consumer wants to read that value synchronously.
|
|
218
|
+
*
|
|
219
|
+
* Error semantics mirror `LiveQuery`: if a re-run throws, the
|
|
220
|
+
* previous successful `value` is preserved and the error is stored
|
|
221
|
+
* in `error` so consumers can render an error state without losing
|
|
222
|
+
* the last-known-good result. The throw does NOT propagate out of
|
|
223
|
+
* the source's change handler (which would tear down the upstream
|
|
224
|
+
* emitter).
|
|
225
|
+
*
|
|
226
|
+
* `stop()` tears down the upstream subscription. It is idempotent —
|
|
227
|
+
* calling it multiple times is safe — and subscribe calls after
|
|
228
|
+
* stop are no-ops (they immediately return a no-op unsubscribe).
|
|
229
|
+
* Always call `stop()` when done; Vue's `onUnmounted` is the
|
|
230
|
+
* canonical place. Raw consumers must do it themselves.
|
|
231
|
+
*/
|
|
232
|
+
interface LiveAggregation<R> {
|
|
233
|
+
/** Current reduced value. Undefined only if the first compute threw. */
|
|
234
|
+
readonly value: R | undefined;
|
|
235
|
+
/** Last execution error, if any. Cleared on the next successful run. */
|
|
236
|
+
readonly error: unknown;
|
|
237
|
+
/** Notify on every recomputation (success or error). Returns unsubscribe. */
|
|
238
|
+
subscribe(cb: () => void): () => void;
|
|
239
|
+
/** Tear down the upstream subscription. Idempotent. */
|
|
240
|
+
stop(): void;
|
|
241
|
+
}
|
|
242
|
+
/**
|
|
243
|
+
* Upstream change-notification hook for live aggregation.
|
|
244
|
+
*
|
|
245
|
+
* Matches the shape that `QuerySource.subscribe` already uses — a
|
|
246
|
+
* single method that accepts a callback and returns an unsubscribe
|
|
247
|
+
* function. The `Aggregation` wrapper collects upstreams from the
|
|
248
|
+
* query's source and wires them into a single re-run trigger.
|
|
249
|
+
*/
|
|
250
|
+
interface AggregationUpstream {
|
|
251
|
+
subscribe(cb: () => void): () => void;
|
|
252
|
+
}
|
|
253
|
+
/**
|
|
254
|
+
* Chainable wrapper returned by `Query.aggregate(spec)`. Holds the
|
|
255
|
+
* execute-records closure and the spec; terminal methods (`run`,
|
|
256
|
+
* `live`) stitch them together in either mode.
|
|
257
|
+
*
|
|
258
|
+
* Why a wrapper instead of two terminal methods on `Query` directly?
|
|
259
|
+
*
|
|
260
|
+
* The `.aggregate(spec)` call is where the spec is bound — both
|
|
261
|
+
* `.run()` and `.live()` need the same spec, and the consumer's
|
|
262
|
+
* fluent style is `query.where(...).aggregate(spec).run()` or
|
|
263
|
+
* `.aggregate(spec).live()`. Wrapping lets the spec be named once
|
|
264
|
+
* and reused for either terminal, and keeps the `Query` class
|
|
265
|
+
* from growing a pair of near-duplicate method overloads
|
|
266
|
+
* (`aggregateRun` / `aggregateLive`) that would be harder to
|
|
267
|
+
* discover.
|
|
268
|
+
*/
|
|
269
|
+
declare class Aggregation<R> {
|
|
270
|
+
private readonly executeRecords;
|
|
271
|
+
private readonly spec;
|
|
272
|
+
private readonly upstreams;
|
|
273
|
+
constructor(executeRecords: () => readonly unknown[], spec: AggregateSpec, upstreams: readonly AggregationUpstream[]);
|
|
274
|
+
/**
|
|
275
|
+
* Execute the query and reduce the results synchronously.
|
|
276
|
+
* Returns the reduced shape matching the spec — e.g. a spec of
|
|
277
|
+
* `{ total: sum('amount'), n: count() }` returns
|
|
278
|
+
* `{ total: number, n: number }`.
|
|
279
|
+
*/
|
|
280
|
+
run(): R;
|
|
281
|
+
/**
|
|
282
|
+
* Build a reactive `LiveAggregation<R>` that re-runs the reduction
|
|
283
|
+
* whenever any upstream source notifies of a change. The initial
|
|
284
|
+
* value is computed eagerly in the constructor, so consumers can
|
|
285
|
+
* read `live.value` immediately after calling `.live()`.
|
|
286
|
+
*
|
|
287
|
+
* Always call `live.stop()` when finished — it tears down the
|
|
288
|
+
* upstream subscriptions. Vue's `onUnmounted` is the canonical
|
|
289
|
+
* place.
|
|
290
|
+
*
|
|
291
|
+
* **Implementation note:** every upstream change triggers a full
|
|
292
|
+
* re-reduction. Incremental maintenance (O(1) per delta for
|
|
293
|
+
* sum/count/avg via the reducer protocol's `remove()` method) is a
|
|
294
|
+
* planned follow-up optimization — the protocol already supports
|
|
295
|
+
* it, but the executor doesn't drive it yet. Consumers get
|
|
296
|
+
* correct, reactive values today; future PRs can switch to
|
|
297
|
+
* delta-based maintenance without changing this API.
|
|
298
|
+
*/
|
|
299
|
+
live(): LiveAggregation<R>;
|
|
300
|
+
}
|
|
301
|
+
/**
|
|
302
|
+
* Build a `LiveAggregation<V>` from a recompute closure and a list
|
|
303
|
+
* of upstreams. Exposed so sibling files in the query DSL
|
|
304
|
+
* (currently `groupby.ts`) can reuse the reactive primitive
|
|
305
|
+
* without reaching into `LiveAggregationImpl` directly. This keeps
|
|
306
|
+
* the implementation class private while still allowing planned
|
|
307
|
+
* composition with `.groupBy().aggregate().live()`.
|
|
308
|
+
*/
|
|
309
|
+
declare function buildLiveAggregation<V>(recompute: () => V, upstreams: readonly AggregationUpstream[]): LiveAggregation<V>;
|
|
310
|
+
|
|
311
|
+
/**
|
|
312
|
+
* Query DSL `.groupBy()` —.
|
|
313
|
+
*
|
|
314
|
+
* Chains after `.where()` / `.filter()` / `.or()` / `.and()` on a
|
|
315
|
+
* Query and before a reducer spec, so consumers can compute
|
|
316
|
+
* per-bucket aggregates without folding in userland:
|
|
317
|
+
*
|
|
318
|
+
* ```ts
|
|
319
|
+
* const byClient = invoices.query()
|
|
320
|
+
* .where('status', '==', 'open')
|
|
321
|
+
* .groupBy('clientId')
|
|
322
|
+
* .aggregate({ total: sum('amount'), n: count() })
|
|
323
|
+
* .run()
|
|
324
|
+
* // → [ { clientId: 'c1', total: 5250, n: 3 }, … ]
|
|
325
|
+
* ```
|
|
326
|
+
*
|
|
327
|
+
* Execution pipeline:
|
|
328
|
+
*
|
|
329
|
+
* 1. Run the query's where/filter clauses (same candidate /
|
|
330
|
+
* filter pipeline as `.aggregate()` directly on Query).
|
|
331
|
+
* 2. Partition the matching records into buckets keyed by
|
|
332
|
+
* `readPath(record, field)`. JS `Map` preserves insertion
|
|
333
|
+
* order, so the first-seen key for a bucket determines its
|
|
334
|
+
* position in the result array — consumers who want a
|
|
335
|
+
* specific ordering should `.sort()` downstream.
|
|
336
|
+
* 3. Enforce cardinality: warn once per field at 10% of the cap
|
|
337
|
+
* (10_000 buckets), throw `GroupCardinalityError` at 100% of
|
|
338
|
+
* the cap (100_000 buckets).
|
|
339
|
+
* 4. For each bucket, build a per-group reducer state and
|
|
340
|
+
* step every record in the bucket through it.
|
|
341
|
+
* 5. Emit one result row per bucket, shaped as
|
|
342
|
+
* `{ [field]: key, ...reduced }`.
|
|
343
|
+
*
|
|
344
|
+
* **Null / undefined keys:** `Map` distinguishes `null` from
|
|
345
|
+
* `undefined`, so records with a missing group field get their own
|
|
346
|
+
* bucket, and records with an explicit `null` value get a separate
|
|
347
|
+
* bucket from that. Consumers who want them merged can coalesce
|
|
348
|
+
* upstream with `.filter()`.
|
|
349
|
+
*
|
|
350
|
+
* **Live mode:** `.groupBy().aggregate().live()` re-runs the full
|
|
351
|
+
* grouping pipeline on every source change. Per-bucket incremental
|
|
352
|
+
* delta maintenance is a future optimization — the reducer
|
|
353
|
+
* protocol's `remove()` hook admits it, but ships naive
|
|
354
|
+
* re-grouping for simplicity.
|
|
355
|
+
*
|
|
356
|
+
* **Type-level stable-key narrowing:** when
|
|
357
|
+
* `dictKey` lands, `groupBy<DictField>()` will narrow the group key
|
|
358
|
+
* type to the stable dictionary key rather than the resolved locale
|
|
359
|
+
* label. That prevents grouping by the locale-resolved label,
|
|
360
|
+
* which would produce different buckets per reader. types the
|
|
361
|
+
* key as `unknown` at the result shape; the dictKey narrowing
|
|
362
|
+
* layers on top without an API break.
|
|
363
|
+
*
|
|
364
|
+
* Partition-awareness seam: when partitioned collections land,
|
|
365
|
+
* per-partition grouping will need to merge sub-results across
|
|
366
|
+
* partitions. The reducer protocol's `{ seed }` parameter
|
|
367
|
+
* (already plumbed through in `reducers.ts`) is the mechanism —
|
|
368
|
+
* groupBy doesn't need its own seam for the moment, because it
|
|
369
|
+
* delegates to the reducer protocol for all per-bucket state.
|
|
370
|
+
*/
|
|
371
|
+
|
|
372
|
+
/**
|
|
373
|
+
* Cardinality thresholds for `.groupBy()`. The warn threshold gives
|
|
374
|
+
* consumers a heads-up before the hard error; the cap is a fixed
|
|
375
|
+
* constant in (not overridable). A `{ maxGroups }` override
|
|
376
|
+
* can be added later without a break if a real consumer asks.
|
|
377
|
+
*/
|
|
378
|
+
declare const GROUPBY_WARN_CARDINALITY = 10000;
|
|
379
|
+
declare const GROUPBY_MAX_CARDINALITY = 100000;
|
|
380
|
+
/**
|
|
381
|
+
* Test-only: clear the per-field cardinality warning dedup between
|
|
382
|
+
* tests. Production code never calls this — matching the
|
|
383
|
+
* `resetJoinWarnings` pattern in `join.ts`.
|
|
384
|
+
*/
|
|
385
|
+
declare function resetGroupByWarnings(): void;
|
|
386
|
+
/**
|
|
387
|
+
* Result row shape for a grouped aggregation. Each row carries the
|
|
388
|
+
* group key value under the grouping field name plus every reducer
|
|
389
|
+
* output from the spec.
|
|
390
|
+
*
|
|
391
|
+
* types the group key as `unknown` at the result shape — the
|
|
392
|
+
* runtime read via `readPath` can return any value, and narrowing
|
|
393
|
+
* to a specific type would require the caller to assert at the
|
|
394
|
+
* call site. `dictKey` narrowing layers on top of this by
|
|
395
|
+
* adding an overload that constrains `F` when the grouping field
|
|
396
|
+
* is a `dictKey`.
|
|
397
|
+
*/
|
|
398
|
+
type GroupedRow<F extends string, R> = {
|
|
399
|
+
[K in F]: unknown;
|
|
400
|
+
} & R;
|
|
401
|
+
/**
|
|
402
|
+
* Chainable wrapper returned by `Query.groupBy(field)`. Terminates
|
|
403
|
+
* with `.aggregate(spec)` which returns a `GroupedAggregation`.
|
|
404
|
+
*
|
|
405
|
+
* Kept minimal — the only operation on a grouped query is
|
|
406
|
+
* aggregation. Ordering, limiting, and further filtering belong on
|
|
407
|
+
* the underlying `Query` before `.groupBy()` is called; applying
|
|
408
|
+
* them post-group would be a different operation (`having` /
|
|
409
|
+
* `groupOrderBy`), out of scope for.
|
|
410
|
+
*/
|
|
411
|
+
declare class GroupedQuery<T, F extends string> {
|
|
412
|
+
private readonly executeRecords;
|
|
413
|
+
private readonly field;
|
|
414
|
+
private readonly upstreams;
|
|
415
|
+
/**
|
|
416
|
+
* Optional dict label resolver attached by the query builder when
|
|
417
|
+
* the grouping field is a dictKey.
|
|
418
|
+
*/
|
|
419
|
+
private readonly dictLabelResolver?;
|
|
420
|
+
constructor(executeRecords: () => readonly unknown[], field: F, upstreams: readonly AggregationUpstream[],
|
|
421
|
+
/**
|
|
422
|
+
* Optional dict label resolver attached by the query builder when
|
|
423
|
+
* the grouping field is a dictKey.
|
|
424
|
+
*/
|
|
425
|
+
dictLabelResolver?: ((key: string, locale: string, fallback?: string | readonly string[]) => Promise<string | undefined>) | undefined);
|
|
426
|
+
/**
|
|
427
|
+
* Build a grouped aggregation. Returns a `GroupedAggregation`
|
|
428
|
+
* with `.run()`, `.runAsync()`, and `.live()` terminals — same shape
|
|
429
|
+
* as the non-grouped `.aggregate()` wrapper, just with an array
|
|
430
|
+
* result (one row per bucket) instead of a single reduced object.
|
|
431
|
+
*/
|
|
432
|
+
aggregate<Spec extends AggregateSpec>(spec: Spec): GroupedAggregation<GroupedRow<F, AggregateResult<Spec>>>;
|
|
433
|
+
}
|
|
434
|
+
/**
|
|
435
|
+
* Execute the group-and-reduce pipeline. Pure function over a
|
|
436
|
+
* record array and a spec — shared by `GroupedAggregation.run()`
|
|
437
|
+
* and the live-mode refresh path. Exported for tests and for any
|
|
438
|
+
* future `scan().groupBy().aggregate()` reuse.
|
|
439
|
+
*
|
|
440
|
+
* Enforces the cardinality cap incrementally during the partition
|
|
441
|
+
* loop, so a runaway grouping throws at the moment the 100_001st
|
|
442
|
+
* bucket would be created — the consumer doesn't have to wait for
|
|
443
|
+
* the full partition to materialize before the error fires.
|
|
444
|
+
*/
|
|
445
|
+
declare function groupAndReduce<R>(records: readonly unknown[], field: string, spec: AggregateSpec): R[];
|
|
446
|
+
/**
|
|
447
|
+
* Grouped aggregation wrapper — the `.groupBy(field).aggregate(spec)`
|
|
448
|
+
* terminal. Shape mirrors `Aggregation<R>` from aggregate.ts: two
|
|
449
|
+
* terminals (`.run()` and `.live()`), spec bound at construction
|
|
450
|
+
* time, upstreams collected for live mode.
|
|
451
|
+
*
|
|
452
|
+
* The generic `R` is the per-row result shape (i.e. a single
|
|
453
|
+
* grouped row), and the terminals return `R[]` — one row per
|
|
454
|
+
* bucket.
|
|
455
|
+
*/
|
|
456
|
+
declare class GroupedAggregation<R> {
|
|
457
|
+
private readonly executeRecords;
|
|
458
|
+
private readonly field;
|
|
459
|
+
private readonly spec;
|
|
460
|
+
private readonly upstreams;
|
|
461
|
+
/**
|
|
462
|
+
* Optional dict label resolver for `<field>Label` projection
|
|
463
|
+
*. Present when the grouping field is a dictKey.
|
|
464
|
+
*/
|
|
465
|
+
private readonly dictLabelResolver?;
|
|
466
|
+
constructor(executeRecords: () => readonly unknown[], field: string, spec: AggregateSpec, upstreams: readonly AggregationUpstream[],
|
|
467
|
+
/**
|
|
468
|
+
* Optional dict label resolver for `<field>Label` projection
|
|
469
|
+
*. Present when the grouping field is a dictKey.
|
|
470
|
+
*/
|
|
471
|
+
dictLabelResolver?: ((key: string, locale: string, fallback?: string | readonly string[]) => Promise<string | undefined>) | undefined);
|
|
472
|
+
/** Execute the query, group, reduce, and return an array of rows. */
|
|
473
|
+
run(): R[];
|
|
474
|
+
/**
|
|
475
|
+
* Execute the query, group, reduce, and resolve `<field>Label` for
|
|
476
|
+
* each result row when the grouping field is a `dictKey` and a
|
|
477
|
+
* `locale` is provided. Returns `R[]` synchronously when
|
|
478
|
+
* no locale is specified (identical to `.run()`).
|
|
479
|
+
*
|
|
480
|
+
* The `<field>Label` field is appended to each row. Rows whose group
|
|
481
|
+
* key has no dictionary entry get `<field>Label: undefined`.
|
|
482
|
+
*/
|
|
483
|
+
runAsync(opts?: {
|
|
484
|
+
locale?: string;
|
|
485
|
+
fallback?: string | readonly string[];
|
|
486
|
+
}): Promise<R[]>;
|
|
487
|
+
/**
|
|
488
|
+
* Build a reactive `LiveAggregation<R[]>` that re-runs the full
|
|
489
|
+
* group-and-reduce pipeline whenever any upstream source notifies
|
|
490
|
+
* of a change. Same error-isolation and idempotent-stop contract
|
|
491
|
+
* as `Aggregation.live()` — the implementation delegates to the
|
|
492
|
+
* same `LiveAggregationImpl` class by threading a fresh
|
|
493
|
+
* recompute closure through the existing constructor.
|
|
494
|
+
*
|
|
495
|
+
* uses naive full re-run on every change. Incremental
|
|
496
|
+
* per-bucket maintenance (apply `step` on inserted records,
|
|
497
|
+
* `remove` on deleted records, route by bucket key) is a future
|
|
498
|
+
* optimization — the reducer protocol admits it, but wiring
|
|
499
|
+
* delta-aware source subscriptions is a separate PR.
|
|
500
|
+
*
|
|
501
|
+
* Always call `live.stop()` when finished.
|
|
502
|
+
*/
|
|
503
|
+
live(): LiveAggregation<R[]>;
|
|
504
|
+
}
|
|
505
|
+
|
|
506
|
+
/**
|
|
507
|
+
* Strategy seam between the core Query / ScanBuilder chain and the
|
|
508
|
+
* optional aggregate / groupBy subsystem. Core imports
|
|
509
|
+
* `AggregateStrategy` as a TYPE-ONLY symbol and `NO_AGGREGATE` as a
|
|
510
|
+
* tiny runtime stub.
|
|
511
|
+
*
|
|
512
|
+
* The heavy machinery — `Aggregation`, `GroupedQuery`, the
|
|
513
|
+
* reducer-step logic — is only reachable from `withAggregate()` in
|
|
514
|
+
* `./active.ts`, which is only exported through the
|
|
515
|
+
* `@noy-db/hub/aggregate` subpath. Consumers that don't import the
|
|
516
|
+
* subpath ship none of the ~886 LOC.
|
|
517
|
+
*
|
|
518
|
+
* @internal
|
|
519
|
+
*/
|
|
520
|
+
|
|
521
|
+
/**
|
|
522
|
+
* Seam interface. `@internal` — will promote to public only when the
|
|
523
|
+
* aggregate subsystem is extracted into its own package.
|
|
524
|
+
*
|
|
525
|
+
* @internal
|
|
526
|
+
*/
|
|
527
|
+
interface AggregateStrategy {
|
|
528
|
+
/**
|
|
529
|
+
* Build an `Aggregation<R>` for `Query.aggregate(spec)`. `executeRecords`
|
|
530
|
+
* is a closure that produces the matching record set when the
|
|
531
|
+
* aggregation runs. NO_AGGREGATE throws; the active strategy
|
|
532
|
+
* constructs a real `Aggregation`.
|
|
533
|
+
*/
|
|
534
|
+
aggregate<Spec extends AggregateSpec>(executeRecords: () => readonly unknown[], spec: Spec, upstreams: readonly AggregationUpstream[]): Aggregation<AggregateResult<Spec>>;
|
|
535
|
+
/**
|
|
536
|
+
* Build a `GroupedQuery<T, F>` for `Query.groupBy(field)`. Same
|
|
537
|
+
* closure / upstream inputs as `aggregate` plus the group key field.
|
|
538
|
+
*/
|
|
539
|
+
groupBy<T, F extends string>(executeRecords: () => readonly unknown[], field: F, upstreams: readonly AggregationUpstream[], dictLabelResolver?: (key: string, locale: string, fallback?: string | readonly string[]) => Promise<string | undefined>): GroupedQuery<T, F>;
|
|
540
|
+
/**
|
|
541
|
+
* Terminal streaming aggregator for `ScanBuilder.aggregate(spec)`.
|
|
542
|
+
* Takes an async iterable of decrypted records + the spec and
|
|
543
|
+
* returns the reduced result.
|
|
544
|
+
*/
|
|
545
|
+
scanAggregate<Spec extends AggregateSpec>(iter: AsyncIterable<unknown>, spec: Spec): Promise<AggregateResult<Spec>>;
|
|
546
|
+
}
|
|
547
|
+
|
|
548
|
+
/* Bundler-generated re-export map: single-letter aliases for the chunk's public surface. */
export { type AggregateStrategy as A, GROUPBY_MAX_CARDINALITY as G, type LiveAggregation as L, type Reducer as R, type AggregateResult as a, type AggregateSpec as b, Aggregation as c, type AggregationUpstream as d, GROUPBY_WARN_CARDINALITY as e, GroupedAggregation as f, GroupedQuery as g, type GroupedRow as h, type ReducerOptions as i, avg as j, buildLiveAggregation as k, count as l, groupAndReduce as m, max as n, min as o, resetGroupByWarnings as p, reduceRecords as r, sum as s };
|