@noy-db/hub 0.1.0-pre.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +197 -0
- package/dist/aggregate/index.cjs +476 -0
- package/dist/aggregate/index.cjs.map +1 -0
- package/dist/aggregate/index.d.cts +38 -0
- package/dist/aggregate/index.d.ts +38 -0
- package/dist/aggregate/index.js +53 -0
- package/dist/aggregate/index.js.map +1 -0
- package/dist/blobs/index.cjs +1480 -0
- package/dist/blobs/index.cjs.map +1 -0
- package/dist/blobs/index.d.cts +45 -0
- package/dist/blobs/index.d.ts +45 -0
- package/dist/blobs/index.js +48 -0
- package/dist/blobs/index.js.map +1 -0
- package/dist/bundle/index.cjs +436 -0
- package/dist/bundle/index.cjs.map +1 -0
- package/dist/bundle/index.d.cts +7 -0
- package/dist/bundle/index.d.ts +7 -0
- package/dist/bundle/index.js +40 -0
- package/dist/bundle/index.js.map +1 -0
- package/dist/chunk-2QR2PQTT.js +217 -0
- package/dist/chunk-2QR2PQTT.js.map +1 -0
- package/dist/chunk-4OWFYIDQ.js +79 -0
- package/dist/chunk-4OWFYIDQ.js.map +1 -0
- package/dist/chunk-5AATM2M2.js +90 -0
- package/dist/chunk-5AATM2M2.js.map +1 -0
- package/dist/chunk-ACLDOTNQ.js +543 -0
- package/dist/chunk-ACLDOTNQ.js.map +1 -0
- package/dist/chunk-BTDCBVJW.js +160 -0
- package/dist/chunk-BTDCBVJW.js.map +1 -0
- package/dist/chunk-CIMZBAZB.js +72 -0
- package/dist/chunk-CIMZBAZB.js.map +1 -0
- package/dist/chunk-E445ICYI.js +365 -0
- package/dist/chunk-E445ICYI.js.map +1 -0
- package/dist/chunk-EXQRC2L4.js +722 -0
- package/dist/chunk-EXQRC2L4.js.map +1 -0
- package/dist/chunk-FZU343FL.js +32 -0
- package/dist/chunk-FZU343FL.js.map +1 -0
- package/dist/chunk-GJILMRPO.js +354 -0
- package/dist/chunk-GJILMRPO.js.map +1 -0
- package/dist/chunk-GOUT6DND.js +1285 -0
- package/dist/chunk-GOUT6DND.js.map +1 -0
- package/dist/chunk-J66GRPNH.js +111 -0
- package/dist/chunk-J66GRPNH.js.map +1 -0
- package/dist/chunk-M2F2JAWB.js +464 -0
- package/dist/chunk-M2F2JAWB.js.map +1 -0
- package/dist/chunk-M5INGEFC.js +84 -0
- package/dist/chunk-M5INGEFC.js.map +1 -0
- package/dist/chunk-M62XNWRA.js +72 -0
- package/dist/chunk-M62XNWRA.js.map +1 -0
- package/dist/chunk-MR4424N3.js +275 -0
- package/dist/chunk-MR4424N3.js.map +1 -0
- package/dist/chunk-NPC4LFV5.js +132 -0
- package/dist/chunk-NPC4LFV5.js.map +1 -0
- package/dist/chunk-NXFEYLVG.js +311 -0
- package/dist/chunk-NXFEYLVG.js.map +1 -0
- package/dist/chunk-R36SIKES.js +79 -0
- package/dist/chunk-R36SIKES.js.map +1 -0
- package/dist/chunk-TDR6T5CJ.js +381 -0
- package/dist/chunk-TDR6T5CJ.js.map +1 -0
- package/dist/chunk-UF3BUNQZ.js +1 -0
- package/dist/chunk-UF3BUNQZ.js.map +1 -0
- package/dist/chunk-UQFSPSWG.js +1109 -0
- package/dist/chunk-UQFSPSWG.js.map +1 -0
- package/dist/chunk-USKYUS74.js +793 -0
- package/dist/chunk-USKYUS74.js.map +1 -0
- package/dist/chunk-XCL3WP6J.js +121 -0
- package/dist/chunk-XCL3WP6J.js.map +1 -0
- package/dist/chunk-XHFOENR2.js +680 -0
- package/dist/chunk-XHFOENR2.js.map +1 -0
- package/dist/chunk-ZFKD4QMV.js +430 -0
- package/dist/chunk-ZFKD4QMV.js.map +1 -0
- package/dist/chunk-ZLMV3TUA.js +490 -0
- package/dist/chunk-ZLMV3TUA.js.map +1 -0
- package/dist/chunk-ZRG4V3F5.js +17 -0
- package/dist/chunk-ZRG4V3F5.js.map +1 -0
- package/dist/consent/index.cjs +204 -0
- package/dist/consent/index.cjs.map +1 -0
- package/dist/consent/index.d.cts +24 -0
- package/dist/consent/index.d.ts +24 -0
- package/dist/consent/index.js +23 -0
- package/dist/consent/index.js.map +1 -0
- package/dist/crdt/index.cjs +152 -0
- package/dist/crdt/index.cjs.map +1 -0
- package/dist/crdt/index.d.cts +30 -0
- package/dist/crdt/index.d.ts +30 -0
- package/dist/crdt/index.js +24 -0
- package/dist/crdt/index.js.map +1 -0
- package/dist/crypto-IVKU7YTT.js +44 -0
- package/dist/crypto-IVKU7YTT.js.map +1 -0
- package/dist/delegation-XDJCBTI2.js +16 -0
- package/dist/delegation-XDJCBTI2.js.map +1 -0
- package/dist/dev-unlock-CeXic1xC.d.cts +263 -0
- package/dist/dev-unlock-KrKkcqD3.d.ts +263 -0
- package/dist/hash-9KO1BGxh.d.cts +63 -0
- package/dist/hash-ChfJjRjQ.d.ts +63 -0
- package/dist/history/index.cjs +1215 -0
- package/dist/history/index.cjs.map +1 -0
- package/dist/history/index.d.cts +62 -0
- package/dist/history/index.d.ts +62 -0
- package/dist/history/index.js +79 -0
- package/dist/history/index.js.map +1 -0
- package/dist/i18n/index.cjs +746 -0
- package/dist/i18n/index.cjs.map +1 -0
- package/dist/i18n/index.d.cts +38 -0
- package/dist/i18n/index.d.ts +38 -0
- package/dist/i18n/index.js +55 -0
- package/dist/i18n/index.js.map +1 -0
- package/dist/index-BRHBCmLt.d.ts +1940 -0
- package/dist/index-C8kQtmOk.d.ts +380 -0
- package/dist/index-DN-J-5wT.d.cts +1940 -0
- package/dist/index-DhjMjz7L.d.cts +380 -0
- package/dist/index.cjs +14756 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +269 -0
- package/dist/index.d.ts +269 -0
- package/dist/index.js +6085 -0
- package/dist/index.js.map +1 -0
- package/dist/indexing/index.cjs +736 -0
- package/dist/indexing/index.cjs.map +1 -0
- package/dist/indexing/index.d.cts +36 -0
- package/dist/indexing/index.d.ts +36 -0
- package/dist/indexing/index.js +77 -0
- package/dist/indexing/index.js.map +1 -0
- package/dist/lazy-builder-BwEoBQZ9.d.ts +304 -0
- package/dist/lazy-builder-CZVLKh0Z.d.cts +304 -0
- package/dist/ledger-2NX4L7PN.js +33 -0
- package/dist/ledger-2NX4L7PN.js.map +1 -0
- package/dist/mime-magic-CBBSOkjm.d.cts +50 -0
- package/dist/mime-magic-CBBSOkjm.d.ts +50 -0
- package/dist/periods/index.cjs +1035 -0
- package/dist/periods/index.cjs.map +1 -0
- package/dist/periods/index.d.cts +21 -0
- package/dist/periods/index.d.ts +21 -0
- package/dist/periods/index.js +25 -0
- package/dist/periods/index.js.map +1 -0
- package/dist/predicate-SBHmi6D0.d.cts +161 -0
- package/dist/predicate-SBHmi6D0.d.ts +161 -0
- package/dist/query/index.cjs +1957 -0
- package/dist/query/index.cjs.map +1 -0
- package/dist/query/index.d.cts +3 -0
- package/dist/query/index.d.ts +3 -0
- package/dist/query/index.js +62 -0
- package/dist/query/index.js.map +1 -0
- package/dist/session/index.cjs +487 -0
- package/dist/session/index.cjs.map +1 -0
- package/dist/session/index.d.cts +45 -0
- package/dist/session/index.d.ts +45 -0
- package/dist/session/index.js +44 -0
- package/dist/session/index.js.map +1 -0
- package/dist/shadow/index.cjs +133 -0
- package/dist/shadow/index.cjs.map +1 -0
- package/dist/shadow/index.d.cts +16 -0
- package/dist/shadow/index.d.ts +16 -0
- package/dist/shadow/index.js +20 -0
- package/dist/shadow/index.js.map +1 -0
- package/dist/store/index.cjs +1069 -0
- package/dist/store/index.cjs.map +1 -0
- package/dist/store/index.d.cts +491 -0
- package/dist/store/index.d.ts +491 -0
- package/dist/store/index.js +34 -0
- package/dist/store/index.js.map +1 -0
- package/dist/strategy-BSxFXGzb.d.cts +110 -0
- package/dist/strategy-BSxFXGzb.d.ts +110 -0
- package/dist/strategy-D-SrOLCl.d.cts +548 -0
- package/dist/strategy-D-SrOLCl.d.ts +548 -0
- package/dist/sync/index.cjs +1062 -0
- package/dist/sync/index.cjs.map +1 -0
- package/dist/sync/index.d.cts +42 -0
- package/dist/sync/index.d.ts +42 -0
- package/dist/sync/index.js +28 -0
- package/dist/sync/index.js.map +1 -0
- package/dist/team/index.cjs +1233 -0
- package/dist/team/index.cjs.map +1 -0
- package/dist/team/index.d.cts +117 -0
- package/dist/team/index.d.ts +117 -0
- package/dist/team/index.js +39 -0
- package/dist/team/index.js.map +1 -0
- package/dist/tx/index.cjs +212 -0
- package/dist/tx/index.cjs.map +1 -0
- package/dist/tx/index.d.cts +20 -0
- package/dist/tx/index.d.ts +20 -0
- package/dist/tx/index.js +20 -0
- package/dist/tx/index.js.map +1 -0
- package/dist/types-BZpCZB8N.d.ts +7526 -0
- package/dist/types-Bfs0qr5F.d.cts +7526 -0
- package/dist/ulid-COREQ2RQ.js +9 -0
- package/dist/ulid-COREQ2RQ.js.map +1 -0
- package/dist/util/index.cjs +230 -0
- package/dist/util/index.cjs.map +1 -0
- package/dist/util/index.d.cts +77 -0
- package/dist/util/index.d.ts +77 -0
- package/dist/util/index.js +190 -0
- package/dist/util/index.js.map +1 -0
- package/package.json +244 -0
|
@@ -0,0 +1,1940 @@
|
|
|
1
|
+
import { C as CollectionIndexes, a as Clause, O as Operator } from './predicate-SBHmi6D0.cjs';
|
|
2
|
+
import { A as AggregateStrategy, b as AggregateSpec, c as Aggregation, a as AggregateResult, g as GroupedQuery } from './strategy-D-SrOLCl.cjs';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* All NOYDB error classes — a single import surface for `catch` blocks and
|
|
6
|
+
* `instanceof` checks.
|
|
7
|
+
*
|
|
8
|
+
* ## Class hierarchy
|
|
9
|
+
*
|
|
10
|
+
* ```
|
|
11
|
+
* Error
|
|
12
|
+
* └─ NoydbError (code: string)
|
|
13
|
+
* ├─ Crypto errors
|
|
14
|
+
* │ ├─ DecryptionError — AES-GCM tag failure
|
|
15
|
+
* │ ├─ TamperedError — ciphertext modified after write
|
|
16
|
+
* │ └─ InvalidKeyError — wrong passphrase / corrupt keyring
|
|
17
|
+
* ├─ Access errors
|
|
18
|
+
* │ ├─ NoAccessError — no DEK for this collection
|
|
19
|
+
* │ ├─ ReadOnlyError — ro permission, write attempted
|
|
20
|
+
* │ ├─ PermissionDeniedError — role too low for operation
|
|
21
|
+
* │ ├─ PrivilegeEscalationError — grant wider than grantor holds
|
|
22
|
+
* │ └─ StoreCapabilityError — optional store method missing
|
|
23
|
+
* ├─ Sync errors
|
|
24
|
+
* │ ├─ ConflictError — optimistic-lock version mismatch
|
|
25
|
+
* │ ├─ BundleVersionConflictError — bundle push rejected by remote
|
|
26
|
+
* │ └─ NetworkError — push/pull network failure
|
|
27
|
+
* ├─ Data errors
|
|
28
|
+
* │ ├─ NotFoundError — get(id) on missing record
|
|
29
|
+
* │ ├─ ValidationError — application-level guard failed
|
|
30
|
+
* │ └─ SchemaValidationError — Standard Schema v1 rejection
|
|
31
|
+
* ├─ Query errors
|
|
32
|
+
* │ ├─ JoinTooLargeError — join row ceiling exceeded
|
|
33
|
+
* │ ├─ DanglingReferenceError — strict ref() points at nothing
|
|
34
|
+
* │ ├─ GroupCardinalityError — groupBy bucket cap exceeded
|
|
35
|
+
* │ ├─ IndexRequiredError — lazy-mode query touches unindexed field
|
|
36
|
+
* │ └─ IndexWriteFailureError — index side-car put/delete failed post-main
|
|
37
|
+
* ├─ i18n / Dictionary errors
|
|
38
|
+
* │ ├─ ReservedCollectionNameError
|
|
39
|
+
* │ ├─ DictKeyMissingError
|
|
40
|
+
* │ ├─ DictKeyInUseError
|
|
41
|
+
* │ ├─ MissingTranslationError
|
|
42
|
+
* │ ├─ LocaleNotSpecifiedError
|
|
43
|
+
* │ └─ TranslatorNotConfiguredError
|
|
44
|
+
* ├─ Backup errors
|
|
45
|
+
* │ ├─ BackupLedgerError — hash-chain verification failed
|
|
46
|
+
* │ └─ BackupCorruptedError — envelope hash mismatch in dump
|
|
47
|
+
* ├─ Bundle errors
|
|
48
|
+
* │ └─ BundleIntegrityError — .noydb body sha256 mismatch
|
|
49
|
+
* └─ Session errors
|
|
50
|
+
* ├─ SessionExpiredError
|
|
51
|
+
* ├─ SessionNotFoundError
|
|
52
|
+
* └─ SessionPolicyError
|
|
53
|
+
* ```
|
|
54
|
+
*
|
|
55
|
+
* ## Catching all NOYDB errors
|
|
56
|
+
*
|
|
57
|
+
* ```ts
|
|
58
|
+
* import { NoydbError, InvalidKeyError, ConflictError } from '@noy-db/hub'
|
|
59
|
+
*
|
|
60
|
+
* try {
|
|
61
|
+
* await vault.unlock(passphrase)
|
|
62
|
+
* } catch (e) {
|
|
63
|
+
* if (e instanceof InvalidKeyError) { showBadPassphraseUI(); return }
|
|
64
|
+
* if (e instanceof NoydbError) { logToSentry(e.code, e); return }
|
|
65
|
+
* throw e // unexpected — re-throw
|
|
66
|
+
* }
|
|
67
|
+
* ```
|
|
68
|
+
*
|
|
69
|
+
* @module
|
|
70
|
+
*/
|
|
71
|
+
/**
|
|
72
|
+
* Base class for all NOYDB errors.
|
|
73
|
+
*
|
|
74
|
+
* Every error thrown by `@noy-db/hub` extends this class, so consumers can
|
|
75
|
+
* catch all NOYDB errors in a single `catch (e) { if (e instanceof NoydbError) ... }`
|
|
76
|
+
* block. The `code` field is a machine-readable string (e.g. `'DECRYPTION_FAILED'`)
|
|
77
|
+
* suitable for `switch` statements and logging pipelines.
|
|
78
|
+
*/
|
|
79
|
+
declare class NoydbError extends Error {
|
|
80
|
+
/** Machine-readable error code. Stable across library versions. */
|
|
81
|
+
readonly code: string;
|
|
82
|
+
constructor(code: string, message: string);
|
|
83
|
+
}
|
|
84
|
+
/**
|
|
85
|
+
* Thrown when AES-GCM decryption fails.
|
|
86
|
+
*
|
|
87
|
+
* The most common cause is a wrong passphrase or a corrupted ciphertext.
|
|
88
|
+
* A `DecryptionError` at the wrong passphrase level is caught internally
|
|
89
|
+
* and re-thrown as `InvalidKeyError` — so in practice this surfaces for
|
|
90
|
+
* per-record corruption rather than authentication failures.
|
|
91
|
+
*/
|
|
92
|
+
declare class DecryptionError extends NoydbError {
|
|
93
|
+
constructor(message?: string);
|
|
94
|
+
}
|
|
95
|
+
/**
|
|
96
|
+
* Thrown when GCM tag verification fails, indicating the ciphertext was
|
|
97
|
+
* modified after encryption.
|
|
98
|
+
*
|
|
99
|
+
* AES-256-GCM is authenticated encryption — the tag over the ciphertext
|
|
100
|
+
* is checked on every decrypt. If any byte was flipped (accidental
|
|
101
|
+
* corruption or deliberate tampering), decryption throws this error.
|
|
102
|
+
* Treat it as a security alert: the stored bytes are not what NOYDB wrote.
|
|
103
|
+
*/
|
|
104
|
+
declare class TamperedError extends NoydbError {
|
|
105
|
+
constructor(message?: string);
|
|
106
|
+
}
|
|
107
|
+
/**
|
|
108
|
+
* Thrown when key unwrapping fails, typically because the passphrase is wrong
|
|
109
|
+
* or the keyring file is corrupted.
|
|
110
|
+
*
|
|
111
|
+
* NOYDB uses AES-KW (RFC 3394) to wrap DEKs with the KEK. If AES-KW
|
|
112
|
+
* unwrapping fails, it means either the KEK was derived from the wrong
|
|
113
|
+
* passphrase (PBKDF2 with 600K iterations) or the keyring bytes are
|
|
114
|
+
* corrupted. This is the error shown to the user on a failed unlock attempt.
|
|
115
|
+
*/
|
|
116
|
+
declare class InvalidKeyError extends NoydbError {
|
|
117
|
+
constructor(message?: string);
|
|
118
|
+
}
|
|
119
|
+
/**
|
|
120
|
+
* Thrown when the authenticated user does not have a DEK for the requested
|
|
121
|
+
* collection — i.e. the collection is not in their keyring at all.
|
|
122
|
+
*
|
|
123
|
+
* This is the "no key for this door" error. It is different from
|
|
124
|
+
* `ReadOnlyError` (user has a key but it only grants ro) and from
|
|
125
|
+
* `PermissionDeniedError` (user's role doesn't allow the operation).
|
|
126
|
+
*/
|
|
127
|
+
declare class NoAccessError extends NoydbError {
|
|
128
|
+
constructor(message?: string);
|
|
129
|
+
}
|
|
130
|
+
/**
|
|
131
|
+
* Thrown when a user with read-only (`ro`) permission attempts a write
|
|
132
|
+
* operation (`put` or `delete`) on a collection.
|
|
133
|
+
*
|
|
134
|
+
* The user has a DEK for the collection (they can decrypt and read), but
|
|
135
|
+
* their keyring grants only `ro`. To fix: re-grant the user with `rw`
|
|
136
|
+
* permission, or do not attempt writes as a viewer/client role.
|
|
137
|
+
*/
|
|
138
|
+
declare class ReadOnlyError extends NoydbError {
|
|
139
|
+
constructor(message?: string);
|
|
140
|
+
}
|
|
141
|
+
/**
|
|
142
|
+
* Thrown when a write is attempted against a historical view produced
|
|
143
|
+
* by `vault.at(timestamp)`. Time-machine views are read-only by
|
|
144
|
+
* contract — mutating the past would require either the shadow-vault
|
|
145
|
+
* mechanism or a ledger-history rewrite (which breaks
|
|
146
|
+
* the tamper-evidence guarantee).
|
|
147
|
+
*
|
|
148
|
+
* Distinct from {@link ReadOnlyError} (keyring-level) and
|
|
149
|
+
* {@link PermissionDeniedError} (role-level): this error is about the
|
|
150
|
+
* *view* being historical, independent of the caller's permissions.
|
|
151
|
+
*/
|
|
152
|
+
declare class ReadOnlyAtInstantError extends NoydbError {
|
|
153
|
+
constructor(operation: string, timestamp: string);
|
|
154
|
+
}
|
|
155
|
+
/**
|
|
156
|
+
* Thrown when a write is attempted against a shadow-vault frame
|
|
157
|
+
* produced by `vault.frame()`. Frames are read-only by contract —
|
|
158
|
+
* the use case is screen-sharing / demos / compliance review where
|
|
159
|
+
* the operator wants to prevent accidental edits.
|
|
160
|
+
*
|
|
161
|
+
* Behavioural enforcement only — the underlying keyring still holds
|
|
162
|
+
* write-capable DEKs. See {@link VaultFrame} for the full caveat.
|
|
163
|
+
*/
|
|
164
|
+
declare class ReadOnlyFrameError extends NoydbError {
|
|
165
|
+
constructor(operation: string);
|
|
166
|
+
}
|
|
167
|
+
/**
|
|
168
|
+
* Thrown when the authenticated user's role does not permit the requested
|
|
169
|
+
* operation — e.g. a `viewer` calling `grantAccess()`, or an `operator`
|
|
170
|
+
* calling `rotateKeys()`.
|
|
171
|
+
*
|
|
172
|
+
* This is a role-level check (what the user's role allows), distinct from
|
|
173
|
+
* `NoAccessError` (collection not in keyring) and `ReadOnlyError` (in
|
|
174
|
+
* keyring, but write not allowed).
|
|
175
|
+
*/
|
|
176
|
+
declare class PermissionDeniedError extends NoydbError {
|
|
177
|
+
constructor(message?: string);
|
|
178
|
+
}
|
|
179
|
+
/**
|
|
180
|
+
* Thrown when an `@noy-db/as-*` export is attempted without the
|
|
181
|
+
* required capability bit on the invoking keyring.
|
|
182
|
+
*
|
|
183
|
+
* Two sub-cases discriminated by the `tier` field:
|
|
184
|
+
*
|
|
185
|
+
* - `tier: 'plaintext'` — a plaintext-tier export (`as-xlsx`,
|
|
186
|
+
* `as-csv`, `as-blob`, `as-zip`, …) was attempted but the
|
|
187
|
+
* keyring's `exportCapability.plaintext` does not include the
|
|
188
|
+
* requested `format` (nor the `'*'` wildcard). Default for every
|
|
189
|
+
* role is `plaintext: []` — the owner must positively grant.
|
|
190
|
+
* - `tier: 'bundle'` — an encrypted `as-noydb` bundle export was
|
|
191
|
+
* attempted but the keyring's `exportCapability.bundle` is
|
|
192
|
+
* `false`. Default for `owner`/`admin` is `true`; for
|
|
193
|
+
* `operator`/`viewer`/`client` it is `false`.
|
|
194
|
+
*
|
|
195
|
+
* Distinct from `PermissionDeniedError` (role-level check) and
|
|
196
|
+
* `NoAccessError` (collection not readable). Surfaces separately so
|
|
197
|
+
* UI layers can show a "request the export capability from your
|
|
198
|
+
* admin" flow rather than a generic permission error.
|
|
199
|
+
*/
|
|
200
|
+
declare class ExportCapabilityError extends NoydbError {
|
|
201
|
+
readonly tier: 'plaintext' | 'bundle';
|
|
202
|
+
readonly format?: string;
|
|
203
|
+
readonly userId: string;
|
|
204
|
+
constructor(opts: {
|
|
205
|
+
tier: 'plaintext' | 'bundle';
|
|
206
|
+
userId: string;
|
|
207
|
+
format?: string;
|
|
208
|
+
message?: string;
|
|
209
|
+
});
|
|
210
|
+
}
|
|
211
|
+
/**
|
|
212
|
+
* Thrown when a keyring file's `expires_at` cutoff has passed.
|
|
213
|
+
* Surfaced by `loadKeyring` before any DEK unwrap is attempted —
|
|
214
|
+
* past the cutoff the slot refuses to open even with the right
|
|
215
|
+
* passphrase. Distinct from PBKDF2 / unwrap errors so consumer code
|
|
216
|
+
* can show a precise "this bundle slot has expired" message instead
|
|
217
|
+
* of the generic decryption-failure UX.
|
|
218
|
+
*
|
|
219
|
+
* Used predominantly on `BundleRecipient` slots produced by
|
|
220
|
+
* `writeNoydbBundle({ recipients: [...] })` to time-box audit access.
|
|
221
|
+
*/
|
|
222
|
+
declare class KeyringExpiredError extends NoydbError {
|
|
223
|
+
readonly userId: string;
|
|
224
|
+
readonly expiresAt: string;
|
|
225
|
+
constructor(opts: {
|
|
226
|
+
userId: string;
|
|
227
|
+
expiresAt: string;
|
|
228
|
+
});
|
|
229
|
+
}
|
|
230
|
+
/**
|
|
231
|
+
* Thrown when an `@noy-db/as-*` import is attempted but the invoking
|
|
232
|
+
* keyring lacks the required import-capability bit (issue ).
|
|
233
|
+
*
|
|
234
|
+
* - `tier: 'plaintext'` — a plaintext-tier import (`as-csv`, `as-json`,
|
|
235
|
+
* `as-ndjson`, `as-zip`, …) was attempted but the keyring's
|
|
236
|
+
* `importCapability.plaintext` does not include the requested
|
|
237
|
+
* `format` (nor the `'*'` wildcard).
|
|
238
|
+
* - `tier: 'bundle'` — a `.noydb` bundle import was attempted but the
|
|
239
|
+
* keyring's `importCapability.bundle` is not `true`.
|
|
240
|
+
*
|
|
241
|
+
* Default for every role on every dimension is closed — owners and
|
|
242
|
+
* admins must positively grant the capability. Distinct from
|
|
243
|
+
* `PermissionDeniedError` and `NoAccessError` so UI layers can show a
|
|
244
|
+
* specific "request the import capability" flow.
|
|
245
|
+
*/
|
|
246
|
+
declare class ImportCapabilityError extends NoydbError {
|
|
247
|
+
readonly tier: 'plaintext' | 'bundle';
|
|
248
|
+
readonly format?: string;
|
|
249
|
+
readonly userId: string;
|
|
250
|
+
constructor(opts: {
|
|
251
|
+
tier: 'plaintext' | 'bundle';
|
|
252
|
+
userId: string;
|
|
253
|
+
format?: string;
|
|
254
|
+
message?: string;
|
|
255
|
+
});
|
|
256
|
+
}
|
|
257
|
+
/**
|
|
258
|
+
* Thrown when a grant would give the grantee a permission the grantor
|
|
259
|
+
* does not themselves hold — the "admin cannot grant what admin cannot
|
|
260
|
+
* do" rule from the admin-delegation work.
|
|
261
|
+
*
|
|
262
|
+
* Distinct from `PermissionDeniedError` so callers can tell the two
|
|
263
|
+
* cases apart in logs and tests:
|
|
264
|
+
*
|
|
265
|
+
* - `PermissionDeniedError` — "you are not allowed to perform this
|
|
266
|
+
* operation at all" (wrong role).
|
|
267
|
+
* - `PrivilegeEscalationError` — "you are allowed to grant, but not
|
|
268
|
+
* with these specific permissions" (widening attempt).
|
|
269
|
+
*
|
|
270
|
+
* Under the admin model the grantee of an admin-grants-admin call
|
|
271
|
+
* inherits the caller's entire DEK set by construction, so this error
|
|
272
|
+
* is structurally unreachable in typical flows. The check and error
|
|
273
|
+
* class exist so that future per-collection admin scoping cannot
|
|
274
|
+
* accidentally bypass the subset rule — the guard is already wired in.
|
|
275
|
+
*
|
|
276
|
+
* `offendingCollection` carries the first collection name that failed
|
|
277
|
+
* the subset check, to make the violation actionable in error output.
|
|
278
|
+
*/
|
|
279
|
+
/**
|
|
280
|
+
* Thrown when a caller invokes an API that requires an optional
|
|
281
|
+
* store capability the active store does not implement.
|
|
282
|
+
*
|
|
283
|
+
* Today the only call site is `Noydb.listAccessibleVaults()`,
|
|
284
|
+
* which depends on the optional `NoydbStore.listVaults()`
|
|
285
|
+
* method. The error message names the missing method and the calling
|
|
286
|
+
* API so consumers know exactly which combination is unsupported,
|
|
287
|
+
* and the `capability` field is machine-readable so library code can
|
|
288
|
+
* pattern-match in catch blocks (e.g. fall back to a candidate-list
|
|
289
|
+
* shape).
|
|
290
|
+
*
|
|
291
|
+
* The class lives in `errors.ts` rather than as a generic
|
|
292
|
+
* `ValidationError` because the diagnostic shape is different: a
|
|
293
|
+
* `ValidationError` says "the inputs you passed are wrong"; this
|
|
294
|
+
* error says "the inputs are fine, but the store you wired up
|
|
295
|
+
* doesn't support what you're asking for." Different fix, different
|
|
296
|
+
* documentation.
|
|
297
|
+
*/
|
|
298
|
+
declare class StoreCapabilityError extends NoydbError {
|
|
299
|
+
/** The store method/capability that was missing. */
|
|
300
|
+
readonly capability: string;
|
|
301
|
+
constructor(capability: string, callerApi: string, storeName?: string);
|
|
302
|
+
}
|
|
303
|
+
declare class PrivilegeEscalationError extends NoydbError {
|
|
304
|
+
readonly offendingCollection: string;
|
|
305
|
+
constructor(offendingCollection: string, message?: string);
|
|
306
|
+
}
|
|
307
|
+
/**
|
|
308
|
+
* Thrown by `Collection.put` / `.delete` when the target record's
|
|
309
|
+
* envelope `_ts` falls within a closed accounting period.
|
|
310
|
+
*
|
|
311
|
+
* Distinct from `ReadOnlyError` (keyring-level), `ReadOnlyAtInstantError`
|
|
312
|
+
* (historical view), and `ReadOnlyFrameError` (shadow vault): this
|
|
313
|
+
* error is about the STORED RECORD being sealed by an operator call
|
|
314
|
+
* to `vault.closePeriod()`, independent of caller permissions or
|
|
315
|
+
* view type. The `periodName` and `endDate` fields name the sealing
|
|
316
|
+
* period so audit UIs can surface a "this record is locked in
|
|
317
|
+
* FY2026-Q1 (closed 2026-03-31)" message without parsing the error
|
|
318
|
+
* string.
|
|
319
|
+
*
|
|
320
|
+
* To apply a correction after close, book a compensating entry in a
|
|
321
|
+
* new period rather than unlocking the old one. Re-opening a closed
|
|
322
|
+
* period is deliberately unsupported.
|
|
323
|
+
*/
|
|
324
|
+
declare class PeriodClosedError extends NoydbError {
|
|
325
|
+
readonly periodName: string;
|
|
326
|
+
readonly endDate: string;
|
|
327
|
+
readonly recordTs: string;
|
|
328
|
+
constructor(periodName: string, endDate: string, recordTs: string);
|
|
329
|
+
}
|
|
330
|
+
/**
|
|
331
|
+
* Thrown when a user tries to act at a tier they are not cleared for.
|
|
332
|
+
*
|
|
333
|
+
* This is the umbrella error for tier write refusals:
|
|
334
|
+
* - `put({ tier: N })` when the user's keyring lacks tier-N DEK.
|
|
335
|
+
* - `elevate(id, N)` when the caller cannot reach tier N.
|
|
336
|
+
*
|
|
337
|
+
* Distinct from `TierAccessDeniedError` which covers *read* refusals on
|
|
338
|
+
* the invisibility/ghost path.
|
|
339
|
+
*/
|
|
340
|
+
declare class TierNotGrantedError extends NoydbError {
|
|
341
|
+
readonly tier: number;
|
|
342
|
+
readonly collection: string;
|
|
343
|
+
constructor(collection: string, tier: number);
|
|
344
|
+
}
|
|
345
|
+
/**
|
|
346
|
+
* Thrown when an elevated-handle operation runs after the elevation's
|
|
347
|
+
* TTL expired. Reads continue at the original tier; only writes
|
|
348
|
+
* through the scoped handle flip to throwing once expired.
|
|
349
|
+
*/
|
|
350
|
+
declare class ElevationExpiredError extends NoydbError {
|
|
351
|
+
readonly tier: number;
|
|
352
|
+
readonly expiresAt: number;
|
|
353
|
+
constructor(opts: {
|
|
354
|
+
tier: number;
|
|
355
|
+
expiresAt: number;
|
|
356
|
+
});
|
|
357
|
+
}
|
|
358
|
+
/**
|
|
359
|
+
* Thrown by `vault.elevate(...)` when an elevation is already active
|
|
360
|
+
* on the vault. Adopters must `release()` the existing handle before
|
|
361
|
+
* starting a new elevation.
|
|
362
|
+
*/
|
|
363
|
+
declare class AlreadyElevatedError extends NoydbError {
|
|
364
|
+
readonly activeTier: number;
|
|
365
|
+
constructor(activeTier: number);
|
|
366
|
+
}
|
|
367
|
+
/**
|
|
368
|
+
* Thrown when `demote()` is called by someone who is not the original
|
|
369
|
+
* elevator and not an owner.
|
|
370
|
+
*/
|
|
371
|
+
declare class TierDemoteDeniedError extends NoydbError {
|
|
372
|
+
constructor(id: string, tier: number);
|
|
373
|
+
}
|
|
374
|
+
/**
|
|
375
|
+
* Thrown when `db.delegate()` is called against a user that has no
|
|
376
|
+
* keyring in the target vault — the delegation token cannot be
|
|
377
|
+
* constructed without the target user's KEK wrap.
|
|
378
|
+
*/
|
|
379
|
+
declare class DelegationTargetMissingError extends NoydbError {
|
|
380
|
+
readonly toUser: string;
|
|
381
|
+
constructor(toUser: string);
|
|
382
|
+
}
|
|
383
|
+
/**
|
|
384
|
+
* Thrown when a `put()` detects an optimistic concurrency conflict.
|
|
385
|
+
*
|
|
386
|
+
* NOYDB uses version numbers (`_v`) for optimistic locking. If a `put()`
|
|
387
|
+
* is called with `expectedVersion: N` but the stored record is at version
|
|
388
|
+
* `M ≠ N`, the write is rejected and the caller must re-read, re-apply their
|
|
389
|
+
* change, and retry. The `version` field carries the actual stored version
|
|
390
|
+
* so callers can decide whether to retry or surface the conflict to the user.
|
|
391
|
+
*/
|
|
392
|
+
declare class ConflictError extends NoydbError {
|
|
393
|
+
/** The actual stored version at the time of conflict. */
|
|
394
|
+
readonly version: number;
|
|
395
|
+
constructor(version: number, message?: string);
|
|
396
|
+
}
|
|
397
|
+
/**
|
|
398
|
+
* Thrown by `LedgerStore.append()` after exhausting its CAS retry
|
|
399
|
+
* budget under multi-writer contention. Two browser tabs, a
|
|
400
|
+
* web app + an offline mobile peer, or a server worker pool all
|
|
401
|
+
* producing ledger entries against the same vault can race on the
|
|
402
|
+
* "read head, write head+1" cycle; the optimistic-CAS retry loop
|
|
403
|
+
* resolves the race for `casAtomic: true` stores, but pathological
|
|
404
|
+
* contention (or a buggy peer) can still exhaust the budget. When
|
|
405
|
+
* that happens, the chain is intact — the failed writer simply
|
|
406
|
+
* couldn't claim a slot. Caller's choice whether to retry, queue,
|
|
407
|
+
* or surface the failure to the user.
|
|
408
|
+
*/
|
|
409
|
+
declare class LedgerContentionError extends NoydbError {
|
|
410
|
+
readonly attempts: number;
|
|
411
|
+
constructor(attempts: number);
|
|
412
|
+
}
|
|
413
|
+
/**
|
|
414
|
+
* Thrown when a bundle push is rejected because the remote has been updated
|
|
415
|
+
* since the local bundle was last pulled.
|
|
416
|
+
*
|
|
417
|
+
* Unlike `ConflictError` (per-record), this is a whole-bundle conflict —
|
|
418
|
+
* the remote's bundle handle has changed. The caller must pull the new
|
|
419
|
+
* bundle, merge, and re-push. `remoteVersion` is the handle of the newer
|
|
420
|
+
* remote bundle for use in diagnostics.
|
|
421
|
+
*/
|
|
422
|
+
declare class BundleVersionConflictError extends NoydbError {
|
|
423
|
+
/** The bundle handle of the newer remote version that rejected the push. */
|
|
424
|
+
readonly remoteVersion: string;
|
|
425
|
+
constructor(remoteVersion: string, message?: string);
|
|
426
|
+
}
|
|
427
|
+
/**
|
|
428
|
+
* Thrown when a sync operation (push or pull) fails due to a network error.
|
|
429
|
+
*
|
|
430
|
+
* NOYDB's offline-first design means network errors are expected during sync.
|
|
431
|
+
* Callers should catch `NetworkError`, surface connectivity status in the UI,
|
|
432
|
+
* and rely on the `SyncScheduler` to retry when connectivity is restored.
|
|
433
|
+
*/
|
|
434
|
+
declare class NetworkError extends NoydbError {
|
|
435
|
+
constructor(message?: string);
|
|
436
|
+
}
|
|
437
|
+
/**
|
|
438
|
+
* Thrown when `collection.get(id)` is called with an ID that does not exist.
|
|
439
|
+
*
|
|
440
|
+
* NOYDB collections are memory-first, so this error is synchronous and cheap —
|
|
441
|
+
* it does not make a network round-trip. Callers that expect the record to be
|
|
442
|
+
* absent should use `collection.getOrNull(id)` instead.
|
|
443
|
+
*/
|
|
444
|
+
declare class NotFoundError extends NoydbError {
|
|
445
|
+
constructor(message?: string);
|
|
446
|
+
}
|
|
447
|
+
/**
|
|
448
|
+
* Thrown when application-level validation fails before encryption.
|
|
449
|
+
*
|
|
450
|
+
* Distinct from `SchemaValidationError` (Standard Schema v1 validator)
|
|
451
|
+
* and `MissingTranslationError` (i18nText). `ValidationError` is the
|
|
452
|
+
* general-purpose validation base — use it for custom guards in `put()`
|
|
453
|
+
* hooks or store middleware.
|
|
454
|
+
*/
|
|
455
|
+
declare class ValidationError extends NoydbError {
|
|
456
|
+
constructor(message?: string);
|
|
457
|
+
}
|
|
458
|
+
/**
|
|
459
|
+
* Thrown when a Standard Schema v1 validator rejects a record on
|
|
460
|
+
* `put()` (input validation) or on read (output validation). Carries
|
|
461
|
+
* the raw issue list so callers can render field-level errors.
|
|
462
|
+
*
|
|
463
|
+
* `direction` distinguishes the two cases:
|
|
464
|
+
* - `'input'`: the user passed bad data into `put()`. This is a
|
|
465
|
+
* normal error case that application code should handle — typically
|
|
466
|
+
* by showing validation messages in the UI.
|
|
467
|
+
* - `'output'`: stored data does not match the current schema. This
|
|
468
|
+
* indicates a schema drift (the schema was changed without
|
|
469
|
+
* migrating the existing records) and should be treated as a bug
|
|
470
|
+
* — the application should not swallow it silently.
|
|
471
|
+
*
|
|
472
|
+
* The `issues` type is deliberately `readonly unknown[]` on this class
|
|
473
|
+
* so that `errors.ts` doesn't need to import from `schema.ts` (and
|
|
474
|
+
* create a dependency cycle). Callers who know they're holding a
|
|
475
|
+
* `SchemaValidationError` can cast to the more precise
|
|
476
|
+
* `readonly StandardSchemaV1Issue[]` from `schema.ts`.
|
|
477
|
+
*/
|
|
478
|
+
declare class SchemaValidationError extends NoydbError {
|
|
479
|
+
readonly issues: readonly unknown[];
|
|
480
|
+
readonly direction: 'input' | 'output';
|
|
481
|
+
constructor(message: string, issues: readonly unknown[], direction: 'input' | 'output');
|
|
482
|
+
}
|
|
483
|
+
/**
|
|
484
|
+
* Thrown when `.groupBy().aggregate()` produces more than the hard
|
|
485
|
+
 * cardinality cap (default 100_000 groups).
|
|
486
|
+
*
|
|
487
|
+
* The cap exists because `.groupBy()` materializes one bucket per
|
|
488
|
+
* distinct key value in memory, and runaway cardinality — a groupBy
|
|
489
|
+
* on a high-uniqueness field like `id` or `createdAt` — is almost
|
|
490
|
+
* always a query mistake rather than legitimate use. A hard error is
|
|
491
|
+
* better than silent OOM: the consumer sees an actionable message
|
|
492
|
+
* naming the field and the observed cardinality, with guidance to
|
|
493
|
+
* either narrow the query with `.where()` or accept the ceiling
|
|
494
|
+
* override.
|
|
495
|
+
*
|
|
496
|
+
* A separate one-shot warning fires at 10% of the cap (10_000
|
|
497
|
+
* groups) so consumers get a heads-up before the hard error — same
|
|
498
|
+
* pattern as `JoinTooLargeError` and the `.join()` row ceiling.
|
|
499
|
+
*
|
|
500
|
+
 * **Not overridable.** The 100k cap is a fixed constant so
|
|
501
|
+
* the failure mode is consistent across the codebase; a
|
|
502
|
+
* `{ maxGroups }` override can be added later without a break if a
|
|
503
|
+
* real consumer asks.
|
|
504
|
+
*/
|
|
505
|
+
declare class GroupCardinalityError extends NoydbError {
|
|
506
|
+
/** The field being grouped on. */
|
|
507
|
+
readonly field: string;
|
|
508
|
+
/** Observed number of distinct groups at the moment the cap tripped. */
|
|
509
|
+
readonly cardinality: number;
|
|
510
|
+
/** The cap that was exceeded. */
|
|
511
|
+
readonly maxGroups: number;
|
|
512
|
+
constructor(field: string, cardinality: number, maxGroups: number);
|
|
513
|
+
}
|
|
514
|
+
/**
|
|
515
|
+
* Thrown in lazy mode when a `.query()` / `.where()` / `.orderBy()` clause
|
|
516
|
+
* references a field that does not have a declared index.
|
|
517
|
+
*
|
|
518
|
+
* Lazy-mode queries only work when every touched field is indexed.
|
|
519
|
+
* This is deliberate — silent scan-fallback would hide the performance
|
|
520
|
+
* cliff that lazy-mode indexes exist to prevent.
|
|
521
|
+
*
|
|
522
|
+
* Payload:
|
|
523
|
+
* - `collection` — name of the collection queried
|
|
524
|
+
* - `touchedFields` — every field referenced by the query (filter + order)
|
|
525
|
+
* - `missingFields` — subset of `touchedFields` that have no declared index
|
|
526
|
+
*/
|
|
527
|
+
declare class IndexRequiredError extends NoydbError {
|
|
528
|
+
readonly collection: string;
|
|
529
|
+
readonly touchedFields: readonly string[];
|
|
530
|
+
readonly missingFields: readonly string[];
|
|
531
|
+
constructor(args: {
|
|
532
|
+
collection: string;
|
|
533
|
+
touchedFields: readonly string[];
|
|
534
|
+
missingFields: readonly string[];
|
|
535
|
+
});
|
|
536
|
+
}
|
|
537
|
+
/**
|
|
538
|
+
* Thrown (or surfaced via the `index:write-partial` event) when one or more
|
|
539
|
+
* per-indexed-field side-car writes fail after the main record write has
|
|
540
|
+
* already succeeded.
|
|
541
|
+
*
|
|
542
|
+
* Not thrown out of `.put()` / `.delete()` directly — those succeed when the
|
|
543
|
+
* main record succeeds. Instead, `IndexWriteFailureError` instances are collected
|
|
544
|
+
* into the session-scoped reconcile queue and emitted on the Collection
|
|
545
|
+
* emitter as `index:write-partial`.
|
|
546
|
+
*
|
|
547
|
+
* Payload:
|
|
548
|
+
* - `recordId` — the id of the main record whose side-car writes failed
|
|
549
|
+
* - `field` — the indexed field whose side-car write failed
|
|
550
|
+
* - `op` — `'put'` or `'delete'`, indicating which mutation was in flight
|
|
551
|
+
* - `cause` — the underlying error from the store
|
|
552
|
+
*/
|
|
553
|
+
declare class IndexWriteFailureError extends NoydbError {
|
|
554
|
+
readonly recordId: string;
|
|
555
|
+
readonly field: string;
|
|
556
|
+
readonly op: 'put' | 'delete';
|
|
557
|
+
readonly cause: unknown;
|
|
558
|
+
constructor(args: {
|
|
559
|
+
recordId: string;
|
|
560
|
+
field: string;
|
|
561
|
+
op: 'put' | 'delete';
|
|
562
|
+
cause: unknown;
|
|
563
|
+
});
|
|
564
|
+
}
|
|
565
|
+
/**
|
|
566
|
+
* Thrown by `readNoydbBundle()` when the body bytes don't match
|
|
567
|
+
* the integrity hash declared in the bundle header — i.e. someone
|
|
568
|
+
* modified the bytes between write and read.
|
|
569
|
+
*
|
|
570
|
+
* Distinct from a generic `Error` (which would be thrown for
|
|
571
|
+
* format violations like a missing magic prefix or malformed
|
|
572
|
+
* header JSON) so consumers can pattern-match the corruption case
|
|
573
|
+
* and handle it differently from a producer bug. A
|
|
574
|
+
* `BundleIntegrityError` indicates "the bytes you got are not
|
|
575
|
+
* what was written"; a plain `Error` from `parsePrefixAndHeader`
|
|
576
|
+
* indicates "what was written wasn't a valid bundle in the first
|
|
577
|
+
* place."
|
|
578
|
+
*
|
|
579
|
+
* Also thrown when decompression fails after the integrity hash
|
|
580
|
+
* passed — that's a producer bug (the wrong algorithm byte was
|
|
581
|
+
* written) but it surfaces with the same error class because the
|
|
582
|
+
* end result is "the body cannot be turned back into a dump."
|
|
583
|
+
*/
|
|
584
|
+
declare class BundleIntegrityError extends NoydbError {
|
|
585
|
+
constructor(message: string);
|
|
586
|
+
}
|
|
587
|
+
/**
|
|
588
|
+
* Thrown when `vault.collection()` is called with a name that is
|
|
589
|
+
* reserved for NOYDB internal use (any name starting with `_dict_`).
|
|
590
|
+
*
|
|
591
|
+
* Dictionary collections are accessed exclusively via
|
|
592
|
+
* `vault.dictionary(name)` — attempting to open one as a regular
|
|
593
|
+
* collection would bypass the dictionary invariants (ACL, rename
|
|
594
|
+
* tracking, reserved-name policy).
|
|
595
|
+
*/
|
|
596
|
+
declare class ReservedCollectionNameError extends NoydbError {
|
|
597
|
+
/** The rejected collection name. */
|
|
598
|
+
readonly collectionName: string;
|
|
599
|
+
constructor(collectionName: string);
|
|
600
|
+
}
|
|
601
|
+
/**
|
|
602
|
+
* Thrown by `DictionaryHandle.get()` and `DictionaryHandle.delete()` when
|
|
603
|
+
* the requested key does not exist in the dictionary.
|
|
604
|
+
*
|
|
605
|
+
* Distinct from `NotFoundError` (which is for data records) so callers
|
|
606
|
+
* can distinguish "data record missing" from "dictionary key missing"
|
|
607
|
+
* without inspecting error messages.
|
|
608
|
+
*/
|
|
609
|
+
declare class DictKeyMissingError extends NoydbError {
|
|
610
|
+
/** The dictionary name. */
|
|
611
|
+
readonly dictionaryName: string;
|
|
612
|
+
/** The key that was not found. */
|
|
613
|
+
readonly key: string;
|
|
614
|
+
constructor(dictionaryName: string, key: string);
|
|
615
|
+
}
|
|
616
|
+
/**
|
|
617
|
+
* Thrown by `DictionaryHandle.delete()` in strict mode when the key to
|
|
618
|
+
* be deleted is still referenced by one or more records.
|
|
619
|
+
*
|
|
620
|
+
* The caller must either rename the key first (the only sanctioned
|
|
621
|
+
* mass-mutation path) or pass `{ mode: 'warn' }` to skip the check
|
|
622
|
+
* (development only).
|
|
623
|
+
*/
|
|
624
|
+
declare class DictKeyInUseError extends NoydbError {
|
|
625
|
+
/** The dictionary name. */
|
|
626
|
+
readonly dictionaryName: string;
|
|
627
|
+
/** The key that is still referenced. */
|
|
628
|
+
readonly key: string;
|
|
629
|
+
/** Name of the first collection found to reference this key. */
|
|
630
|
+
readonly usedBy: string;
|
|
631
|
+
/** Number of records in `usedBy` that reference this key. */
|
|
632
|
+
readonly count: number;
|
|
633
|
+
constructor(dictionaryName: string, key: string, usedBy: string, count: number);
|
|
634
|
+
}
|
|
635
|
+
/**
|
|
636
|
+
* Thrown by `Collection.put()` when an `i18nText` field is missing one
|
|
637
|
+
* or more required translations.
|
|
638
|
+
*
|
|
639
|
+
* The `missing` array names each locale code that was absent from the
|
|
640
|
+
* field value. The `field` property names the field so callers can
|
|
641
|
+
* render a field-level error message without parsing the string.
|
|
642
|
+
*/
|
|
643
|
+
declare class MissingTranslationError extends NoydbError {
|
|
644
|
+
/** The field name whose translation(s) are missing. */
|
|
645
|
+
readonly field: string;
|
|
646
|
+
/** Locale codes that were required but absent. */
|
|
647
|
+
readonly missing: readonly string[];
|
|
648
|
+
constructor(field: string, missing: readonly string[], message?: string);
|
|
649
|
+
}
|
|
650
|
+
/**
|
|
651
|
+
* Thrown when reading an `i18nText` field without specifying a locale —
|
|
652
|
+
* either at the call site (`get(id, { locale })`) or on the vault
|
|
653
|
+
* (`openVault(name, { locale })`).
|
|
654
|
+
*
|
|
655
|
+
* Also thrown when `resolveI18nText()` exhausts the fallback chain and
|
|
656
|
+
* no translation is available for the requested locale.
|
|
657
|
+
*
|
|
658
|
+
* The `field` property names the field that triggered the error so the
|
|
659
|
+
* caller can surface it in the UI.
|
|
660
|
+
*/
|
|
661
|
+
declare class LocaleNotSpecifiedError extends NoydbError {
|
|
662
|
+
/** The field name that required a locale. */
|
|
663
|
+
readonly field: string;
|
|
664
|
+
constructor(field: string, message?: string);
|
|
665
|
+
}
|
|
666
|
+
/**
|
|
667
|
+
* Thrown when a collection has an `i18nText` field with
|
|
668
|
+
* `autoTranslate: true` but no `plaintextTranslator` was configured
|
|
669
|
+
* on `createNoydb()`.
|
|
670
|
+
*
|
|
671
|
+
* The error is raised at `put()` time (not at schema construction) so
|
|
672
|
+
* the mis-configuration is surfaced by the first write rather than
|
|
673
|
+
* silently at startup.
|
|
674
|
+
*/
|
|
675
|
+
declare class TranslatorNotConfiguredError extends NoydbError {
|
|
676
|
+
/** The field that requested auto-translation. */
|
|
677
|
+
readonly field: string;
|
|
678
|
+
/** The collection the put was targeting. */
|
|
679
|
+
readonly collection: string;
|
|
680
|
+
constructor(field: string, collection: string);
|
|
681
|
+
}
|
|
682
|
+
/**
|
|
683
|
+
* Thrown when `Vault.load()` finds that a backup's hash chain
|
|
684
|
+
* doesn't verify, or that its embedded `ledgerHead.hash` doesn't
|
|
685
|
+
* match the chain head reconstructed from the loaded entries.
|
|
686
|
+
*
|
|
687
|
+
* Distinct from `BackupCorruptedError` so callers can choose to
|
|
688
|
+
* recover from one but not the other (e.g., a corrupted JSON file is
|
|
689
|
+
* unrecoverable; a chain mismatch might mean the backup is from an
|
|
690
|
+
* incompatible noy-db version).
|
|
691
|
+
*/
|
|
692
|
+
declare class BackupLedgerError extends NoydbError {
|
|
693
|
+
/** First-broken-entry index, if known. */
|
|
694
|
+
readonly divergedAt?: number;
|
|
695
|
+
constructor(message: string, divergedAt?: number);
|
|
696
|
+
}
|
|
697
|
+
/**
|
|
698
|
+
* Thrown when `Vault.load()` finds that the backup's data
|
|
699
|
+
* collection content doesn't match the ledger's recorded
|
|
700
|
+
* `payloadHash`es. This is the "envelope was tampered with after
|
|
701
|
+
* dump" detection — the chain itself can be intact, but if any
|
|
702
|
+
* encrypted record bytes were swapped, this check catches it.
|
|
703
|
+
*/
|
|
704
|
+
declare class BackupCorruptedError extends NoydbError {
|
|
705
|
+
/** The (collection, id) pair whose envelope failed the hash check. */
|
|
706
|
+
readonly collection: string;
|
|
707
|
+
readonly id: string;
|
|
708
|
+
constructor(collection: string, id: string, message: string);
|
|
709
|
+
}
|
|
710
|
+
/**
|
|
711
|
+
* Thrown by `resolveSession()` when the session token's `expiresAt`
|
|
712
|
+
* timestamp is in the past. The session key is also removed from the
|
|
713
|
+
* in-memory store when this is thrown, so retrying with the same sessionId
|
|
714
|
+
* will produce `SessionNotFoundError`.
|
|
715
|
+
*
|
|
716
|
+
* Separate from `SessionNotFoundError` so callers can distinguish between
|
|
717
|
+
* "session is gone" (key store cleared, tab reloaded) and "session is
|
|
718
|
+
* still in the store but has exceeded its lifetime" (idle timeout, absolute
|
|
719
|
+
* timeout, policy-driven expiry). The remediation differs: expired sessions
|
|
720
|
+
* should prompt a fresh unlock; not-found sessions may indicate a bug or a
|
|
721
|
+
* cross-tab scenario where the session was never established.
|
|
722
|
+
*/
|
|
723
|
+
declare class SessionExpiredError extends NoydbError {
|
|
724
|
+
readonly sessionId: string;
|
|
725
|
+
constructor(sessionId: string);
|
|
726
|
+
}
|
|
727
|
+
/**
|
|
728
|
+
* Thrown by `resolveSession()` when the session key cannot be found in
|
|
729
|
+
* the module-level store. This happens when:
|
|
730
|
+
* - The session was explicitly revoked via `revokeSession()`.
|
|
731
|
+
* - The JS context was reloaded (tab navigation, page refresh, worker restart).
|
|
732
|
+
* - `Noydb.close()` was called (which calls `revokeAllSessions()`).
|
|
733
|
+
* - The sessionId is wrong or was generated by a different JS context.
|
|
734
|
+
*
|
|
735
|
+
* The session token (if the caller holds it) is permanently useless after
|
|
736
|
+
* this error — the key is gone and cannot be recovered.
|
|
737
|
+
*/
|
|
738
|
+
declare class SessionNotFoundError extends NoydbError {
|
|
739
|
+
readonly sessionId: string;
|
|
740
|
+
constructor(sessionId: string);
|
|
741
|
+
}
|
|
742
|
+
/**
|
|
743
|
+
* Thrown when a session policy blocks an operation — for example,
|
|
744
|
+
* `requireReAuthFor: ['export']` is set and the caller attempts to
|
|
745
|
+
* call `exportStream()` without re-authenticating for this session.
|
|
746
|
+
*
|
|
747
|
+
* The `operation` field names the specific operation that was blocked
|
|
748
|
+
* (e.g. `'export'`, `'grant'`, `'rotate'`) so the caller can surface
|
|
749
|
+
* a targeted prompt ("Please re-enter your passphrase to export data").
|
|
750
|
+
*/
|
|
751
|
+
declare class SessionPolicyError extends NoydbError {
|
|
752
|
+
readonly operation: string;
|
|
753
|
+
constructor(operation: string, message?: string);
|
|
754
|
+
}
|
|
755
|
+
/**
|
|
756
|
+
* Thrown when a `.join()` would exceed its configured row ceiling on
|
|
757
|
+
* either side. The ceiling defaults to 50,000 per side and can be
|
|
758
|
+
* overridden via the `{ maxRows }` option on `.join()`.
|
|
759
|
+
*
|
|
760
|
+
* Carries both row counts so the error message can show which side
|
|
761
|
+
* tripped the limit (e.g. "left had 60,000 rows, right had 1,200,
|
|
762
|
+
* max was 50,000"). The `side` field is machine-readable so test
|
|
763
|
+
* code and devtools can match on it without regex-parsing the
|
|
764
|
+
* message.
|
|
765
|
+
*
|
|
766
|
+
* The row ceiling exists because joins are bounded in-memory
|
|
767
|
+
* operations over materialized record sets. Consumers whose
|
|
768
|
+
 * collections genuinely exceed the ceiling should stream instead
|
|
769
|
+
* (streaming joins over `scan()`) or filter the left side further
|
|
770
|
+
* with `where()` / `limit()` before joining.
|
|
771
|
+
*/
|
|
772
|
+
declare class JoinTooLargeError extends NoydbError {
|
|
773
|
+
readonly leftRows: number;
|
|
774
|
+
readonly rightRows: number;
|
|
775
|
+
readonly maxRows: number;
|
|
776
|
+
readonly side: 'left' | 'right';
|
|
777
|
+
constructor(opts: {
|
|
778
|
+
leftRows: number;
|
|
779
|
+
rightRows: number;
|
|
780
|
+
maxRows: number;
|
|
781
|
+
side: 'left' | 'right';
|
|
782
|
+
message: string;
|
|
783
|
+
});
|
|
784
|
+
}
|
|
785
|
+
/**
|
|
786
|
+
* Thrown by `.join()` in strict `ref()` mode when a left-side record
|
|
787
|
+
* points at a right-side id that does not exist in the target
|
|
788
|
+
* collection.
|
|
789
|
+
*
|
|
790
|
+
* Distinct from `RefIntegrityError` so test code can pattern-match
|
|
791
|
+
* on the *read-time* dangling case without catching *write-time*
|
|
792
|
+
* integrity violations. Both indicate "ref points at nothing" but
|
|
793
|
+
* happen at different lifecycle phases and deserve different
|
|
794
|
+
* remediation in documentation: a RefIntegrityError on `put()`
|
|
795
|
+
* means the input is invalid; a DanglingReferenceError on `.join()`
|
|
796
|
+
* means stored data has drifted and `vault.checkIntegrity()`
|
|
797
|
+
* is the right tool to find the full set of orphans.
|
|
798
|
+
*/
|
|
799
|
+
declare class DanglingReferenceError extends NoydbError {
|
|
800
|
+
readonly field: string;
|
|
801
|
+
readonly target: string;
|
|
802
|
+
readonly refId: string;
|
|
803
|
+
constructor(opts: {
|
|
804
|
+
field: string;
|
|
805
|
+
target: string;
|
|
806
|
+
refId: string;
|
|
807
|
+
message: string;
|
|
808
|
+
});
|
|
809
|
+
}
|
|
810
|
+
/**
|
|
811
|
+
* Thrown by {@link sanitizeFilename} when an input filename cannot be
|
|
812
|
+
* made safe — NUL byte, empty after normalization, missing
|
|
813
|
+
* `opaqueId` for the opaque profile, `..` segment, or a `maxBytes`
|
|
814
|
+
* cap too small to hold a single code point.
|
|
815
|
+
*/
|
|
816
|
+
declare class FilenameSanitizationError extends NoydbError {
|
|
817
|
+
constructor(message: string);
|
|
818
|
+
}
|
|
819
|
+
/**
|
|
820
|
+
* Thrown when a write target resolves OUTSIDE the requested
|
|
821
|
+
 * directory after sanitization — the canonical Zip-Slip vulnerability. The
|
|
822
|
+
* sanitizer's job is to strip path-traversal segments; this error
|
|
823
|
+
* is the defense-in-depth fallback at the FS write site.
|
|
824
|
+
*/
|
|
825
|
+
declare class PathEscapeError extends NoydbError {
|
|
826
|
+
readonly attempted: string;
|
|
827
|
+
readonly targetDir: string;
|
|
828
|
+
constructor(opts: {
|
|
829
|
+
attempted: string;
|
|
830
|
+
targetDir: string;
|
|
831
|
+
});
|
|
832
|
+
}
|
|
833
|
+
|
|
834
|
+
/**
|
|
835
|
+
* Foreign-key references — the soft-FK mechanism.
|
|
836
|
+
*
|
|
837
|
+
* A collection declares its references as metadata at construction
|
|
838
|
+
* time:
|
|
839
|
+
*
|
|
840
|
+
* ```ts
|
|
841
|
+
* import { ref } from '@noy-db/hub'
|
|
842
|
+
*
|
|
843
|
+
* const invoices = company.collection<Invoice>('invoices', {
|
|
844
|
+
* refs: {
|
|
845
|
+
* clientId: ref('clients'), // default: strict
|
|
846
|
+
* categoryId: ref('categories', 'warn'),
|
|
847
|
+
* parentId: ref('invoices', 'cascade'), // self-reference OK
|
|
848
|
+
* },
|
|
849
|
+
* })
|
|
850
|
+
* ```
|
|
851
|
+
*
|
|
852
|
+
* Three modes:
|
|
853
|
+
*
|
|
854
|
+
* - **strict** — the default. `put()` rejects records whose
|
|
855
|
+
* reference target doesn't exist, and `delete()` of the target
|
|
856
|
+
* rejects if any strict-referencing records still exist.
|
|
857
|
+
* Matches SQL's default FK semantics.
|
|
858
|
+
*
|
|
859
|
+
* - **warn** — both operations succeed unconditionally. Broken
|
|
860
|
+
* references surface only through
|
|
861
|
+
* `vault.checkIntegrity()`, which walks every collection
|
|
862
|
+
* and reports orphans. Use when you want soft validation for
|
|
863
|
+
* imports from messy sources.
|
|
864
|
+
*
|
|
865
|
+
* - **cascade** — `put()` is same as warn. `delete()` of the
|
|
866
|
+
* target deletes every referencing record. Cycles are detected
|
|
867
|
+
* and broken via an in-progress set, so mutual cascades
|
|
868
|
+
* terminate instead of recursing forever.
|
|
869
|
+
*
|
|
870
|
+
* Cross-vault refs are explicitly rejected: if the target
|
|
871
|
+
* name contains a `/`, `ref()` throws `RefScopeError`. Cross-
|
|
872
|
+
* vault refs need an auth story (multi-keyring reads) that
|
|
873
|
+
 * doesn't ship yet — tracked as future work.
|
|
874
|
+
*/
|
|
875
|
+
|
|
876
|
+
/** The three enforcement modes. Default for new refs is `'strict'`. */
|
|
877
|
+
type RefMode = 'strict' | 'warn' | 'cascade';
|
|
878
|
+
/**
|
|
879
|
+
* Descriptor returned by `ref()`. Collections accept a
|
|
880
|
+
* `Record<string, RefDescriptor>` in their options. The key is the
|
|
881
|
+
* field name on the record (top-level only — dotted paths are out of
|
|
882
|
+
* scope), the value describes which target collection the
|
|
883
|
+
* field references and under what mode.
|
|
884
|
+
*
|
|
885
|
+
* The descriptor carries only plain data so it can be serialized,
|
|
886
|
+
* passed around, and introspected without any class machinery.
|
|
887
|
+
*/
|
|
888
|
+
interface RefDescriptor {
|
|
889
|
+
readonly target: string;
|
|
890
|
+
readonly mode: RefMode;
|
|
891
|
+
}
|
|
892
|
+
/**
|
|
893
|
+
* Thrown when a strict reference is violated — either `put()` with a
|
|
894
|
+
* missing target id, or `delete()` of a target that still has
|
|
895
|
+
* strict-referencing records.
|
|
896
|
+
*
|
|
897
|
+
* Carries structured detail so UI code (and a potential future
|
|
898
|
+
* devtools panel) can render "client X cannot be deleted because
|
|
899
|
+
* invoices 1, 2, and 3 reference it" instead of a bare error string.
|
|
900
|
+
*/
|
|
901
|
+
declare class RefIntegrityError extends NoydbError {
|
|
902
|
+
readonly collection: string;
|
|
903
|
+
readonly id: string;
|
|
904
|
+
readonly field: string;
|
|
905
|
+
readonly refTo: string;
|
|
906
|
+
readonly refId: string | null;
|
|
907
|
+
constructor(opts: {
|
|
908
|
+
collection: string;
|
|
909
|
+
id: string;
|
|
910
|
+
field: string;
|
|
911
|
+
refTo: string;
|
|
912
|
+
refId: string | null;
|
|
913
|
+
message: string;
|
|
914
|
+
});
|
|
915
|
+
}
|
|
916
|
+
/**
|
|
917
|
+
* Thrown when `ref()` is called with a target name that looks like
|
|
918
|
+
* a cross-vault reference (contains a `/`). Separate error
|
|
919
|
+
* class because the fix is different: RefIntegrityError means "data
|
|
920
|
+
* is wrong"; RefScopeError means "the ref declaration is wrong".
|
|
921
|
+
*/
|
|
922
|
+
declare class RefScopeError extends NoydbError {
|
|
923
|
+
constructor(target: string);
|
|
924
|
+
}
|
|
925
|
+
/**
|
|
926
|
+
* Helper constructor. Thin wrapper around the object literal so user
|
|
927
|
+
* code reads like `ref('clients')` instead of `{ target: 'clients',
|
|
928
|
+
* mode: 'strict' }` — this is the only ergonomics reason it exists.
|
|
929
|
+
*
|
|
930
|
+
* Validates the target name eagerly so a misconfigured ref declaration
|
|
931
|
+
* fails at collection construction time, not at the first put.
|
|
932
|
+
*/
|
|
933
|
+
declare function ref(target: string, mode?: RefMode): RefDescriptor;
|
|
934
|
+
/**
|
|
935
|
+
* Per-vault registry of reference declarations.
|
|
936
|
+
*
|
|
937
|
+
* The registry is populated by `Collection` constructors (which pass
|
|
938
|
+
* their `refs` option through the Vault) and consulted by the
|
|
939
|
+
* Vault on every `put` / `delete` and by `checkIntegrity`. A
|
|
940
|
+
* single instance lives on the Vault for its lifetime; there's
|
|
941
|
+
* no global state.
|
|
942
|
+
*
|
|
943
|
+
* The data structure is two parallel maps:
|
|
944
|
+
*
|
|
945
|
+
* - `outbound`: `collection → { field → RefDescriptor }` — what
|
|
946
|
+
* refs does `collection` declare? Used on put to check
|
|
947
|
+
* strict-target-exists and on checkIntegrity to walk each
|
|
948
|
+
* collection's outbound refs.
|
|
949
|
+
*
|
|
950
|
+
* - `inbound`: `target → Array<{ collection, field, mode }>` —
|
|
951
|
+
* which collections reference `target`? Used on delete to find
|
|
952
|
+
* the records that might be affected by cascade / strict.
|
|
953
|
+
*
|
|
954
|
+
* The two views are kept in sync by `register()` and never mutated
|
|
955
|
+
 * otherwise — refs can't be unregistered at runtime.
|
|
956
|
+
*/
|
|
957
|
+
declare class RefRegistry {
|
|
958
|
+
private readonly outbound;
|
|
959
|
+
private readonly inbound;
|
|
960
|
+
/**
|
|
961
|
+
* Register the refs declared by a single collection. Idempotent in
|
|
962
|
+
* the happy path — calling twice with the same data is a no-op.
|
|
963
|
+
* Calling twice with DIFFERENT data throws, because silent
|
|
964
|
+
* overrides would be confusing ("I changed the ref and it doesn't
|
|
965
|
+
* update" vs "I declared the same collection twice with different
|
|
966
|
+
* refs and the second call won").
|
|
967
|
+
*/
|
|
968
|
+
register(collection: string, refs: Record<string, RefDescriptor>): void;
|
|
969
|
+
/** Get the outbound refs declared by a collection (or `{}` if none). */
|
|
970
|
+
getOutbound(collection: string): Record<string, RefDescriptor>;
|
|
971
|
+
/** Get the inbound refs that target a given collection (or `[]`). */
|
|
972
|
+
getInbound(target: string): ReadonlyArray<{
|
|
973
|
+
collection: string;
|
|
974
|
+
field: string;
|
|
975
|
+
mode: RefMode;
|
|
976
|
+
}>;
|
|
977
|
+
/**
|
|
978
|
+
* Iterate every (collection → refs) pair that has at least one
|
|
979
|
+
* declared reference. Used by `checkIntegrity` to walk the full
|
|
980
|
+
* universe of outbound refs without needing to track collection
|
|
981
|
+
* names elsewhere.
|
|
982
|
+
*/
|
|
983
|
+
entries(): Array<[string, Record<string, RefDescriptor>]>;
|
|
984
|
+
/** Clear the registry. Test-only escape hatch; never called from production code. */
|
|
985
|
+
clear(): void;
|
|
986
|
+
}
|
|
987
|
+
/**
|
|
988
|
+
* Shape of a single violation reported by `vault.checkIntegrity()`.
|
|
989
|
+
*
|
|
990
|
+
* `refId` is the value we saw in the referencing field — it's the
|
|
991
|
+
* ID we expected to find in `refTo`, but didn't. Left as `unknown`
|
|
992
|
+
* because records are loosely typed at the integrity-check layer.
|
|
993
|
+
*/
|
|
994
|
+
interface RefViolation {
|
|
995
|
+
readonly collection: string;
|
|
996
|
+
readonly id: string;
|
|
997
|
+
readonly field: string;
|
|
998
|
+
readonly refTo: string;
|
|
999
|
+
readonly refId: unknown;
|
|
1000
|
+
readonly mode: RefMode;
|
|
1001
|
+
}
|
|
1002
|
+
|
|
1003
|
+
/**
|
|
1004
|
+
* Query DSL `.join()` — eager, single-FK, intra-vault joins.
|
|
1005
|
+
*
|
|
1006
|
+
 * `.join()` resolves a ref()-declared foreign key into an attached
|
|
1007
|
+
* right-side record under an alias, using one of two planner paths
|
|
1008
|
+
* selected automatically:
|
|
1009
|
+
*
|
|
1010
|
+
* - **nested-loop** — right-side source exposes `lookupById`, so
|
|
1011
|
+
* each left row costs O(1). This is the common path for joins
|
|
1012
|
+
* against a Collection, which backs `lookupById` with a Map
|
|
1013
|
+
* lookup.
|
|
1014
|
+
* - **hash** — right-side has only `snapshot()`. Build a
|
|
1015
|
+
* `Map<id, record>` once, probe per left row. Same asymptotic
|
|
1016
|
+
* cost for our collections, but the path exists as a fallback
|
|
1017
|
+
* for custom QuerySource implementations and as an explicit
|
|
1018
|
+
* test-only override via `{ strategy: 'hash' }`.
|
|
1019
|
+
*
|
|
1020
|
+
* Scope:
|
|
1021
|
+
*
|
|
1022
|
+
* - Equi-joins on declared `ref()` fields only. Joins on
|
|
1023
|
+
* undeclared fields throw at plan time with an actionable error
|
|
1024
|
+
* naming the field and collection.
|
|
1025
|
+
* - Same-vault only. Cross-vault correlation goes
|
|
1026
|
+
* through `queryAcross`; this is an architectural
|
|
1027
|
+
* invariant, not a limitation we plan to lift.
|
|
1028
|
+
* - Hard row ceiling via `JoinTooLargeError` — default 50k per
|
|
1029
|
+
* side, override via `{ maxRows }`. Warns at 80% of the ceiling
|
|
1030
|
+
* on the existing warn channel.
|
|
1031
|
+
* - Three ref-mode behaviors on dangling refs:
|
|
1032
|
+
* strict → `DanglingReferenceError`,
|
|
1033
|
+
* warn → attach `null` with a one-shot warning,
|
|
1034
|
+
* cascade → attach `null` silently (cascade is a delete-time
|
|
1035
|
+
* mode; any dangling refs still present at read time are
|
|
1036
|
+
* mid-flight cascades or orphans from earlier, not a DSL error).
|
|
1037
|
+
*
|
|
1038
|
+
* Partition-awareness seam:
|
|
1039
|
+
*
|
|
1040
|
+
* Every `JoinLeg` carries a `partitionScope` field that is always
|
|
1041
|
+
 * `'all'` today. The executor never reads this field. Later,
|
|
1042
|
+
* partition-aware joins will start populating it from `where()`
|
|
1043
|
+
* predicates on the partition key without changing the planner's
|
|
1044
|
+
* external shape — this is the whole reason it exists now.
|
|
1045
|
+
*
|
|
1046
|
+
* Joins stay OUT of the ledger: reads don't touch `_ledger/`,
|
|
1047
|
+
* including joined reads.
|
|
1048
|
+
*/
|
|
1049
|
+
|
|
1050
|
+
/** Planner strategy for a single join leg. Auto-selected unless overridden. */
|
|
1051
|
+
type JoinStrategy = 'hash' | 'nested';
|
|
1052
|
+
/** Default per-side row ceiling before `.join()` throws `JoinTooLargeError`. */
|
|
1053
|
+
declare const DEFAULT_JOIN_MAX_ROWS = 50000;
|
|
1054
|
+
/**
|
|
1055
|
+
* Internal representation of a single join leg in the query plan.
|
|
1056
|
+
*
|
|
1057
|
+
* This is the primary place where constraint #1 is honored:
|
|
1058
|
+
* every leg carries a `partitionScope` field that is always `'all'`
|
|
1059
|
+
 * today and is never read by the executor. Partition-aware
|
|
1060
|
+
* joins will start populating it from `where()` predicates on the
|
|
1061
|
+
* partition key without changing the planner's external shape.
|
|
1062
|
+
*/
|
|
1063
|
+
interface JoinLeg {
|
|
1064
|
+
/** Field on the left-side record holding the foreign key value. */
|
|
1065
|
+
readonly field: string;
|
|
1066
|
+
/** Alias key under which the joined right-side record attaches. */
|
|
1067
|
+
readonly as: string;
|
|
1068
|
+
/** Target collection name, resolved from the `ref()` declaration. */
|
|
1069
|
+
readonly target: string;
|
|
1070
|
+
/** Ref mode controlling behavior on dangling refs at read time. */
|
|
1071
|
+
readonly mode: RefMode;
|
|
1072
|
+
/** Manual planner strategy override. `undefined` → auto-select. */
|
|
1073
|
+
readonly strategy: JoinStrategy | undefined;
|
|
1074
|
+
/** Per-side row ceiling override. `undefined` → DEFAULT_JOIN_MAX_ROWS. */
|
|
1075
|
+
readonly maxRows: number | undefined;
|
|
1076
|
+
/**
|
|
1077
|
+
* Partition scope for future partition-aware joins. Always `'all'`
|
|
1078
|
+
* today — the executor never reads this field. Future versions will
|
|
1079
|
+
* populate it from `where()` predicates without breaking the
|
|
1080
|
+
* planner's external shape. Do not remove even though it looks
|
|
1081
|
+
* unused today — that's the whole point of having it.
|
|
1082
|
+
*/
|
|
1083
|
+
readonly partitionScope: 'all' | readonly string[];
|
|
1084
|
+
/**
|
|
1085
|
+
* When `true`, this is a dictionary join. The executor
|
|
1086
|
+
* resolves the left-field value against the dict snapshot and
|
|
1087
|
+
* attaches `{ ...labels, key }` rather than a right-side record.
|
|
1088
|
+
* `target` holds the dictionary name (not a collection name).
|
|
1089
|
+
*/
|
|
1090
|
+
readonly isDictJoin?: true;
|
|
1091
|
+
}
|
|
1092
|
+
/**
|
|
1093
|
+
* Minimal shape of a joinable right-side record source.
|
|
1094
|
+
*
|
|
1095
|
+
* Collections implement this structurally via their `QuerySource`;
|
|
1096
|
+
* sources without `lookupById` force the hash-join fallback. Kept as
|
|
1097
|
+
* a thin interface so tests can wire up plain-object sources without
|
|
1098
|
+
* pulling in the full Collection class.
|
|
1099
|
+
*
|
|
1100
|
+
* The optional `subscribe` is used by `Query.live()` to merge
|
|
1101
|
+
* right-side change streams into the live re-run trigger. Sources
|
|
1102
|
+
* that omit `subscribe` still work for live joins — they just
|
|
1103
|
+
* don't drive re-fires when their right side mutates. Collection
|
|
1104
|
+
* implements `subscribe` by hooking into the existing per-
|
|
1105
|
+
* vault event emitter.
|
|
1106
|
+
*/
|
|
1107
|
+
interface JoinableSource {
|
|
1108
|
+
snapshot(): readonly unknown[];
|
|
1109
|
+
lookupById?(id: string): unknown;
|
|
1110
|
+
/**
|
|
1111
|
+
* Subscribe to mutations on this source. The callback fires
|
|
1112
|
+
* AFTER the underlying record set has been updated. Returns an
|
|
1113
|
+
* unsubscribe function. Optional — sources without this method
|
|
1114
|
+
* cannot trigger live-join re-fires from their side.
|
|
1115
|
+
*/
|
|
1116
|
+
subscribe?(cb: () => void): () => void;
|
|
1117
|
+
}
|
|
1118
|
+
/**
|
|
1119
|
+
* Join resolution context attached to a `Query` when it's constructed
|
|
1120
|
+
* from a `Collection`. Holds everything the `.join()` method needs to
|
|
1121
|
+
* translate a field name into a target collection + ref mode, and
|
|
1122
|
+
* everything the executor needs to read the right side.
|
|
1123
|
+
*
|
|
1124
|
+
* Kept as a structural interface so `Vault` can implement it
|
|
1125
|
+
* without `Query` needing to import `Vault` (circular-import
|
|
1126
|
+
 * avoidance). The Collection wires this up in its `query()` method using
|
|
1127
|
+
* the `joinResolver` back-reference the Vault passes in.
|
|
1128
|
+
*/
|
|
1129
|
+
interface JoinContext {
|
|
1130
|
+
/** Name of the left-side (owning) collection. */
|
|
1131
|
+
readonly leftCollection: string;
|
|
1132
|
+
/** Look up a `RefDescriptor` by field name on the left collection. */
|
|
1133
|
+
resolveRef(field: string): RefDescriptor | null;
|
|
1134
|
+
/** Resolve a right-side source by target collection name. */
|
|
1135
|
+
resolveSource(collectionName: string): JoinableSource | null;
|
|
1136
|
+
/**
|
|
1137
|
+
* Resolve a dictKey join source. Returns a `JoinableSource`
|
|
1138
|
+
* whose snapshot exposes `{ key, ...labels }` records, keyed by the
|
|
1139
|
+
* stable dictionary key. `null` when the field is not a dictKey.
|
|
1140
|
+
*
|
|
1141
|
+
* The source is built from the compartment's in-memory dictionary
|
|
1142
|
+
* snapshot — same data as `DictionaryHandle.list()`, O(1) per lookup.
|
|
1143
|
+
*/
|
|
1144
|
+
resolveDictSource?(field: string): JoinableSource | null;
|
|
1145
|
+
}
|
|
1146
|
+
/**
|
|
1147
|
+
* Apply every join leg in the plan against a base set of left-side
|
|
1148
|
+
* rows. Called by the query executor after `where` / `orderBy` /
|
|
1149
|
+
* `offset` / `limit` have narrowed the left set.
|
|
1150
|
+
*
|
|
1151
|
+
* Each leg attaches a `leg.as` field to every row. Returns a new
|
|
1152
|
+
* array of plain objects — the original left rows are not mutated
|
|
1153
|
+
* (structural sharing is fine for the inner fields, but the
|
|
1154
|
+
* top-level object is a fresh clone so consumers can further mutate
|
|
1155
|
+
* safely).
|
|
1156
|
+
*
|
|
1157
|
+
* **Ordering:** joins run AFTER orderBy / limit / offset in v1.
|
|
1158
|
+
* This keeps the planner simple and means queries like "top 10
|
|
1159
|
+
* invoices with client" sort and paginate the left side first, then
|
|
1160
|
+
 * join. Sorting *by* a joined field is out of scope for v1 — users
|
|
1161
|
+
* can post-sort the result array in userland or wait for
|
|
1162
|
+
 * multi-FK chaining support, which can be layered on top.
|
|
1163
|
+
*
|
|
1164
|
+
* **Multi-FK chaining:** each leg's `maxRows` is enforced
|
|
1165
|
+
* against the current left-row count independently. Because
|
|
1166
|
+
* joins are equi-joins on the target's primary key (one-to-one or
|
|
1167
|
+
* one-to-null), the left row count is constant across legs — no
|
|
1168
|
+
* cartesian blowup. The per-leg left-side check is still necessary
|
|
1169
|
+
* so that a later leg with a tighter ceiling correctly fires on a
|
|
1170
|
+
* query like `.join('a', { maxRows: 100_000 }).join('b', { maxRows: 50 })`,
|
|
1171
|
+
* which should throw on the second leg if the left set exceeds 50.
|
|
1172
|
+
*/
|
|
1173
|
+
declare function applyJoins(rows: readonly unknown[], joins: readonly JoinLeg[], context: JoinContext): unknown[];
|
|
1174
|
+
/**
|
|
1175
|
+
* Test-only: reset the join warning deduplication state between
|
|
1176
|
+
* tests. Production code never calls this — the dedup state is
|
|
1177
|
+
* intentionally process-scoped so a noisy query doesn't spam the
|
|
1178
|
+
* console once per component render.
|
|
1179
|
+
*/
|
|
1180
|
+
declare function resetJoinWarnings(): void;
|
|
1181
|
+
|
|
1182
|
+
/**
|
|
1183
|
+
* Reactive query primitive — `query.live()`.
|
|
1184
|
+
*
|
|
1185
|
+
* produces a `LiveQuery<T>` that re-runs the query and
|
|
1186
|
+
* updates its `value` whenever any source feeding it (the left
|
|
1187
|
+
* collection AND every right-side collection a join leg points at)
|
|
1188
|
+
* mutates.
|
|
1189
|
+
*
|
|
1190
|
+
* Framework-agnostic by design. The Vue layer wraps a `LiveQuery`
|
|
1191
|
+
* in a Vue `Ref<T[]>` by subscribing once and copying `value` into
|
|
1192
|
+
* the ref on every notification. React/Solid/Svelte adapters do the
|
|
1193
|
+
* same with their own primitives. Core never depends on a UI
|
|
1194
|
+
* framework.
|
|
1195
|
+
*
|
|
1196
|
+
* **Error semantics.** A `.live()` query may throw at re-run time —
|
|
1197
|
+
* a strict-mode `DanglingReferenceError` is the most common case
|
|
1198
|
+
* (a right-side record was deleted out-of-band, leaving a left
|
|
1199
|
+
* row's FK pointing at nothing). When the re-run throws, the
|
|
1200
|
+
* `LiveQuery` catches the error and stores it in the `error`
|
|
1201
|
+
* field; it does NOT propagate the throw out of the source's
|
|
1202
|
+
* change handler, because doing so would tear down whatever
|
|
1203
|
+
* upstream emitter is dispatching. Listeners check `error` after
|
|
1204
|
+
* each notification and render an error state in the UI.
|
|
1205
|
+
*
|
|
1206
|
+
* **Dedup of right-side subscriptions.** A multi-FK chain that
|
|
1207
|
+
* joins the same target twice (e.g.
|
|
1208
|
+
* `.join('billingClientId').join('shippingClientId')`, both
|
|
1209
|
+
* pointing at `clients`) only subscribes to that target once. We
|
|
1210
|
+
* dedup by target collection name, on the assumption that
|
|
1211
|
+
* `resolveSource(name)` returns a single subscribable source per
|
|
1212
|
+
* vault + name. Vault's `resolveSource` reads from
|
|
1213
|
+
* `collectionCache` so this assumption holds.
|
|
1214
|
+
*
|
|
1215
|
+
* **What .live() does NOT do in v1:**
|
|
1216
|
+
* - No granular delta updates — the whole query re-runs on every
|
|
1217
|
+
* change. Granular delta tracking is a v2 optimization once
|
|
1218
|
+
* the API is stable.
|
|
1219
|
+
* - No batching of bursty changes — one event in, one re-run
|
|
1220
|
+
* out. Batching with microtask coalescing is a v2 enhancement.
|
|
1221
|
+
* - No async notifications — every notification is synchronous
|
|
1222
|
+
* within the source's change handler.
|
|
1223
|
+
* - No re-planning under live mutations — the planner picks once
|
|
1224
|
+
* at subscription time and reuses the same plan for every
|
|
1225
|
+
* re-run.
|
|
1226
|
+
*/
|
|
1227
|
+
/**
|
|
1228
|
+
* The reactive primitive returned by `Query.live()`.
|
|
1229
|
+
*
|
|
1230
|
+
* Listeners can read the current `value` snapshot at any time and
|
|
1231
|
+
* subscribe to changes via `.subscribe(cb)`. The `error` field
|
|
1232
|
+
* carries the most recent re-run error, if any — read it after
|
|
1233
|
+
* each notification to render error state.
|
|
1234
|
+
*
|
|
1235
|
+
* Always call `stop()` when the live query is no longer needed.
|
|
1236
|
+
* Without it, the upstream change-stream subscriptions stay live
|
|
1237
|
+
* forever and the query keeps re-running on every mutation.
|
|
1238
|
+
*/
|
|
1239
|
+
interface LiveQuery<T> {
|
|
1240
|
+
/**
|
|
1241
|
+
* Current snapshot of the query result. Updated in place on
|
|
1242
|
+
* every upstream change. The reference returned is the same
|
|
1243
|
+
* `readonly T[]` array — consumers that want change detection by
|
|
1244
|
+
* reference should copy: `const arr = [...live.value]`.
|
|
1245
|
+
*/
|
|
1246
|
+
readonly value: readonly T[];
|
|
1247
|
+
/**
|
|
1248
|
+
* Most recent re-run error, or `null` on success. Set when the
|
|
1249
|
+
* executor throws (e.g. `DanglingReferenceError` in strict mode
|
|
1250
|
+
* after a right-side delete). Cleared on the next successful
|
|
1251
|
+
* re-run.
|
|
1252
|
+
*/
|
|
1253
|
+
readonly error: Error | null;
|
|
1254
|
+
/**
|
|
1255
|
+
* Register a notification callback. Fires AFTER `value` and
|
|
1256
|
+
* `error` have been updated for a given upstream change.
|
|
1257
|
+
* Returns an unsubscribe function.
|
|
1258
|
+
*
|
|
1259
|
+
* The first call to `subscribe` does NOT fire the callback
|
|
1260
|
+
* immediately — call sites that want the initial value should
|
|
1261
|
+
* read `live.value` directly before subscribing.
|
|
1262
|
+
*/
|
|
1263
|
+
subscribe(cb: () => void): () => void;
|
|
1264
|
+
/**
|
|
1265
|
+
* Tear down every upstream subscription and clear the listener
|
|
1266
|
+
* set. Idempotent — calling twice is safe. After `stop()`, the
|
|
1267
|
+
* query no longer re-runs and `subscribe()` becomes a no-op
|
|
1268
|
+
* (the returned unsubscribe is still callable and is also a
|
|
1269
|
+
* no-op).
|
|
1270
|
+
*/
|
|
1271
|
+
stop(): void;
|
|
1272
|
+
}
|
|
1273
|
+
/**
|
|
1274
|
+
* Internal subscription handle for an upstream source — left or
|
|
1275
|
+
* right side. The contract is just `subscribe(cb): unsubscribe`,
|
|
1276
|
+
* matching the existing `QuerySource.subscribe` and the new
|
|
1277
|
+
 * `JoinableSource.subscribe`.
|
|
1278
|
+
*/
|
|
1279
|
+
interface LiveUpstream {
|
|
1280
|
+
subscribe(cb: () => void): () => void;
|
|
1281
|
+
}
|
|
1282
|
+
/**
|
|
1283
|
+
* Build a LiveQuery from a `recompute` callback (typically the
|
|
1284
|
+
* Query's bound `toArray`) and a list of upstream sources to
|
|
1285
|
+
* subscribe to.
|
|
1286
|
+
*
|
|
1287
|
+
* The recompute fires once synchronously to populate the initial
|
|
1288
|
+
* value, then re-fires every time any upstream notifies. Errors
|
|
1289
|
+
* thrown by recompute are caught and stored in `error` instead of
|
|
1290
|
+
* propagating — see the file docstring for the rationale.
|
|
1291
|
+
*/
|
|
1292
|
+
declare function buildLiveQuery<T>(recompute: () => T[], upstreams: readonly LiveUpstream[]): LiveQuery<T>;
|
|
1293
|
+
|
|
1294
|
+
/**
|
|
1295
|
+
* Chainable, immutable query builder.
|
|
1296
|
+
*
|
|
1297
|
+
* Each builder operation returns a NEW Query — the underlying plan is never
|
|
1298
|
+
* mutated. This makes plans safe to share, cache, and serialize.
|
|
1299
|
+
*/
|
|
1300
|
+
|
|
1301
|
+
interface OrderBy {
|
|
1302
|
+
readonly field: string;
|
|
1303
|
+
readonly direction: 'asc' | 'desc';
|
|
1304
|
+
}
|
|
1305
|
+
/**
|
|
1306
|
+
* A complete query plan: zero-or-more clauses, optional ordering, pagination,
|
|
1307
|
+
* and optional joins.
|
|
1308
|
+
*
|
|
1309
|
+
* Plans are JSON-serializable as long as no FilterClause is present and no
|
|
1310
|
+
* join leg carries a manual `strategy` override (JoinLeg itself is plain
|
|
1311
|
+
* data, so it serializes cleanly).
|
|
1312
|
+
*
|
|
1313
|
+
* Plans are intentionally NOT parametric on T — see `predicate.ts` FilterClause
|
|
1314
|
+
* for the variance reasoning. The public `Query<T>` API attaches the type tag.
|
|
1315
|
+
*/
|
|
1316
|
+
interface QueryPlan {
|
|
1317
|
+
readonly clauses: readonly Clause[];
|
|
1318
|
+
readonly orderBy: readonly OrderBy[];
|
|
1319
|
+
readonly limit: number | undefined;
|
|
1320
|
+
readonly offset: number;
|
|
1321
|
+
/**
|
|
1322
|
+
* Zero-or-more join legs to apply after where/orderBy/limit/offset.
|
|
1323
|
+
* Each leg attaches a resolved right-side record (or null) under its
|
|
1324
|
+
* alias. See `query/join.ts` for the full semantics.
|
|
1325
|
+
*/
|
|
1326
|
+
readonly joins: readonly JoinLeg[];
|
|
1327
|
+
}
|
|
1328
|
+
/**
|
|
1329
|
+
* Source of records that a query executes against.
|
|
1330
|
+
*
|
|
1331
|
+
* The interface is non-parametric to keep variance friendly: callers cast
|
|
1332
|
+
* their typed source (e.g. `QuerySource<Invoice>`) into this opaque shape.
|
|
1333
|
+
*
|
|
1334
|
+
* `getIndexes` and `lookupById` are optional fast-path hooks. When both are
|
|
1335
|
+
* present and a where clause matches an indexed field, the executor uses
|
|
1336
|
+
* the index to skip a linear scan. Sources without these methods (or with
|
|
1337
|
+
* `getIndexes` returning `null`) always fall back to a linear scan.
|
|
1338
|
+
*/
|
|
1339
|
+
interface QuerySource<T> {
|
|
1340
|
+
/** Snapshot of all current records. The query never mutates this array. */
|
|
1341
|
+
snapshot(): readonly T[];
|
|
1342
|
+
/** Subscribe to mutations; returns an unsubscribe function. */
|
|
1343
|
+
subscribe?(cb: () => void): () => void;
|
|
1344
|
+
/** Index store for the indexed-fast-path. Optional. */
|
|
1345
|
+
getIndexes?(): CollectionIndexes | null;
|
|
1346
|
+
/** O(1) record lookup by id, used to materialize index hits. */
|
|
1347
|
+
lookupById?(id: string): T | undefined;
|
|
1348
|
+
}
|
|
1349
|
+
/**
|
|
1350
|
+
* The chainable builder. All methods return a new Query — the original
|
|
1351
|
+
* remains unchanged. Terminal methods (`toArray`, `first`, `count`,
|
|
1352
|
+
* `subscribe`) execute the plan against the source.
|
|
1353
|
+
*
|
|
1354
|
+
* Type parameter T flows through the public API for ergonomics, but the
|
|
1355
|
+
* internal storage uses `unknown` so Collection<T> stays covariant.
|
|
1356
|
+
*
|
|
1357
|
+
* The optional `joinContext` is attached when the Query is constructed
|
|
1358
|
+
* via `Collection.query()` (Collection passes in a context built from
|
|
1359
|
+
* the Vault's join resolver). A Query constructed via `new Query`
|
|
1360
|
+
* directly — e.g. from tests with a plain-object source — has no
|
|
1361
|
+
* joinContext, and calling `.join()` on it throws with an actionable
|
|
1362
|
+
* error. See `query/join.ts` for the full design.
|
|
1363
|
+
*/
|
|
1364
|
+
declare class Query<T> {
|
|
1365
|
+
private readonly source;
|
|
1366
|
+
private readonly plan;
|
|
1367
|
+
private readonly joinContext;
|
|
1368
|
+
private readonly aggregateStrategy;
|
|
1369
|
+
constructor(source: QuerySource<T>, plan?: QueryPlan, joinContext?: JoinContext, aggregateStrategy?: AggregateStrategy);
|
|
1370
|
+
/** Add a field comparison. Multiple where() calls are AND-combined. */
|
|
1371
|
+
where(field: string, op: Operator, value: unknown): Query<T>;
|
|
1372
|
+
/**
|
|
1373
|
+
* Logical OR group. Pass a callback that builds a sub-query.
|
|
1374
|
+
* Each clause inside the callback is OR-combined; the group itself
|
|
1375
|
+
* joins the parent plan with AND.
|
|
1376
|
+
*/
|
|
1377
|
+
or(builder: (q: Query<T>) => Query<T>): Query<T>;
|
|
1378
|
+
/**
|
|
1379
|
+
* Logical AND group. Same shape as `or()` but every clause inside the group
|
|
1380
|
+
* must match. Useful for explicit grouping inside a larger OR.
|
|
1381
|
+
*/
|
|
1382
|
+
and(builder: (q: Query<T>) => Query<T>): Query<T>;
|
|
1383
|
+
/** Escape hatch: add an arbitrary predicate function. Not serializable. */
|
|
1384
|
+
filter(fn: (record: T) => boolean): Query<T>;
|
|
1385
|
+
/** Sort by a field. Subsequent calls are tie-breakers. */
|
|
1386
|
+
orderBy(field: string, direction?: 'asc' | 'desc'): Query<T>;
|
|
1387
|
+
/** Cap the result size. */
|
|
1388
|
+
limit(n: number): Query<T>;
|
|
1389
|
+
/** Skip the first N matching records (after ordering). */
|
|
1390
|
+
offset(n: number): Query<T>;
|
|
1391
|
+
/**
|
|
1392
|
+
* Resolve a `ref()`-declared foreign key and attach the right-side
|
|
1393
|
+
 * record under `opts.as` — eager, single-FK, intra-
|
|
1394
|
+
* vault joins.
|
|
1395
|
+
*
|
|
1396
|
+
* ```ts
|
|
1397
|
+
* const rows = invoices.query()
|
|
1398
|
+
* .where('status', '==', 'open')
|
|
1399
|
+
* .join('clientId', { as: 'client' })
|
|
1400
|
+
* .toArray()
|
|
1401
|
+
* // → [{ id, amount, client: { id, name, ... } }, ...]
|
|
1402
|
+
* ```
|
|
1403
|
+
*
|
|
1404
|
+
* Preconditions:
|
|
1405
|
+
* - The Query must have a `joinContext` (constructed via
|
|
1406
|
+
* `Collection.query()`, not `new Query`).
|
|
1407
|
+
* - `field` must have a matching `refs: { [field]: ref('<target>') }`
|
|
1408
|
+
* declaration on the left collection.
|
|
1409
|
+
* - The target collection must be reachable via the vault
|
|
1410
|
+
* (either currently open or openable on demand).
|
|
1411
|
+
*
|
|
1412
|
+
* Strategy:
|
|
1413
|
+
* - Nested-loop against `lookupById` when the target source
|
|
1414
|
+
* provides it (the common path for Collection targets).
|
|
1415
|
+
* - Hash join otherwise, or when `{ strategy: 'hash' }` is
|
|
1416
|
+
* explicitly passed for test purposes.
|
|
1417
|
+
*
|
|
1418
|
+
* Ref-mode semantics on dangling refs (left record has a non-null
|
|
1419
|
+
* FK value pointing at a right-side id that doesn't exist):
|
|
1420
|
+
* - `strict` → throws `DanglingReferenceError` with the full
|
|
1421
|
+
* field / target / refId context.
|
|
1422
|
+
* - `warn` → attaches `null` and emits a one-shot warning per
|
|
1423
|
+
* unique dangling pair.
|
|
1424
|
+
* - `cascade` → attaches `null` silently. Cascade is a
|
|
1425
|
+
* delete-time mode; dangling refs visible at read time are
|
|
1426
|
+
* either mid-flight cascades or pre-existing orphans, not a
|
|
1427
|
+
* DSL-level error.
|
|
1428
|
+
*
|
|
1429
|
+
* A left-side record whose FK field is `null` / `undefined` is NOT
|
|
1430
|
+
* a dangling ref — it's "no reference at all", always allowed
|
|
1431
|
+
* regardless of mode.
|
|
1432
|
+
*
|
|
1433
|
+
* The return type widens `T` with `Record<As, R | null>`. The `R`
|
|
1434
|
+
* parameter is optional — supply it explicitly for type-checked
|
|
1435
|
+
* access to the joined fields:
|
|
1436
|
+
*
|
|
1437
|
+
* ```ts
|
|
1438
|
+
* invoices.query().join<'client', Client>('clientId', { as: 'client' })
|
|
1439
|
+
* // ^^^^^^^^^^^^^^^^^^^ alias literal + right-side type
|
|
1440
|
+
* ```
|
|
1441
|
+
*
|
|
1442
|
+
* Without the generic, the joined field is typed as `unknown`, which
|
|
1443
|
+
* still works but requires a cast to access its properties.
|
|
1444
|
+
*
|
|
1445
|
+
* Joins stay intra-vault by construction — cross-vault
|
|
1446
|
+
* correlation goes through `Noydb.queryAcross`, not
|
|
1447
|
+
* `.join()`.
|
|
1448
|
+
*/
|
|
1449
|
+
join<As extends string, R = unknown>(field: string, opts: {
|
|
1450
|
+
as: As;
|
|
1451
|
+
strategy?: JoinStrategy;
|
|
1452
|
+
maxRows?: number;
|
|
1453
|
+
}): Query<T & Record<As, R | null>>;
|
|
1454
|
+
/**
|
|
1455
|
+
* Execute the plan and return the matching records. When the plan
|
|
1456
|
+
* carries any join legs, they are applied after `where` / `orderBy`
|
|
1457
|
+
* / `limit` / `offset` narrow the left set. See the `.join()` doc
|
|
1458
|
+
* for the ordering rationale.
|
|
1459
|
+
*/
|
|
1460
|
+
toArray(): T[];
|
|
1461
|
+
/** Return the first matching record, or null. Joins are applied. */
|
|
1462
|
+
first(): T | null;
|
|
1463
|
+
/**
|
|
1464
|
+
* Return the number of matching records (after where/filter,
|
|
1465
|
+
* before limit). **Joins are NOT applied** — count() reports the
|
|
1466
|
+
 * left-side cardinality, because joins in v1 are projection-only
|
|
1467
|
+
* (they attach an aliased field; they never filter). Running joins
|
|
1468
|
+
* here just to discard the aliases would be wasteful, and in strict
|
|
1469
|
+
* mode it could throw `DanglingReferenceError` for a call whose
|
|
1470
|
+
* intent is purely to count.
|
|
1471
|
+
*/
|
|
1472
|
+
count(): number;
|
|
1473
|
+
/**
|
|
1474
|
+
* Reduce the matching records through a named set of reducers.
|
|
1475
|
+
 * This is the aggregation terminal.
|
|
1476
|
+
*
|
|
1477
|
+
* ```ts
|
|
1478
|
+
* const { total, n, avgAmount } = invoices.query()
|
|
1479
|
+
* .where('status', '==', 'open')
|
|
1480
|
+
* .aggregate({
|
|
1481
|
+
* total: sum('amount'),
|
|
1482
|
+
* n: count(),
|
|
1483
|
+
* avgAmount: avg('amount'),
|
|
1484
|
+
* })
|
|
1485
|
+
* .run()
|
|
1486
|
+
* ```
|
|
1487
|
+
*
|
|
1488
|
+
* Returns an `Aggregation<R>` wrapper with two terminals:
|
|
1489
|
+
* - `.run(): R` — synchronous one-shot reduction
|
|
1490
|
+
* - `.live(): LiveAggregation<R>` — reactive primitive that
|
|
1491
|
+
* re-runs the reduction whenever the source notifies of a
|
|
1492
|
+
* change. Always call `live.stop()` when finished.
|
|
1493
|
+
*
|
|
1494
|
+
* The reducer spec is bound here once and reused by both
|
|
1495
|
+
* terminals — this is why `.aggregate()` returns a wrapper instead
|
|
1496
|
+
* of being a direct terminal. Consumers who only need the static
|
|
1497
|
+
* value read `.run()`; consumers wiring a reactive UI read
|
|
1498
|
+
* `.live()`.
|
|
1499
|
+
*
|
|
1500
|
+
 * Joins are intentionally NOT applied to aggregations in v1 —
|
|
1501
|
+
 * the same logic as `.count()`. Joins in v1 are projection-only
|
|
1502
|
+
* (they attach an aliased field and never filter), so running
|
|
1503
|
+
* them just to throw the aliases away would be wasteful. If you
|
|
1504
|
+
* need a reducer that reads a joined field, open an issue —
|
|
1505
|
+
* aggregations-across-joins is explicitly out of scope for v1.
|
|
1506
|
+
*
|
|
1507
|
+
* Every reducer factory accepts an optional `{ seed }` parameter
|
|
1508
|
+
* that is plumbed through the protocol but unused by the
|
|
1509
|
+
* executor — that's constraint #2. When partition-aware
|
|
1510
|
+
* aggregation lands, the seed will carry running state across
|
|
1511
|
+
* partition boundaries without an API break.
|
|
1512
|
+
*/
|
|
1513
|
+
aggregate<Spec extends AggregateSpec>(spec: Spec): Aggregation<AggregateResult<Spec>>;
|
|
1514
|
+
/**
|
|
1515
|
+
* Partition matching records into buckets keyed by a field, then
|
|
1516
|
+
* terminate with `.aggregate(spec)` to compute per-bucket
|
|
1517
|
+
 * reducers.
|
|
1518
|
+
*
|
|
1519
|
+
* ```ts
|
|
1520
|
+
* const byClient = invoices.query()
|
|
1521
|
+
* .where('status', '==', 'open')
|
|
1522
|
+
* .groupBy('clientId')
|
|
1523
|
+
* .aggregate({ total: sum('amount'), n: count() })
|
|
1524
|
+
* .run()
|
|
1525
|
+
* // → [ { clientId: 'c1', total: 5250, n: 3 }, … ]
|
|
1526
|
+
* ```
|
|
1527
|
+
*
|
|
1528
|
+
* Result rows carry the group key value under the grouping field
|
|
1529
|
+
* name plus every reducer output from the spec. Buckets are
|
|
1530
|
+
* emitted in first-seen order — consumers who want a specific
|
|
1531
|
+
* ordering should `.sort()` downstream.
|
|
1532
|
+
*
|
|
1533
|
+
* **Cardinality caps:** a one-shot warning fires at 10_000
|
|
1534
|
+
* distinct groups; `GroupCardinalityError` throws at 100_000.
|
|
1535
|
+
* Grouping on a high-uniqueness field like `id` or `createdAt` is
|
|
1536
|
+
* almost always a query mistake — the error message names the
|
|
1537
|
+
* field and observed cardinality and suggests narrowing with
|
|
1538
|
+
* `.where()` first.
|
|
1539
|
+
*
|
|
1540
|
+
* **Null / undefined keys:** records with a missing or explicitly
|
|
1541
|
+
* `null` group field get their own buckets. `Map`-based
|
|
1542
|
+
* partitioning distinguishes `undefined` from `null`, so the two
|
|
1543
|
+
* cases do NOT merge. Consumers who want them merged should
|
|
1544
|
+
* coalesce upstream with `.filter()`.
|
|
1545
|
+
*
|
|
1546
|
+
* **Joins are not applied** — same rationale as `.count()` and
|
|
1547
|
+
 * `.aggregate()`. Joined fields in v1 are projection-only, so
|
|
1548
|
+
* running a join inside a grouping pipeline would be wasteful and
|
|
1549
|
+
* could trigger `DanglingReferenceError` in strict mode for a
|
|
1550
|
+
* call whose intent is purely to bucket-and-reduce. Grouping by
|
|
1551
|
+
 * a joined field is explicitly out of scope for v1 — file an
|
|
1552
|
+
* issue if a real consumer needs it.
|
|
1553
|
+
*
|
|
1554
|
+
* **Filter clauses (`.filter(fn)`):** grouped queries still
|
|
1555
|
+
* support filter clauses in the underlying plan — they run in
|
|
1556
|
+
* the same candidate/filter pipeline that `.aggregate()` uses.
|
|
1557
|
+
* The performance caveat is the same: filter clauses cost O(N)
|
|
1558
|
+
* per record and can't be index-accelerated.
|
|
1559
|
+
*/
|
|
1560
|
+
groupBy<F extends string>(field: F): GroupedQuery<T, F>;
|
|
1561
|
+
/**
|
|
1562
|
+
* Re-run the query whenever the source notifies of changes.
|
|
1563
|
+
* Returns an unsubscribe function. The callback receives the latest result.
|
|
1564
|
+
* Throws if the source does not support subscriptions.
|
|
1565
|
+
*
|
|
1566
|
+
* **For joined queries, prefer `.live()`** — `subscribe()`
|
|
1567
|
+
* only re-fires on LEFT-side changes, so joined data can be
|
|
1568
|
+
* stale if the right side mutates between emissions. `.live()`
|
|
1569
|
+
* merges change streams from every join target.
|
|
1570
|
+
*/
|
|
1571
|
+
subscribe(cb: (result: T[]) => void): () => void;
|
|
1572
|
+
/**
|
|
1573
|
+
* Reactive terminal — returns a `LiveQuery<T>` that re-runs the
|
|
1574
|
+
* query and updates its `value` whenever any source feeding it
|
|
1575
|
+
 * mutates.
|
|
1576
|
+
*
|
|
1577
|
+
* For non-joined queries, `.live()` is a convenience over the
|
|
1578
|
+
* existing `.subscribe()` callback shape: a hand-rolled reactive
|
|
1579
|
+
* primitive with `value` / `error` fields and a `subscribe(cb)`
|
|
1580
|
+
 * notification channel. Framework-agnostic — Vue / React / Solid
|
|
1581
|
+
* adapters wrap it in their own primitive.
|
|
1582
|
+
*
|
|
1583
|
+
* For joined queries, `.live()` additionally subscribes to every
|
|
1584
|
+
* join target's change stream. Mutations on a right-side
|
|
1585
|
+
* collection (insert / update / delete of a client referenced by
|
|
1586
|
+
* an invoice) re-fire the live query and re-evaluate every
|
|
1587
|
+
* dependent left row. Right-side targets are deduped by
|
|
1588
|
+
* collection name, so a chain that joins the same target twice
|
|
1589
|
+
* (e.g. billing client + shipping client → both 'clients') only
|
|
1590
|
+
* subscribes once.
|
|
1591
|
+
*
|
|
1592
|
+
* **Ref-mode behavior on right-side disappearance** — matches the
|
|
1593
|
+
 * eager `.toArray()` contract:
|
|
1594
|
+
* - `strict` → re-run throws `DanglingReferenceError`. The
|
|
1595
|
+
* LiveQuery catches the throw, stores it in `live.error`, and
|
|
1596
|
+
* notifies listeners (the throw does NOT propagate out of
|
|
1597
|
+
* the source's change handler — that would tear down the
|
|
1598
|
+
* emitter). Consumers check `live.error` after each
|
|
1599
|
+
* notification and render an error state in the UI.
|
|
1600
|
+
* - `warn` → joined value flips to `null`; the existing
|
|
1601
|
+
* warn-channel deduplication keeps repeated re-runs from
|
|
1602
|
+
* spamming the console.
|
|
1603
|
+
* - `cascade` → no special handling needed; the cascade-
|
|
1604
|
+
* delete mechanism propagates the right-side delete into the
|
|
1605
|
+
* left collection on the next tick, and the live query
|
|
1606
|
+
* naturally re-fires with the orphaned left rows gone.
|
|
1607
|
+
*
|
|
1608
|
+
* Always call `live.stop()` when finished — it tears down every
|
|
1609
|
+
* upstream subscription. The Vue layer's `onUnmounted` hook
|
|
1610
|
+
* should call `stop()` automatically; raw consumers must do it
|
|
1611
|
+
* themselves.
|
|
1612
|
+
*
|
|
1613
|
+
* **Limitations:**
|
|
1614
|
+
* - No granular delta updates — the whole query re-runs on
|
|
1615
|
+
* every change.
|
|
1616
|
+
* - No microtask batching — bursty changes produce one re-run
|
|
1617
|
+
* per change.
|
|
1618
|
+
* - No re-planning under live mutations — the planner picks
|
|
1619
|
+
* once at subscription time and reuses the same plan.
|
|
1620
|
+
* - Streaming live joins are deferred.
|
|
1621
|
+
*/
|
|
1622
|
+
live(): LiveQuery<T>;
|
|
1623
|
+
/**
|
|
1624
|
+
* Return the plan as a JSON-friendly object. FilterClause entries are
|
|
1625
|
+
* stripped (their `fn` cannot be serialized) and replaced with
|
|
1626
|
+
* { type: 'filter', fn: '[function]' } so devtools can still see them.
|
|
1627
|
+
*/
|
|
1628
|
+
toPlan(): unknown;
|
|
1629
|
+
}
|
|
1630
|
+
/**
|
|
1631
|
+
* Execute a plan against a snapshot of records.
|
|
1632
|
+
* Pure function — same input, same output, no side effects.
|
|
1633
|
+
*
|
|
1634
|
+
* Records are typed as `unknown` because plans are non-parametric; callers
|
|
1635
|
+
* cast the return type at the API surface (see `Query.toArray()`).
|
|
1636
|
+
*/
|
|
1637
|
+
declare function executePlan(records: readonly unknown[], plan: QueryPlan): unknown[];
|
|
1638
|
+
|
|
1639
|
+
/**
|
|
1640
|
+
* Streaming scan builder with filter + aggregate support.
|
|
1641
|
+
*
|
|
1642
|
+
* `Collection.scan()` now returns a `ScanBuilder<T>` that
|
|
1643
|
+
* implements `AsyncIterable<T>` (for existing `for await … of`
|
|
1644
|
+
* consumers) AND exposes chainable `.where()` / `.filter()` clauses
|
|
1645
|
+
* plus a `.aggregate(spec)` async terminal that reduces the scan
|
|
1646
|
+
* stream through the same reducer protocol as `Query.aggregate()`
|
|
1647
|
+
 * does.
|
|
1648
|
+
*
|
|
1649
|
+
* **Memory model:** O(reducers), not O(records). The aggregate
|
|
1650
|
+
* terminal initializes one state per reducer, iterates through the
|
|
1651
|
+
* scan one record at a time via `for await`, applies every reducer's
|
|
1652
|
+
* `step` per record, and never collects the stream into an array.
|
|
1653
|
+
* This is what makes `scan().aggregate()` suitable for collections
|
|
1654
|
+
* that don't fit in memory — the bound is a code-level invariant
|
|
1655
|
+
* visible in the function body, not a runtime assertion.
|
|
1656
|
+
*
|
|
1657
|
+
* **Paginated iteration:** the builder holds a `pageProvider`
|
|
1658
|
+
* closure that maps `(cursor, limit) → Promise<page>`, plumbed by
|
|
1659
|
+
* `Collection.scan()` to `collection.listPage(...)`. The page
|
|
1660
|
+
* iterator walks cursors forward until exhaustion, same as the
|
|
1661
|
+
* previous async-generator `scan()` did.
|
|
1662
|
+
*
|
|
1663
|
+
* **Backward compatibility:** existing `for await (const rec of
|
|
1664
|
+
* collection.scan()) { … }` code continues to work because
|
|
1665
|
+
* `ScanBuilder` implements `[Symbol.asyncIterator]`. The previous
|
|
1666
|
+
* signature returned an `AsyncIterableIterator<T>` (which has both
|
|
1667
|
+
* `[Symbol.asyncIterator]` and `.next()`). We verified at grep time
|
|
1668
|
+
* that no call sites use `.next()` on the scan result directly, so
|
|
1669
|
+
* the narrowed interface is safe.
|
|
1670
|
+
*
|
|
1671
|
+
* **Immutability:** each `.where()` / `.filter()` call returns a
|
|
1672
|
+
* fresh builder sharing the same page provider and page size. This
|
|
1673
|
+
* lets a base scan be reused for multiple parallel aggregations:
|
|
1674
|
+
*
|
|
1675
|
+
* ```ts
|
|
1676
|
+
* const scan = invoices.scan()
|
|
1677
|
+
* const [open, paid] = await Promise.all([
|
|
1678
|
+
* scan.where('status', '==', 'open').aggregate({ n: count() }),
|
|
1679
|
+
* scan.where('status', '==', 'paid').aggregate({ n: count() }),
|
|
1680
|
+
* ])
|
|
1681
|
+
* ```
|
|
1682
|
+
*
|
|
1683
|
+
* Note that each aggregation pays a full scan — there's no shared
|
|
1684
|
+
* iteration across the two. Multi-way aggregation in a single pass
|
|
1685
|
+
* is out of scope; consumers who need it should build a compound spec
|
|
1686
|
+
* and run a single `.aggregate({ openN, paidN })` at the DSL level.
|
|
1687
|
+
*
|
|
1688
|
+
 * **Out of scope for v1 (tracked separately):**
|
|
1689
|
+
* - `scan().aggregate().live()` — unbounded scan + change-stream
|
|
1690
|
+
* reconciliation is a design problem, not just a code one
|
|
1691
|
+
* - `scan().groupBy().aggregate()` — high-cardinality grouping on
|
|
1692
|
+
* huge collections would re-introduce the O(groups) memory
|
|
1693
|
+
* problem that aggregate fixes
|
|
1694
|
+
* - Parallel scan across pages — race-safe page cursor contracts
|
|
1695
|
+
* are not in the adapter API yet
|
|
1696
|
+
 * - `scan().join(...)` — tracked separately (streaming joins)
|
|
1697
|
+
*/
|
|
1698
|
+
|
|
1699
|
+
/**
 * Page provider — the Collection-shaped hook the builder calls to
 * walk cursors forward. Kept as a structural interface so tests can
 * wire up a synthetic provider without pulling in the full
 * Collection class. Collection's `listPage` matches this shape
 * exactly.
 *
 * @typeParam T - record type yielded by each page.
 */
interface ScanPageProvider<T> {
    /**
     * Fetch one page of records.
     *
     * @param opts.cursor - opaque continuation token from the previous
     *   page's `nextCursor`; omit for the first page.
     * @param opts.limit - maximum records to return in this page.
     * @returns the page's `items` plus `nextCursor` — `null` signals
     *   the scan is exhausted.
     */
    listPage(opts: {
        cursor?: string;
        limit?: number;
    }): Promise<{
        items: T[];
        nextCursor: string | null;
    }>;
}
|
|
1715
|
+
/**
 * Chainable streaming scan. Implements `AsyncIterable<T>` for
 * drop-in use with `for await … of`; adds `.where()` / `.filter()`
 * chainable clauses and a `.aggregate(spec)` async terminal.
 *
 * The builder is immutable per operation — each chained call
 * returns a fresh `ScanBuilder` sharing the same page provider and
 * page size. The original builder is never mutated, so it's safe
 * to reuse across multiple parallel consumers.
 */
declare class ScanBuilder<T> implements AsyncIterable<T> {
    /** Cursor-walking page source; see {@link ScanPageProvider}. */
    private readonly pageProvider;
    /** Records requested per `listPage` call — the left-side memory ceiling. */
    private readonly pageSize;
    /** AND-combined `where()` / `filter()` clauses, evaluated per record. */
    private readonly clauses;
    /**
     * Zero-or-more join legs to apply per record as the stream flows.
     * Each leg attaches the resolved right-side record (or null) under
     * its alias — streaming joins.
     *
     * Joins are evaluated AFTER clauses, so a `where()` filtered-out
     * record never triggers a right-side lookup. This is the same
     * ordering as `Query.toArray()` (clauses first, joins after) and
     * keeps the streaming path from doing wasted work.
     */
    private readonly joins;
    /**
     * Join resolution context. Required for `.join()` to translate a
     * field name into a target collection + ref mode and to resolve
     * the right-side `JoinableSource`. Optional because tests
     * construct ScanBuilder directly with synthetic page providers
     * that don't know about ref() — calling `.join()` without a
     * context throws with an actionable error.
     */
    private readonly joinContext;
    constructor(pageProvider: ScanPageProvider<T>, pageSize?: number, clauses?: readonly Clause[], joins?: readonly JoinLeg[], joinContext?: JoinContext);
    /**
     * Add a field comparison. Runs per record as the scan stream
     * flows through, so non-matching records are dropped before they
     * reach `.aggregate()` or the iteration consumer. Multiple
     * `.where()` calls are AND-combined — same semantics as
     * `Query.where()`.
     *
     * Clauses cannot use the secondary-index fast path here because
     * the scan sources records from the adapter's paginator, not from
     * the in-memory cache where indexes live. Index-accelerated scans
     * are a future optimization — the current implementation
     * evaluates clauses per record in O(1) per clause.
     */
    where(field: string, op: Operator, value: unknown): ScanBuilder<T>;
    /**
     * Escape hatch: add an arbitrary predicate function. Same
     * non-serializable caveat as `Query.filter()` — filter clauses
     * don't round-trip through `toPlan()`. Prefer `.where()` when
     * possible.
     */
    filter(fn: (record: T) => boolean): ScanBuilder<T>;
    /**
     * Resolve a `ref()`-declared foreign key per record as the scan
     * stream flows, attaching the right-side record (or null) under
     * `opts.as` — streaming joins over `scan()`.
     *
     * ```ts
     * for await (const inv of invoices.scan().join('clientId', { as: 'client' })) {
     *   await processInvoice(inv) // inv.client is attached
     * }
     *
     * // Or terminate with .aggregate() for streaming joined aggregation
     * const { total } = await invoices.scan()
     *   .where('status', '==', 'open')
     *   .join('clientId', { as: 'client' })
     *   .aggregate({ total: sum('amount') })
     * ```
     *
     * **The key difference from eager `.join()`:** the LEFT side
     * streams page-by-page from the adapter and is never
     * materialized. Memory ceiling on the left is O(pageSize), not
     * O(rowCount). This is what makes streaming joins suitable for
     * collections that exceed the eager join's 50_000-row ceiling.
     *
     * **Right-side strategy** is auto-selected per leg:
     * - **Indexed** — right source exposes `lookupById`, so each
     *   left row costs O(1). This is the common path for Collection
     *   right sides, which back `lookupById` with a Map lookup over
     *   the in-memory cache. The right collection must be in eager
     *   mode (the same constraint as eager join's
     *   `querySourceForJoin`).
     * - **Hash** — right source has only `snapshot()`. Build a
     *   `Map<id, record>` once at iteration start, probe per left
     *   row. Same correctness, same per-row cost as the indexed
     *   path; the difference is the upfront cost of materializing
     *   the right side once.
     *
     * Both strategies hold the right side in memory for the duration
     * of the iteration. The "streaming" property applies to the LEFT
     * side only — true left-and-right streaming joins (where neither
     * side fits in memory) require a sort-merge join planner that is
     * out of scope here.
     *
     * **Ref-mode semantics** match eager `.join()` exactly:
     * - `strict` → throws `DanglingReferenceError` mid-stream when a
     *   left record points at a non-existent right id. The throw
     *   aborts the async iterator — consumers should wrap the
     *   `for await` in try/catch if they want to recover.
     * - `warn` → attaches `null` and emits a one-shot warning per
     *   unique dangling pair (deduped via the same warn channel as
     *   eager join).
     * - `cascade` → attaches `null` silently. A delete-time mode;
     *   dangling refs at read time are mid-flight or pre-existing
     *   orphans, not a DSL error.
     *
     * Left records with null/undefined FK values attach `null`
     * regardless of mode — same "no reference at all" policy as
     * eager join and write-time `enforceRefsOnPut`.
     *
     * **Multi-FK chaining** is supported via repeated `.join()`
     * calls: each leg resolves an independent ref, independently
     * picks its right-side strategy, and applies its own ref mode.
     *
     * **Joins ARE applied before `.aggregate()`.** Unlike
     * `Query.aggregate()` (which skips joins entirely as a
     * projection-only short-circuit), the streaming aggregation
     * cannot know whether the spec references a joined field, so it
     * always applies joins. Consumers who want unjoined streaming
     * aggregation should leave `.join()` off the chain — the chain
     * is composable for a reason.
     *
     * Every JoinLeg carries `partitionScope: 'all'`, plumbed through
     * but not yet read — same seam as eager join.
     */
    join<As extends string, R = unknown>(field: string, opts: {
        as: As;
    }): ScanBuilder<T & Record<As, R | null>>;
    /**
     * Iterate the scan as an async iterable. Walks the page
     * provider's cursors forward until exhaustion, applying every
     * clause per record — only matching records are yielded.
     *
     * Backward-compatible with the previous async-generator `scan()`
     * return type for `for await … of` consumers.
     */
    [Symbol.asyncIterator](): AsyncIterator<T>;
    /**
     * Per-leg right-side resolution state. Built once at iteration
     * start and reused for every left record. Two strategies:
     *
     * - `lookupById`: present when the right source exposes the
     *   hook directly (typical Collection right side). Per-row
     *   cost is O(1).
     * - `hashByPrimaryKey`: built from `snapshot()` when no
     *   lookupById. Per-row cost is O(1) after the upfront O(N)
     *   materialization. Same as eager join's hash strategy.
     *
     * `warnedKeys` is the per-leg dedup set for ref-mode 'warn'. We
     * key on `field→target:refId` so the same dangling pair only
     * warns once per iteration. The dedup is per-iteration, not
     * per-process — a long-running scan that re-iterates would warn
     * again, which is the desired behavior (the data may have
     * changed between iterations).
     */
    private buildJoinResolvers;
    /**
     * Resolve a single join leg for one left record and return the
     * left record with the joined field attached under `leg.as`.
     * Pure function over `(left, resolver)`; never mutates the
     * input.
     *
     * Ref-mode dispatch matches eager `applyJoins`:
     * - null/undefined FK → attach null silently (always allowed)
     * - dangling FK + strict → throw `DanglingReferenceError`
     * - dangling FK + warn → attach null, warn-once per pair
     * - dangling FK + cascade → attach null silently
     */
    private applyOneJoinStreaming;
    /**
     * Reduce the scan stream through a named set of reducers and
     * return the final aggregated shape.
     *
     * Memory is O(reducers): one mutable state slot per spec key.
     * Records flow through the pipeline one at a time via
     * `for await` and are discarded after their `step()` is applied
     * — never collected into an array. This is the distinguishing
     * property from `Query.aggregate()`, which materializes the full
     * match set first.
     *
     * Reuses the same reducer protocol as `Query.aggregate()`,
     * so `count()`, `sum(field)`, `avg(field)`, `min(field)`,
     * `max(field)` all work unchanged. The `{ seed }` parameter
     * plumbing is honored transparently — the factories ignore it,
     * and the scan executor never touches the per-reducer state
     * construction.
     *
     * **Returns a Promise**, unlike `Query.aggregate().run()` which
     * is synchronous. The scan is inherently async because it walks
     * adapter pages, so the terminal has to be too. Consumers
     * destructure with await:
     *
     * ```ts
     * const { total, n } = await invoices.scan()
     *   .where('year', '==', 2025)
     *   .aggregate({ total: sum('amount'), n: count() })
     * ```
     *
     * **No `.live()` here.** `scan().aggregate().live()` would
     * require reconciling an unbounded streaming iteration with a
     * change-stream subscription — a design problem, not just a code
     * one. Consumers with huge collections and live needs should
     * narrow with `.where()` enough to fit in the 50k `query()`
     * limit and use `query().aggregate().live()` instead.
     */
    aggregate<Spec extends AggregateSpec>(spec: Spec): Promise<AggregateResult<Spec>>;
    /**
     * Evaluate the clause list against a single record. Linear in
     * the clause count; short-circuits on first false. Clauses on a
     * scan are always re-evaluated per record — no index-accelerated
     * path, because the stream sources records from the adapter
     * paginator, not from the in-memory cache where indexes live.
     */
    private recordMatches;
}
|
|
1939
|
+
|
|
1940
|
+
// Minified public-surface barrel: every symbol of this chunk is re-exported
// under a short generated alias ($ / A–Z / a–z / a0–af) so entry-point
// bundles can reference them compactly. Aliases are assigned by the bundler;
// do not hand-edit.
export { ScanBuilder as $, AlreadyElevatedError as A, BackupCorruptedError as B, ConflictError as C, DEFAULT_JOIN_MAX_ROWS as D, ElevationExpiredError as E, FilenameSanitizationError as F, GroupCardinalityError as G, type QuerySource as H, ImportCapabilityError as I, type JoinContext as J, KeyringExpiredError as K, LedgerContentionError as L, MissingTranslationError as M, NetworkError as N, type OrderBy as O, PathEscapeError as P, Query as Q, ReadOnlyAtInstantError as R, ReadOnlyError as S, ReadOnlyFrameError as T, type RefDescriptor as U, RefIntegrityError as V, type RefMode as W, RefRegistry as X, RefScopeError as Y, type RefViolation as Z, ReservedCollectionNameError as _, BackupLedgerError as a, type ScanPageProvider as a0, SchemaValidationError as a1, SessionExpiredError as a2, SessionNotFoundError as a3, SessionPolicyError as a4, StoreCapabilityError as a5, TamperedError as a6, TierDemoteDeniedError as a7, TierNotGrantedError as a8, TranslatorNotConfiguredError as a9, ValidationError as aa, applyJoins as ab, buildLiveQuery as ac, executePlan as ad, ref as ae, resetJoinWarnings as af, BundleIntegrityError as b, BundleVersionConflictError as c, DanglingReferenceError as d, DecryptionError as e, DelegationTargetMissingError as f, DictKeyInUseError as g, DictKeyMissingError as h, ExportCapabilityError as i, IndexRequiredError as j, IndexWriteFailureError as k, InvalidKeyError as l, type JoinLeg as m, type JoinStrategy as n, JoinTooLargeError as o, type JoinableSource as p, type LiveQuery as q, type LiveUpstream as r, LocaleNotSpecifiedError as s, NoAccessError as t, NotFoundError as u, NoydbError as v, PeriodClosedError as w, PermissionDeniedError as x, PrivilegeEscalationError as y, type QueryPlan as z };