@noy-db/core 0.2.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/index.cjs +2415 -75
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +1866 -11
- package/dist/index.d.ts +1866 -11
- package/dist/index.js +2385 -74
- package/dist/index.js.map +1 -1
- package/package.json +3 -2
package/dist/index.d.cts
CHANGED
|
@@ -21,7 +21,30 @@ interface EncryptedEnvelope {
|
|
|
21
21
|
}
|
|
22
22
|
/** All records across all collections for a compartment. */
|
|
23
23
|
type CompartmentSnapshot = Record<string, Record<string, EncryptedEnvelope>>;
|
|
24
|
+
/**
|
|
25
|
+
* Result of a single page fetch via the optional `listPage` adapter extension.
|
|
26
|
+
*
|
|
27
|
+
* `items` carries the actual encrypted envelopes (not just ids) so the
|
|
28
|
+
* caller can decrypt and emit a single record without an extra `get()`
|
|
29
|
+
* round-trip per id. `nextCursor` is `null` on the final page.
|
|
30
|
+
*/
|
|
31
|
+
interface ListPageResult {
|
|
32
|
+
/** Encrypted envelopes for this page, in adapter-defined order. */
|
|
33
|
+
items: Array<{
|
|
34
|
+
id: string;
|
|
35
|
+
envelope: EncryptedEnvelope;
|
|
36
|
+
}>;
|
|
37
|
+
/** Opaque cursor for the next page, or `null` if this was the last page. */
|
|
38
|
+
nextCursor: string | null;
|
|
39
|
+
}
|
|
24
40
|
interface NoydbAdapter {
|
|
41
|
+
/**
|
|
42
|
+
* Optional human-readable adapter name (e.g. 'memory', 'file', 'dynamo').
|
|
43
|
+
* Used in diagnostic messages and the listPage fallback warning. Adapters
|
|
44
|
+
* are encouraged to set this so logs are clearer about which backend is
|
|
45
|
+
* involved when something goes wrong.
|
|
46
|
+
*/
|
|
47
|
+
name?: string;
|
|
25
48
|
/** Get a single record. Returns null if not found. */
|
|
26
49
|
get(compartment: string, collection: string, id: string): Promise<EncryptedEnvelope | null>;
|
|
27
50
|
/** Put a record. Throws ConflictError if expectedVersion doesn't match. */
|
|
@@ -36,6 +59,26 @@ interface NoydbAdapter {
|
|
|
36
59
|
saveAll(compartment: string, data: CompartmentSnapshot): Promise<void>;
|
|
37
60
|
/** Optional connectivity check for sync engine. */
|
|
38
61
|
ping?(): Promise<boolean>;
|
|
62
|
+
/**
|
|
63
|
+
* Optional pagination extension. Adapters that implement `listPage` get
|
|
64
|
+
* the streaming `Collection.scan()` fast path; adapters that don't are
|
|
65
|
+
* silently fallen back to a full `loadAll()` + slice (with a one-time
|
|
66
|
+
* console.warn).
|
|
67
|
+
*
|
|
68
|
+
* `cursor` is opaque to the core — each adapter encodes its own paging
|
|
69
|
+
* state (DynamoDB: base64 LastEvaluatedKey JSON; S3: ContinuationToken;
|
|
70
|
+
* memory/file/browser: numeric offset of a sorted id list). Pass
|
|
71
|
+
* `undefined` to start from the beginning.
|
|
72
|
+
*
|
|
73
|
+
* `limit` is a soft upper bound on `items.length`. Adapters MAY return
|
|
74
|
+
* fewer items even when more exist (e.g. if the underlying store has
|
|
75
|
+
* its own page size cap), and MUST signal "no more pages" by returning
|
|
76
|
+
* `nextCursor: null`.
|
|
77
|
+
*
|
|
78
|
+
* The 6-method core contract is unchanged — this is an additive
|
|
79
|
+
* extension discovered via `'listPage' in adapter`.
|
|
80
|
+
*/
|
|
81
|
+
listPage?(compartment: string, collection: string, cursor?: string, limit?: number): Promise<ListPageResult>;
|
|
39
82
|
}
|
|
40
83
|
/** Type-safe helper for creating adapter factories. */
|
|
41
84
|
declare function defineAdapter<TOptions>(factory: (options: TOptions) => NoydbAdapter): (options: TOptions) => NoydbAdapter;
|
|
@@ -57,6 +100,32 @@ interface CompartmentBackup {
|
|
|
57
100
|
readonly _exported_by: string;
|
|
58
101
|
readonly keyrings: Record<string, KeyringFile>;
|
|
59
102
|
readonly collections: CompartmentSnapshot;
|
|
103
|
+
/**
|
|
104
|
+
* Internal collections (`_ledger`, `_ledger_deltas`, `_history`, `_sync`, …)
|
|
105
|
+
* captured alongside the data collections. Optional for backwards
|
|
106
|
+
* compat with v0.3 backups, which only stored data collections —
|
|
107
|
+
* loading a v0.3 backup leaves the ledger empty (and `verifyBackupIntegrity`
|
|
108
|
+
* skips the chain check, surfacing only a console warning).
|
|
109
|
+
*/
|
|
110
|
+
readonly _internal?: CompartmentSnapshot;
|
|
111
|
+
/**
|
|
112
|
+
* Verifiable-backup metadata (v0.4 #46). Embeds the ledger head at
|
|
113
|
+
* dump time so `load()` can cross-check that the loaded chain matches
|
|
114
|
+
* exactly what was exported. A backup whose chain has been tampered
|
|
115
|
+
* with — either by modifying ledger entries or by modifying data
|
|
116
|
+
* envelopes that the chain references — fails this check.
|
|
117
|
+
*
|
|
118
|
+
* Optional for backwards compat with v0.3 backups; missing means
|
|
119
|
+
* "legacy backup, load with a warning, no integrity check".
|
|
120
|
+
*/
|
|
121
|
+
readonly ledgerHead?: {
|
|
122
|
+
/** Hex sha256 of the canonical JSON of the last ledger entry. */
|
|
123
|
+
readonly hash: string;
|
|
124
|
+
/** Sequential index of the last ledger entry. */
|
|
125
|
+
readonly index: number;
|
|
126
|
+
/** ISO timestamp captured at dump time. */
|
|
127
|
+
readonly ts: string;
|
|
128
|
+
};
|
|
60
129
|
}
|
|
61
130
|
interface DirtyEntry {
|
|
62
131
|
readonly compartment: string;
|
|
@@ -236,6 +305,950 @@ declare class NotFoundError extends NoydbError {
|
|
|
236
305
|
declare class ValidationError extends NoydbError {
|
|
237
306
|
constructor(message?: string);
|
|
238
307
|
}
|
|
308
|
+
/**
|
|
309
|
+
* Thrown when a Standard Schema v1 validator rejects a record on
|
|
310
|
+
* `put()` (input validation) or on read (output validation). Carries
|
|
311
|
+
* the raw issue list so callers can render field-level errors.
|
|
312
|
+
*
|
|
313
|
+
* `direction` distinguishes the two cases:
|
|
314
|
+
* - `'input'`: the user passed bad data into `put()`. This is a
|
|
315
|
+
* normal error case that application code should handle — typically
|
|
316
|
+
* by showing validation messages in the UI.
|
|
317
|
+
* - `'output'`: stored data does not match the current schema. This
|
|
318
|
+
* indicates a schema drift (the schema was changed without
|
|
319
|
+
* migrating the existing records) and should be treated as a bug
|
|
320
|
+
* — the application should not swallow it silently.
|
|
321
|
+
*
|
|
322
|
+
* The `issues` type is deliberately `readonly unknown[]` on this class
|
|
323
|
+
* so that `errors.ts` doesn't need to import from `schema.ts` (and
|
|
324
|
+
* create a dependency cycle). Callers who know they're holding a
|
|
325
|
+
* `SchemaValidationError` can cast to the more precise
|
|
326
|
+
* `readonly StandardSchemaV1Issue[]` from `schema.ts`.
|
|
327
|
+
*/
|
|
328
|
+
declare class SchemaValidationError extends NoydbError {
|
|
329
|
+
readonly issues: readonly unknown[];
|
|
330
|
+
readonly direction: 'input' | 'output';
|
|
331
|
+
constructor(message: string, issues: readonly unknown[], direction: 'input' | 'output');
|
|
332
|
+
}
|
|
333
|
+
/**
|
|
334
|
+
* Thrown when `Compartment.load()` finds that a backup's hash chain
|
|
335
|
+
* doesn't verify, or that its embedded `ledgerHead.hash` doesn't
|
|
336
|
+
* match the chain head reconstructed from the loaded entries.
|
|
337
|
+
*
|
|
338
|
+
* Distinct from `BackupCorruptedError` so callers can choose to
|
|
339
|
+
* recover from one but not the other (e.g., a corrupted JSON file is
|
|
340
|
+
* unrecoverable; a chain mismatch might mean the backup is from an
|
|
341
|
+
* incompatible noy-db version).
|
|
342
|
+
*/
|
|
343
|
+
declare class BackupLedgerError extends NoydbError {
|
|
344
|
+
/** First-broken-entry index, if known. */
|
|
345
|
+
readonly divergedAt?: number;
|
|
346
|
+
constructor(message: string, divergedAt?: number);
|
|
347
|
+
}
|
|
348
|
+
/**
|
|
349
|
+
* Thrown when `Compartment.load()` finds that the backup's data
|
|
350
|
+
* collection content doesn't match the ledger's recorded
|
|
351
|
+
* `payloadHash`es. This is the "envelope was tampered with after
|
|
352
|
+
* dump" detection — the chain itself can be intact, but if any
|
|
353
|
+
* encrypted record bytes were swapped, this check catches it.
|
|
354
|
+
*/
|
|
355
|
+
declare class BackupCorruptedError extends NoydbError {
|
|
356
|
+
/** The (collection, id) pair whose envelope failed the hash check. */
|
|
357
|
+
readonly collection: string;
|
|
358
|
+
readonly id: string;
|
|
359
|
+
constructor(collection: string, id: string, message: string);
|
|
360
|
+
}
|
|
361
|
+
|
|
362
|
+
/**
|
|
363
|
+
* Standard Schema v1 integration.
|
|
364
|
+
*
|
|
365
|
+
* This file is the v0.4 entry point for **schema validation**. Any
|
|
366
|
+
* validator that implements the [Standard Schema v1
|
|
367
|
+
* protocol](https://standardschema.dev) — Zod, Valibot, ArkType, Effect
|
|
368
|
+
* Schema, etc. — can be attached to a `Collection` or `defineNoydbStore`
|
|
369
|
+
* and will:
|
|
370
|
+
*
|
|
371
|
+
* 1. Validate the record BEFORE encryption on `put()` — bad data is
|
|
372
|
+
* rejected at the store boundary with a rich issue list.
|
|
373
|
+
* 2. Validate the record AFTER decryption on `get()`/`list()`/`query()`
|
|
374
|
+
* — stored data that has drifted from the current schema throws
|
|
375
|
+
* loudly instead of silently propagating garbage to the UI.
|
|
376
|
+
*
|
|
377
|
+
* ## Why vendor the types?
|
|
378
|
+
*
|
|
379
|
+
* Standard Schema is a protocol, not a library. The spec is <200 lines of
|
|
380
|
+
* TypeScript and has no runtime. There's an official `@standard-schema/spec`
|
|
381
|
+
* types package on npm, but pulling it in would add a dependency edge
|
|
382
|
+
* purely for type definitions. Vendoring the minimal surface keeps
|
|
383
|
+
* `@noy-db/core` at **zero runtime dependencies** and gives us freedom to
|
|
384
|
+
* evolve the helpers without a version-lock on the spec package.
|
|
385
|
+
*
|
|
386
|
+
* If the spec changes in a breaking way (unlikely — it's frozen at v1),
|
|
387
|
+
* we update this file and bump our minor.
|
|
388
|
+
*
|
|
389
|
+
* ## Why not just run `schema.parse(value)` directly?
|
|
390
|
+
*
|
|
391
|
+
* Because then we'd be locked to whichever validator happens to have
|
|
392
|
+
* `.parse`. Standard Schema's `'~standard'.validate` contract is the same
|
|
393
|
+
* across every implementation and includes a structured issues list,
|
|
394
|
+
* which is much more useful than a thrown error for programmatic error
|
|
395
|
+
* handling (e.g., rendering field-level messages in a Vue component).
|
|
396
|
+
*/
|
|
397
|
+
/**
|
|
398
|
+
* The Standard Schema v1 protocol. A schema is any object that exposes a
|
|
399
|
+
* `'~standard'` property with `version: 1` and a `validate` function.
|
|
400
|
+
*
|
|
401
|
+
* The type parameters are:
|
|
402
|
+
* - `Input` — the type accepted by `validate` (what the user passes in)
|
|
403
|
+
* - `Output` — the type produced by `validate` (what we store/return,
|
|
404
|
+
* may differ from Input if the schema transforms or coerces)
|
|
405
|
+
*
|
|
406
|
+
* In most cases `Input === Output`, but validators that transform
|
|
407
|
+
* (Zod's `.transform`, Valibot's `transform`, etc.) can narrow or widen.
|
|
408
|
+
*
|
|
409
|
+
* We intentionally keep the `types` field `readonly` and optional — the
|
|
410
|
+
* spec marks it as optional because it's only used for inference, and
|
|
411
|
+
* not every implementation bothers populating it at runtime.
|
|
412
|
+
*/
|
|
413
|
+
interface StandardSchemaV1<Input = unknown, Output = Input> {
|
|
414
|
+
readonly '~standard': {
|
|
415
|
+
readonly version: 1;
|
|
416
|
+
readonly vendor: string;
|
|
417
|
+
readonly validate: (value: unknown) => StandardSchemaV1SyncResult<Output> | Promise<StandardSchemaV1SyncResult<Output>>;
|
|
418
|
+
readonly types?: {
|
|
419
|
+
readonly input: Input;
|
|
420
|
+
readonly output: Output;
|
|
421
|
+
} | undefined;
|
|
422
|
+
};
|
|
423
|
+
}
|
|
424
|
+
/**
|
|
425
|
+
* The result of a single call to `schema['~standard'].validate`. Either
|
|
426
|
+
* `{ value }` on success or `{ issues }` on failure — never both.
|
|
427
|
+
*
|
|
428
|
+
* The spec allows `issues` to be undefined on success (and some
|
|
429
|
+
* validators leave it that way), so consumers should discriminate on
|
|
430
|
+
* `issues?.length` rather than on truthiness of `value`.
|
|
431
|
+
*/
|
|
432
|
+
type StandardSchemaV1SyncResult<Output> = {
|
|
433
|
+
readonly value: Output;
|
|
434
|
+
readonly issues?: undefined;
|
|
435
|
+
} | {
|
|
436
|
+
readonly value?: undefined;
|
|
437
|
+
readonly issues: readonly StandardSchemaV1Issue[];
|
|
438
|
+
};
|
|
439
|
+
/**
|
|
440
|
+
* A single validation issue. The `message` is always present; the `path`
|
|
441
|
+
* is optional and points at the offending field when the schema tracks
|
|
442
|
+
* it (virtually every validator does for object types).
|
|
443
|
+
*
|
|
444
|
+
* The path is deliberately permissive — both a plain `PropertyKey` and a
|
|
445
|
+
* `{ key }` wrapper are allowed so validators that wrap path segments in
|
|
446
|
+
* objects (Zod does this in some modes) don't need special handling.
|
|
447
|
+
*/
|
|
448
|
+
interface StandardSchemaV1Issue {
|
|
449
|
+
readonly message: string;
|
|
450
|
+
readonly path?: ReadonlyArray<PropertyKey | {
|
|
451
|
+
readonly key: PropertyKey;
|
|
452
|
+
}> | undefined;
|
|
453
|
+
}
|
|
454
|
+
/**
|
|
455
|
+
* Infer the output type of a Standard Schema. Consumers use this to
|
|
456
|
+
* pull the type out of a schema instance when they want to declare a
|
|
457
|
+
* Collection<T> or defineNoydbStore<T> with `T` derived from the schema.
|
|
458
|
+
*
|
|
459
|
+
* Example:
|
|
460
|
+
* ```ts
|
|
461
|
+
* const InvoiceSchema = z.object({ id: z.string(), amount: z.number() })
|
|
462
|
+
* type Invoice = InferOutput<typeof InvoiceSchema>
|
|
463
|
+
* ```
|
|
464
|
+
*/
|
|
465
|
+
type InferOutput<T extends StandardSchemaV1> = T extends StandardSchemaV1<unknown, infer O> ? O : never;
|
|
466
|
+
/**
|
|
467
|
+
* Validate an input value against a schema. Throws
|
|
468
|
+
* `SchemaValidationError` if the schema rejects, with the rich issue
|
|
469
|
+
* list attached. Otherwise returns the (possibly transformed) output
|
|
470
|
+
* value.
|
|
471
|
+
*
|
|
472
|
+
* The `context` string is included in the thrown error's message so the
|
|
473
|
+
* caller knows where the failure happened (e.g. `"put(inv-001)"`) without
|
|
474
|
+
* every caller having to wrap the throw in a try/catch.
|
|
475
|
+
*
|
|
476
|
+
* This function is ALWAYS async because some validators (notably Effect
|
|
477
|
+
* Schema and Zod's `.refine` with async predicates) can return a
|
|
478
|
+
* Promise. We `await` the result unconditionally to normalize the
|
|
479
|
+
* contract — the extra microtask is free compared to the cost of an
|
|
480
|
+
* encrypt/decrypt round-trip.
|
|
481
|
+
*/
|
|
482
|
+
declare function validateSchemaInput<Output>(schema: StandardSchemaV1<unknown, Output>, value: unknown, context: string): Promise<Output>;
|
|
483
|
+
/**
|
|
484
|
+
* Validate an already-stored value coming OUT of the collection. This
|
|
485
|
+
* is a distinct helper from `validateSchemaInput` because the error
|
|
486
|
+
* semantics differ: an output-validation failure means the data in
|
|
487
|
+
* storage has drifted from the current schema (an unexpected state),
|
|
488
|
+
* whereas an input-validation failure means the user passed bad data
|
|
489
|
+
* (an expected state for a UI that isn't guarding its inputs).
|
|
490
|
+
*
|
|
491
|
+
* We still throw — silently returning bad data would be worse — but
|
|
492
|
+
* the error carries `direction: 'output'` so upstream code (and a
|
|
493
|
+
* potential migrate hook) can distinguish the two cases.
|
|
494
|
+
*/
|
|
495
|
+
declare function validateSchemaOutput<Output>(schema: StandardSchemaV1<unknown, Output>, value: unknown, context: string): Promise<Output>;
|
|
496
|
+
|
|
497
|
+
/**
|
|
498
|
+
* Ledger entry shape + canonical JSON + sha256 helpers.
|
|
499
|
+
*
|
|
500
|
+
* This file holds the PURE primitives used by the hash-chained ledger:
|
|
501
|
+
* the entry type, the deterministic (sort-stable) JSON encoder, and
|
|
502
|
+
* the sha256 hasher that produces `prevHash` and `ledger.head()`.
|
|
503
|
+
*
|
|
504
|
+
* Everything here is validator-free and side-effect free — the only
|
|
505
|
+
* runtime dep is Web Crypto's `subtle.digest` for the sha256 call,
|
|
506
|
+
* which we already use for every other hashing operation in the core.
|
|
507
|
+
*
|
|
508
|
+
* The hash chain property works like this:
|
|
509
|
+
*
|
|
510
|
+
* hash(entry[i]) = sha256(canonicalJSON(entry[i]))
|
|
511
|
+
* entry[i+1].prevHash = hash(entry[i])
|
|
512
|
+
*
|
|
513
|
+
* Any modification to `entry[i]` (field values, field order, whitespace)
|
|
514
|
+
* produces a different `hash(entry[i])`, which means `entry[i+1]`'s
|
|
515
|
+
* stored `prevHash` no longer matches the recomputed hash, which means
|
|
516
|
+
* `verify()` returns `{ ok: false, divergedAt: i + 1 }`. The chain is
|
|
517
|
+
* append-only and tamper-evident without external anchoring.
|
|
518
|
+
*/
|
|
519
|
+
/**
|
|
520
|
+
* A single ledger entry in its plaintext form — what gets serialized,
|
|
521
|
+
* hashed, and then encrypted with the ledger DEK before being written
|
|
522
|
+
* to the `_ledger/` adapter collection.
|
|
523
|
+
*
|
|
524
|
+
* ## Why hash the ciphertext, not the plaintext?
|
|
525
|
+
*
|
|
526
|
+
* `payloadHash` is the sha256 of the record's ENCRYPTED envelope bytes,
|
|
527
|
+
* not its plaintext. This matters:
|
|
528
|
+
*
|
|
529
|
+
* 1. **Zero-knowledge preserved.** A user (or a third party) can
|
|
530
|
+
* verify the ledger against the stored envelopes without any
|
|
531
|
+
* decryption keys. The adapter layer already holds only
|
|
532
|
+
* ciphertext, so hashing the ciphertext keeps the ledger at the
|
|
533
|
+
* same privacy level as the adapter.
|
|
534
|
+
*
|
|
535
|
+
* 2. **Determinism.** Plaintext → ciphertext is randomized by the
|
|
536
|
+
* fresh per-write IV, so `hash(plaintext)` would need extra
|
|
537
|
+
* normalization. `hash(ciphertext)` is already deterministic and
|
|
538
|
+
* unique per write.
|
|
539
|
+
*
|
|
540
|
+
* 3. **Detection property.** If an attacker modifies even one byte of
|
|
541
|
+
* the stored ciphertext (trying to flip a record), the hash
|
|
542
|
+
* changes, the ledger's recorded `payloadHash` no longer matches,
|
|
543
|
+
* and a data-integrity check fails. We don't do that check in
|
|
544
|
+
* `verify()` today (v0.4 only checks chain consistency), but the
|
|
545
|
+
* hook is there for a future `verifyIntegrity()` follow-up.
|
|
546
|
+
*
|
|
547
|
+
* Fields marked `op`, `collection`, `id`, `version`, `ts`, `actor` are
|
|
548
|
+
* plaintext METADATA about the operation — NOT the record itself. The
|
|
549
|
+
* entry is still encrypted at rest via the ledger DEK, but adapters
|
|
550
|
+
* could theoretically infer operation patterns from the sizes and
|
|
551
|
+
* timestamps. This is an accepted trade-off for the tamper-evidence
|
|
552
|
+
* property; full ORAM-level privacy is out of scope for noy-db.
|
|
553
|
+
*/
|
|
554
|
+
interface LedgerEntry {
|
|
555
|
+
/**
|
|
556
|
+
* Zero-based sequential position of this entry in the chain. The
|
|
557
|
+
* canonical adapter key is this number zero-padded to 10 digits
|
|
558
|
+
* (`"0000000001"`) so lexicographic ordering matches numeric order.
|
|
559
|
+
*/
|
|
560
|
+
readonly index: number;
|
|
561
|
+
/**
|
|
562
|
+
* Hex-encoded sha256 of the canonical JSON of the PREVIOUS entry.
|
|
563
|
+
* The genesis entry (index 0) has `prevHash === ''` — the first
|
|
564
|
+
* entry in a fresh compartment has nothing to point back to.
|
|
565
|
+
*/
|
|
566
|
+
readonly prevHash: string;
|
|
567
|
+
/**
|
|
568
|
+
* Which kind of mutation this entry records. v0.4 only supports
|
|
569
|
+
* data operations (`put`, `delete`). Access-control operations
|
|
570
|
+
* (`grant`, `revoke`, `rotate`) will be added in a follow-up once
|
|
571
|
+
* the keyring write path is instrumented — that's tracked in the
|
|
572
|
+
* v0.4 epic issue.
|
|
573
|
+
*/
|
|
574
|
+
readonly op: 'put' | 'delete';
|
|
575
|
+
/** The collection the mutation targeted. */
|
|
576
|
+
readonly collection: string;
|
|
577
|
+
/** The record id the mutation targeted. */
|
|
578
|
+
readonly id: string;
|
|
579
|
+
/**
|
|
580
|
+
* The record version AFTER this mutation. For `put` this is the
|
|
581
|
+
* newly assigned version; for `delete` this is the version that
|
|
582
|
+
* was deleted (the last version visible to reads).
|
|
583
|
+
*/
|
|
584
|
+
readonly version: number;
|
|
585
|
+
/** ISO timestamp of the mutation. */
|
|
586
|
+
readonly ts: string;
|
|
587
|
+
/** User id of the actor who performed the mutation. */
|
|
588
|
+
readonly actor: string;
|
|
589
|
+
/**
|
|
590
|
+
* Hex-encoded sha256 of the encrypted envelope's `_data` field.
|
|
591
|
+
* For `put`, this is the hash of the new ciphertext. For `delete`,
|
|
592
|
+
* it's the hash of the last visible ciphertext at deletion time,
|
|
593
|
+
* or the empty string if nothing was there to delete. Hashing the
|
|
594
|
+
* ciphertext (not the plaintext) preserves zero-knowledge — see
|
|
595
|
+
* the file docstring.
|
|
596
|
+
*/
|
|
597
|
+
readonly payloadHash: string;
|
|
598
|
+
/**
|
|
599
|
+
* Optional hex-encoded sha256 of the encrypted JSON Patch delta
|
|
600
|
+
* blob stored alongside this entry in `_ledger_deltas/`. Present
|
|
601
|
+
* only for `put` operations that had a previous version — the
|
|
602
|
+
* genesis put of a new record, and every `delete`, leave this
|
|
603
|
+
* field undefined.
|
|
604
|
+
*
|
|
605
|
+
* The delta payload itself lives in a sibling internal collection
|
|
606
|
+
* (`_ledger_deltas/<paddedIndex>`) and is encrypted with the
|
|
607
|
+
* ledger DEK. Callers use `ledger.loadDelta(index)` to decrypt and
|
|
608
|
+
* deserialize it when reconstructing a historical version.
|
|
609
|
+
*
|
|
610
|
+
* Why optional instead of always-present: the first put of a
|
|
611
|
+
* record has no previous version to diff against, so storing an
|
|
612
|
+
* empty patch would be noise. For deletes there's no "next" state
|
|
613
|
+
* to describe with a delta. Both cases set this field to undefined.
|
|
614
|
+
*
|
|
615
|
+
* Note: the canonical-JSON hasher treats `undefined` as invalid
|
|
616
|
+
* (it's one of the guard rails), so on the wire this field is
|
|
617
|
+
* either `{ deltaHash: '<hex>' }` or absent from the JSON
|
|
618
|
+
* entirely — never `{ deltaHash: undefined }`.
|
|
619
|
+
*/
|
|
620
|
+
readonly deltaHash?: string;
|
|
621
|
+
}
|
|
622
|
+
/**
|
|
623
|
+
* Canonical (sort-stable) JSON encoder.
|
|
624
|
+
*
|
|
625
|
+
* This function is the load-bearing primitive of the hash chain:
|
|
626
|
+
* `sha256(canonicalJSON(entry))` must produce the same hex string
|
|
627
|
+
* every time, on every machine, for the same logical entry — otherwise
|
|
628
|
+
* `verify()` would return `{ ok: false }` on cross-platform reads.
|
|
629
|
+
*
|
|
630
|
+
* JavaScript's `JSON.stringify` is almost canonical, but NOT quite:
|
|
631
|
+
* it preserves the insertion order of object keys, which means
|
|
632
|
+
* `{a:1,b:2}` and `{b:2,a:1}` serialize differently. We fix this by
|
|
633
|
+
* recursively walking objects and sorting their keys before
|
|
634
|
+
* concatenation.
|
|
635
|
+
*
|
|
636
|
+
* Arrays keep their original order (reordering them would change
|
|
637
|
+
* semantics). Numbers, strings, booleans, and `null` use the default
|
|
638
|
+
* JSON encoding. `undefined` and functions are rejected — ledger
|
|
639
|
+
* entries are plain data, and silently dropping `undefined` would
|
|
640
|
+
* break the "same input → same hash" property if a caller forgot to
|
|
641
|
+
* omit a field.
|
|
642
|
+
*
|
|
643
|
+
* Performance: one pass per nesting level; O(n log n) for key sorting
|
|
644
|
+
* at each object. Entries are small (< 1 KB) so this is negligible
|
|
645
|
+
* compared to the sha256 call.
|
|
646
|
+
*/
|
|
647
|
+
declare function canonicalJson(value: unknown): string;
|
|
648
|
+
/**
|
|
649
|
+
* Compute a hex-encoded sha256 of a string via Web Crypto's subtle API.
|
|
650
|
+
*
|
|
651
|
+
* We use hex (not base64) for hashes because hex is case-insensitive,
|
|
652
|
+
* fixed-length (64 chars), and easier to compare visually in debug
|
|
653
|
+
* output. Base64 would save a few bytes in storage but every encrypted
|
|
654
|
+
* ledger entry is already much larger than the hash itself.
|
|
655
|
+
*/
|
|
656
|
+
declare function sha256Hex(input: string): Promise<string>;
|
|
657
|
+
/**
|
|
658
|
+
* Compute the canonical hash of a ledger entry. Short wrapper around
|
|
659
|
+
* `canonicalJson` + `sha256Hex`; callers use this instead of composing
|
|
660
|
+
* the two functions every time, so any future change to the hashing
|
|
661
|
+
* pipeline (e.g., adding a domain-separation prefix) lives in one place.
|
|
662
|
+
*/
|
|
663
|
+
declare function hashEntry(entry: LedgerEntry): Promise<string>;
|
|
664
|
+
/**
|
|
665
|
+
* Pad an index to the canonical 10-digit form used as the adapter key.
|
|
666
|
+
* Ten digits is enough for ~10 billion ledger entries per compartment
|
|
667
|
+
* — far beyond any realistic use case, but cheap enough that the extra
|
|
668
|
+
* digits don't hurt storage.
|
|
669
|
+
*/
|
|
670
|
+
declare function paddedIndex(index: number): string;
|
|
671
|
+
/** Parse a padded adapter key back into a number. Returns NaN on malformed input. */
|
|
672
|
+
declare function parseIndex(key: string): number;
|
|
673
|
+
|
|
674
|
+
/**
|
|
675
|
+
* RFC 6902 JSON Patch — compute + apply.
|
|
676
|
+
*
|
|
677
|
+
* This module is the v0.4 "delta history" primitive: instead of
|
|
678
|
+
* snapshotting the full record on every put (the v0.3 behavior),
|
|
679
|
+
* `Collection.put` computes a JSON Patch from the previous version to
|
|
680
|
+
* the new version and stores only the patch in the ledger. To
|
|
681
|
+
* reconstruct version N, we walk from the genesis snapshot forward
|
|
682
|
+
* applying patches. Storage scales with **edit size**, not record
|
|
683
|
+
* size — a 10 KB record edited 1000 times costs ~10 KB of deltas
|
|
684
|
+
* instead of ~10 MB of snapshots.
|
|
685
|
+
*
|
|
686
|
+
* ## Why hand-roll instead of using a library?
|
|
687
|
+
*
|
|
688
|
+
* RFC 6902 has good libraries (`fast-json-patch`, `rfc6902`) but every
|
|
689
|
+
* single one of them adds a runtime dependency to `@noy-db/core`. The
|
|
690
|
+
* "zero runtime dependencies" promise is one of the core's load-bearing
|
|
691
|
+
* features, and the patch surface we actually need is small enough
|
|
692
|
+
* (~150 LoC) that vendoring is the right call.
|
|
693
|
+
*
|
|
694
|
+
* What we implement:
|
|
695
|
+
* - `add` — insert a value at a path
|
|
696
|
+
* - `remove` — delete the value at a path
|
|
697
|
+
* - `replace` — overwrite the value at a path
|
|
698
|
+
*
|
|
699
|
+
* What we deliberately skip (out of scope for the v0.4 ledger use):
|
|
700
|
+
* - `move` and `copy` — optimizations; the diff algorithm doesn't
|
|
701
|
+
* emit them, so the apply path doesn't need them
|
|
702
|
+
* - `test` — used for transactional patches; we already have
|
|
703
|
+
* optimistic concurrency via `_v` at the envelope layer
|
|
704
|
+
* - Sophisticated array diffing (LCS, edit distance) — we treat
|
|
705
|
+
* arrays as atomic values and emit a single `replace` op when
|
|
706
|
+
* they differ. The accounting domain has small arrays where this
|
|
707
|
+
* is fine; if we ever need patch-level array diffing we can add
|
|
708
|
+
* it without changing the storage format.
|
|
709
|
+
*
|
|
710
|
+
* ## Path encoding (RFC 6902 §3)
|
|
711
|
+
*
|
|
712
|
+
* Paths look like `/foo/bar/0`. Each path segment is either an object
|
|
713
|
+
* key or a numeric array index. Two characters need escaping inside
|
|
714
|
+
* keys: `~` becomes `~0` and `/` becomes `~1`. We implement both.
|
|
715
|
+
*
|
|
716
|
+
* Empty path (`""`) refers to the root document. Only `replace` makes
|
|
717
|
+
* sense at the root, and our diff function emits it as a top-level
|
|
718
|
+
* `replace` when `prev` and `next` differ in shape (object vs array,
|
|
719
|
+
* primitive vs object, etc.).
|
|
720
|
+
*/
|
|
721
|
+
/** A single JSON Patch operation. Subset of RFC 6902 — see file docstring. */
|
|
722
|
+
type JsonPatchOp = {
|
|
723
|
+
readonly op: 'add';
|
|
724
|
+
readonly path: string;
|
|
725
|
+
readonly value: unknown;
|
|
726
|
+
} | {
|
|
727
|
+
readonly op: 'remove';
|
|
728
|
+
readonly path: string;
|
|
729
|
+
} | {
|
|
730
|
+
readonly op: 'replace';
|
|
731
|
+
readonly path: string;
|
|
732
|
+
readonly value: unknown;
|
|
733
|
+
};
|
|
734
|
+
/** A complete JSON Patch document — an array of operations. */
|
|
735
|
+
type JsonPatch = readonly JsonPatchOp[];
|
|
736
|
+
/**
 * Compute a JSON Patch that, when applied to `prev`, produces `next`.
 *
 * The algorithm is a straightforward recursive object walk:
 *
 * 1. If both inputs are plain objects (and not arrays/null):
 *    - For each key in `prev`, recurse if `next` has it, else emit `remove`
 *    - For each key in `next` not in `prev`, emit `add`
 * 2. If both inputs are arrays AND structurally equal, no-op.
 *    Otherwise emit a single `replace` for the whole array.
 * 3. If both inputs are deeply equal primitives, no-op.
 * 4. Otherwise emit a `replace` at the current path.
 *
 * We do not minimize patches across move-like rearrangements — every
 * generated patch is straightforward enough to apply by hand if you
 * had to debug it.
 *
 * @param prev - The baseline document.
 * @param next - The desired document.
 * @returns A patch such that `applyPatch(prev, patch)` yields `next`.
 */
declare function computePatch(prev: unknown, next: unknown): JsonPatch;
|
|
754
|
+
/**
 * Apply a JSON Patch to a base document and return the result.
 *
 * The base document is **not mutated** — every op clones the parent
 * container before writing to it, so the caller's reference to `base`
 * stays untouched. This costs an extra allocation per op but makes
 * the apply pipeline reorderable and safe to interrupt.
 *
 * @param base - The document to patch; never mutated.
 * @param patch - Operations applied in order.
 * @returns A new document with all ops applied.
 *
 * Throws on:
 * - Removing a path that doesn't exist
 * - Adding to a path whose parent doesn't exist
 * - A path component that doesn't match the document shape (e.g.,
 *   trying to step into a primitive)
 *
 * Throwing is the right behavior for the ledger use case: a failed
 * apply means the chain is corrupted, which should be loud rather
 * than silently producing a wrong reconstruction.
 */
declare function applyPatch<T = unknown>(base: T, patch: JsonPatch): T;
|
|
773
|
+
|
|
774
|
+
/**
 * `LedgerStore` — read/write access to a compartment's hash-chained
 * audit log.
 *
 * The store is a thin wrapper around the adapter's `_ledger/` internal
 * collection. Every append:
 *
 * 1. Loads the current head (or treats an empty ledger as head = -1)
 * 2. Computes `prevHash` = sha256(canonicalJson(head))
 * 3. Builds the new entry with `index = head.index + 1`
 * 4. Encrypts the entry with the compartment's ledger DEK
 * 5. Writes the encrypted envelope to `_ledger/<paddedIndex>`
 *
 * `verify()` walks the chain from genesis forward and returns
 * `{ ok: true, head }` on success or `{ ok: false, divergedAt }` on the
 * first broken link.
 *
 * ## Thread / concurrency model
 *
 * For v0.4 we assume a **single writer per compartment**. Two
 * concurrent `append()` calls would race on the "read head, write
 * head+1" cycle and could produce a broken chain. The v0.3 sync engine
 * is the primary concurrent-writer scenario, and it uses
 * optimistic concurrency via `expectedVersion` on the adapter — but
 * the ledger path has no such guard today. Multi-writer hardening is a
 * v0.5 follow-up.
 *
 * Single-writer usage IS safe, including across process restarts:
 * `head()` reads the adapter fresh each call, so a crash between the
 * adapter.put of a data record and the ledger append just means the
 * ledger is missing an entry for that record. `verify()` still
 * succeeds; a future `verifyIntegrity()` helper can cross-check the
 * ledger against the data collections to catch the gap.
 *
 * ## Why hide the ledger from `compartment.collection()`?
 *
 * The `_ledger` name starts with `_`, matching the existing prefix
 * convention for internal collections (`_keyring`, `_sync`,
 * `_history`). The Compartment's public `collection()` method already
 * returns entries for any name, but `loadAll()` filters out
 * underscore-prefixed collections so backups and exports don't leak
 * ledger metadata. We keep the ledger accessible ONLY via
 * `compartment.ledger()` to enforce the hash-chain invariants — direct
 * puts via `collection('_ledger')` would bypass the `append()` logic.
 */
|
|
819
|
+
|
|
820
|
+
/**
 * The internal collection name used for ledger entry storage. The `_`
 * prefix keeps it out of `loadAll()` exports, like other internal
 * collections.
 */
declare const LEDGER_COLLECTION = "_ledger";
|
|
822
|
+
/**
 * The internal collection name used for delta payload storage.
 *
 * Deltas live in a sibling collection (not inside `_ledger`) for two
 * reasons:
 *
 * 1. **Listing efficiency.** `ledger.loadAllEntries()` calls
 *    `adapter.list(_ledger)` which would otherwise return every
 *    delta key alongside every entry key. Splitting them keeps the
 *    list small (one key per ledger entry) and the delta reads
 *    keyed by the entry's index.
 *
 * 2. **Prune-friendliness.** A future `pruneHistory()` will delete
 *    old deltas while keeping the ledger chain intact (folding old
 *    deltas into a base snapshot). Separating the storage makes
 *    that deletion a targeted operation on one collection instead
 *    of a filter across a mixed list.
 *
 * Both collections share the same ledger DEK — one DEK, two
 * internal collections, same zero-knowledge guarantees.
 */
declare const LEDGER_DELTAS_COLLECTION = "_ledger_deltas";
|
|
844
|
+
/**
 * Input shape for `LedgerStore.append()`. The caller supplies the
 * operation metadata; the store fills in `index` and `prevHash`.
 */
interface AppendInput {
    /** The operation kind, matching `LedgerEntry['op']`. */
    op: LedgerEntry['op'];
    /** Name of the data collection the operation touched. */
    collection: string;
    /** Id of the affected record within `collection`. */
    id: string;
    /** The record's version after the operation. */
    version: number;
    /** Identifier of the actor that performed the operation. */
    actor: string;
    /** sha256 over the affected envelope's payload (see `envelopePayloadHash`). */
    payloadHash: string;
    /**
     * Optional JSON Patch representing the delta from the previous
     * version to the new version. Present only for `put` operations
     * that had a previous version; omitted for genesis puts and for
     * deletes. When present, `LedgerStore.append` persists the patch
     * in `_ledger_deltas/<paddedIndex>` and records its sha256 hash
     * as the entry's `deltaHash` field.
     */
    delta?: JsonPatch;
}
|
|
865
|
+
/**
 * Result of `LedgerStore.verify()`. On success, `head` is the hash of
 * the last entry — the same value that should be published to any
 * external anchoring service (blockchain, OpenTimestamps, etc.). On
 * failure, `divergedAt` is the 0-based index of the first entry whose
 * recorded `prevHash` does not match the recomputed hash of its
 * predecessor. Entries at `divergedAt` and later are untrustworthy;
 * entries before that index are still valid.
 */
type VerifyResult = {
    readonly ok: true;
    /** Hash of the last entry — the value to anchor externally. */
    readonly head: string;
    /** Total number of entries in the verified chain. */
    readonly length: number;
} | {
    readonly ok: false;
    /** 0-based index of the first broken entry. */
    readonly divergedAt: number;
    /** The recomputed hash of the predecessor entry. */
    readonly expected: string;
    /** The `prevHash` actually recorded on the broken entry. */
    readonly actual: string;
};
|
|
884
|
+
/**
 * A LedgerStore is bound to a single compartment. Callers obtain one
 * via `compartment.ledger()` — there is no public constructor to keep
 * the hash-chain invariants in one place.
 *
 * The class holds no mutable state beyond its dependencies (adapter,
 * compartment name, DEK resolver, actor id). Every method reads the
 * adapter fresh so multiple instances against the same compartment
 * see each other's writes immediately (at the cost of re-parsing the
 * ledger on every head() / verify() call; acceptable at v0.4 scale).
 */
declare class LedgerStore {
    private readonly adapter;
    private readonly compartment;
    private readonly encrypted;
    private readonly getDEK;
    private readonly actor;
    /**
     * In-memory cache of the chain head — the most recently appended
     * entry along with its precomputed hash. Without this, every
     * `append()` would re-load every prior entry to recompute the
     * prevHash, making N puts O(N²) — a 1K-record stress test goes from
     * < 100ms to a multi-second timeout.
     *
     * The cache is populated on first read (`append`, `head`, `verify`)
     * and updated in-place on every successful `append`. Single-writer
     * usage (the v0.4 assumption) keeps it consistent. A second
     * LedgerStore instance writing to the same compartment would not
     * see the first instance's appends in its cached state — that's the
     * concurrency caveat documented at the class level.
     *
     * Sentinel `undefined` means "not yet loaded"; an explicit `null`
     * value means "loaded and confirmed empty" — distinguishing these
     * matters because an empty ledger is a valid state (genesis prevHash
     * is the empty string), and we don't want to re-scan the adapter
     * just because the chain is freshly initialized.
     */
    private headCache;
    constructor(opts: {
        adapter: NoydbAdapter;
        compartment: string;
        encrypted: boolean;
        getDEK: (collectionName: string) => Promise<CryptoKey>;
        actor: string;
    });
    /**
     * Lazily load (or return cached) the current chain head. The cache
     * sentinel is `undefined` until first access; after the first call,
     * the cache holds either a `{ entry, hash }` for non-empty ledgers
     * or `null` for empty ones.
     */
    private getCachedHead;
    /**
     * Append a new entry to the ledger. Returns the full entry that was
     * written (with its assigned index and computed prevHash) so the
     * caller can use the hash for downstream purposes (e.g., embedding
     * in a verifiable backup).
     *
     * This is the **only** way to add entries. Direct adapter writes to
     * `_ledger/` would bypass the chain math and would be caught by the
     * next `verify()` call as a divergence.
     */
    append(input: AppendInput): Promise<LedgerEntry>;
    /**
     * Load a delta payload by its entry index. Returns `null` if the
     * entry at that index doesn't reference a delta (genesis puts and
     * deletes leave the slot empty) or if the delta row is missing
     * (possible after a `pruneHistory` fold).
     *
     * The caller is responsible for deciding what to do with a missing
     * delta — `ledger.reconstruct()` uses it as a "stop walking
     * backward" signal and falls back to the on-disk current value.
     */
    loadDelta(index: number): Promise<JsonPatch | null>;
    /** Encrypt a JSON Patch into an envelope for storage. Mirrors encryptEntry. */
    private encryptDelta;
    /**
     * Read all entries in ascending-index order. Used internally by
     * `append()`, `head()`, `verify()`, and `entries()`. Decryption is
     * serial because the entries are tiny and the overhead of a Promise
     * pool would dominate at realistic chain lengths (< 100K entries).
     */
    loadAllEntries(): Promise<LedgerEntry[]>;
    /**
     * Return the current head of the ledger: the last entry, its hash,
     * and the total chain length. `null` on an empty ledger so callers
     * can distinguish "no history yet" from "empty history".
     */
    head(): Promise<{
        readonly entry: LedgerEntry;
        readonly hash: string;
        readonly length: number;
    } | null>;
    /**
     * Return entries in the requested half-open range `[from, to)`.
     * Defaults: `from = 0`, `to = length`. The indices are clipped to
     * the valid range; no error is thrown for out-of-range queries.
     */
    entries(opts?: {
        from?: number;
        to?: number;
    }): Promise<LedgerEntry[]>;
    /**
     * Reconstruct a record's state at a given historical version by
     * walking the ledger's delta chain backward from the current state.
     *
     * ## Algorithm
     *
     * Ledger deltas are stored in **reverse** form — each entry's
     * patch describes how to undo that put, transforming the new
     * record back into the previous one. `reconstruct` exploits this
     * by:
     *
     * 1. Finding every ledger entry for `(collection, id)` in the
     *    chain, sorted by index ascending.
     * 2. Starting from `current` (the present value of the record,
     *    as held by the caller — typically fetched via
     *    `Collection.get()`).
     * 3. Walking entries in **descending** index order and applying
     *    each entry's reverse patch, stopping when we reach the
     *    entry whose version equals `atVersion`.
     *
     * The result is the record as it existed immediately AFTER the
     * put at `atVersion`. To get the state at the genesis put
     * (version 1), the walk runs all the way back through every put
     * after the first.
     *
     * ## Caveats
     *
     * - **Delete entries** break the walk: once we see a delete, the
     *   record didn't exist before that point, so there's nothing to
     *   reconstruct. We return `null` in that case.
     * - **Missing deltas** (e.g., after `pruneHistory` folds old
     *   entries into a base snapshot) also stop the walk. v0.4 does
     *   not ship pruneHistory, so today this only happens if an entry
     *   was deleted out-of-band.
     * - The caller MUST pass the correct current value. Passing a
     *   mutated object would corrupt the reconstruction — the patch
     *   chain is only valid against the exact state that was in
     *   effect when the most recent put happened.
     *
     * For v0.4, `reconstruct` is the only way to read a historical
     * version via deltas. The legacy `_history` collection still
     * holds full snapshots and `Collection.getVersion()` still reads
     * from there — the two paths coexist until pruneHistory lands in
     * a follow-up and delta becomes the default.
     */
    reconstruct<T>(collection: string, id: string, current: T, atVersion: number): Promise<T | null>;
    /**
     * Walk the chain from genesis forward and verify every link.
     *
     * Returns `{ ok: true, head, length }` if every entry's `prevHash`
     * matches the recomputed hash of its predecessor (and the genesis
     * entry's `prevHash` is the empty string).
     *
     * Returns `{ ok: false, divergedAt, expected, actual }` on the first
     * mismatch. `divergedAt` is the 0-based index of the BROKEN entry
     * — entries before that index still verify cleanly; entries at and
     * after `divergedAt` are untrustworthy.
     *
     * This method detects:
     * - Mutated entry content (fields changed)
     * - Reordered entries (if any adjacent pair swaps, the prevHash
     *   of the second no longer matches)
     * - Inserted entries (the inserted entry's prevHash likely fails,
     *   and the following entry's prevHash definitely fails)
     * - Deleted entries (the entry after the deletion sees a wrong
     *   prevHash)
     *
     * It does NOT detect:
     * - Tampering with the DATA collections that bypassed the ledger
     *   entirely (e.g., an attacker who modifies records without
     *   appending matching ledger entries — this is why we also
     *   plan a `verifyIntegrity()` helper in a follow-up)
     * - Truncation of the chain at the tail (dropping the last N
     *   entries leaves a shorter but still consistent chain). External
     *   anchoring of `head.hash` to a trusted service is the defense
     *   against this.
     */
    verify(): Promise<VerifyResult>;
    /**
     * Serialize + encrypt a ledger entry into an EncryptedEnvelope. The
     * envelope's `_v` field is set to `entry.index + 1` so the usual
     * optimistic-concurrency machinery has a reasonable version number
     * to compare against (the ledger is append-only, so concurrent
     * writes should always bump the index).
     */
    private encryptEntry;
    /** Decrypt an envelope into a LedgerEntry. Throws on bad key / tamper. */
    private decryptEntry;
}
|
|
1075
|
+
/**
 * Compute the `payloadHash` value for an encrypted envelope. Pulled
 * out as a standalone helper because both `put` (hash the new
 * envelope's `_data`) and `delete` (hash the previous envelope's
 * `_data`) need the same calculation, and the logic is small enough
 * that duplicating it would be noise.
 *
 * @param envelope - The envelope to hash, or `null` when no envelope
 *   exists for the operation.
 * @returns The hash string to record on the ledger entry.
 */
declare function envelopePayloadHash(envelope: EncryptedEnvelope | null): Promise<string>;
|
|
1083
|
+
|
|
1084
|
+
/**
 * Foreign-key references — the v0.4 soft-FK mechanism.
 *
 * A collection declares its references as metadata at construction
 * time:
 *
 * ```ts
 * import { ref } from '@noy-db/core'
 *
 * const invoices = company.collection<Invoice>('invoices', {
 *   refs: {
 *     clientId: ref('clients'), // default: strict
 *     categoryId: ref('categories', 'warn'),
 *     parentId: ref('invoices', 'cascade'), // self-reference OK
 *   },
 * })
 * ```
 *
 * Three modes:
 *
 * - **strict** — the default. `put()` rejects records whose
 *   reference target doesn't exist, and `delete()` of the target
 *   rejects if any strict-referencing records still exist.
 *   Matches SQL's default FK semantics.
 *
 * - **warn** — both operations succeed unconditionally. Broken
 *   references surface only through
 *   `compartment.checkIntegrity()`, which walks every collection
 *   and reports orphans. Use when you want soft validation for
 *   imports from messy sources.
 *
 * - **cascade** — `put()` is same as warn. `delete()` of the
 *   target deletes every referencing record. Cycles are detected
 *   and broken via an in-progress set, so mutual cascades
 *   terminate instead of recursing forever.
 *
 * Cross-compartment refs are explicitly rejected: if the target
 * name contains a `/`, `ref()` throws `RefScopeError`. Cross-
 * compartment refs need an auth story (multi-keyring reads) that
 * v0.4 doesn't ship — tracked for v0.5.
 */
|
|
1125
|
+
|
|
1126
|
+
/** The three enforcement modes. Default for new refs is `'strict'`. */
type RefMode = 'strict' | 'warn' | 'cascade';
|
|
1128
|
+
/**
 * Descriptor returned by `ref()`. Collections accept a
 * `Record<string, RefDescriptor>` in their options. The key is the
 * field name on the record (top-level only — dotted paths are out of
 * scope for v0.4), the value describes which target collection the
 * field references and under what mode.
 *
 * The descriptor carries only plain data so it can be serialized,
 * passed around, and introspected without any class machinery.
 */
interface RefDescriptor {
    /** Name of the collection the field points at (same compartment only). */
    readonly target: string;
    /** Enforcement mode for this reference. */
    readonly mode: RefMode;
}
|
|
1142
|
+
/**
 * Thrown when a strict reference is violated — either `put()` with a
 * missing target id, or `delete()` of a target that still has
 * strict-referencing records.
 *
 * Carries structured detail so UI code (and a potential future
 * devtools panel) can render "client X cannot be deleted because
 * invoices 1, 2, and 3 reference it" instead of a bare error string.
 */
declare class RefIntegrityError extends NoydbError {
    /** Collection of the record that triggered the violation. */
    readonly collection: string;
    /** Id of the record that triggered the violation. */
    readonly id: string;
    /** The referencing field on that record. */
    readonly field: string;
    /** The target collection the field references. */
    readonly refTo: string;
    /** The referenced id, or `null` when not applicable. */
    readonly refId: string | null;
    constructor(opts: {
        collection: string;
        id: string;
        field: string;
        refTo: string;
        refId: string | null;
        message: string;
    });
}
|
|
1166
|
+
/**
 * Thrown when `ref()` is called with a target name that looks like
 * a cross-compartment reference (contains a `/`). Separate error
 * class because the fix is different: RefIntegrityError means "data
 * is wrong"; RefScopeError means "the ref declaration is wrong".
 */
declare class RefScopeError extends NoydbError {
    constructor(target: string);
}
|
|
1175
|
+
/**
 * Helper constructor. Thin wrapper around the object literal so user
 * code reads like `ref('clients')` instead of `{ target: 'clients',
 * mode: 'strict' }` — this is the only ergonomics reason it exists.
 *
 * Validates the target name eagerly so a misconfigured ref declaration
 * fails at collection construction time, not at the first put.
 *
 * @param target - Name of the referenced collection (same compartment).
 * @param mode - Enforcement mode; defaults to `'strict'`.
 * @throws RefScopeError if `target` contains a `/` (cross-compartment).
 */
declare function ref(target: string, mode?: RefMode): RefDescriptor;
|
|
1184
|
+
/**
 * Per-compartment registry of reference declarations.
 *
 * The registry is populated by `Collection` constructors (which pass
 * their `refs` option through the Compartment) and consulted by the
 * Compartment on every `put` / `delete` and by `checkIntegrity`. A
 * single instance lives on the Compartment for its lifetime; there's
 * no global state.
 *
 * The data structure is two parallel maps:
 *
 * - `outbound`: `collection → { field → RefDescriptor }` — what
 *   refs does `collection` declare? Used on put to check
 *   strict-target-exists and on checkIntegrity to walk each
 *   collection's outbound refs.
 *
 * - `inbound`: `target → Array<{ collection, field, mode }>` —
 *   which collections reference `target`? Used on delete to find
 *   the records that might be affected by cascade / strict.
 *
 * The two views are kept in sync by `register()` and never mutated
 * otherwise — refs can't be unregistered at runtime in v0.4.
 */
declare class RefRegistry {
    private readonly outbound;
    private readonly inbound;
    /**
     * Register the refs declared by a single collection. Idempotent in
     * the happy path — calling twice with the same data is a no-op.
     * Calling twice with DIFFERENT data throws, because silent
     * overrides would be confusing ("I changed the ref and it doesn't
     * update" vs "I declared the same collection twice with different
     * refs and the second call won").
     */
    register(collection: string, refs: Record<string, RefDescriptor>): void;
    /** Get the outbound refs declared by a collection (or `{}` if none). */
    getOutbound(collection: string): Record<string, RefDescriptor>;
    /** Get the inbound refs that target a given collection (or `[]`). */
    getInbound(target: string): ReadonlyArray<{
        collection: string;
        field: string;
        mode: RefMode;
    }>;
    /**
     * Iterate every (collection → refs) pair that has at least one
     * declared reference. Used by `checkIntegrity` to walk the full
     * universe of outbound refs without needing to track collection
     * names elsewhere.
     */
    entries(): Array<[string, Record<string, RefDescriptor>]>;
    /** Clear the registry. Test-only escape hatch; never called from production code. */
    clear(): void;
}
|
|
1237
|
+
/**
 * Shape of a single violation reported by `compartment.checkIntegrity()`.
 *
 * `refId` is the value we saw in the referencing field — it's the
 * ID we expected to find in `refTo`, but didn't. Left as `unknown`
 * because records are loosely typed at the integrity-check layer.
 */
interface RefViolation {
    /** Collection of the record holding the broken reference. */
    readonly collection: string;
    /** Id of the record holding the broken reference. */
    readonly id: string;
    /** The referencing field on that record. */
    readonly field: string;
    /** The target collection the field should point into. */
    readonly refTo: string;
    /** The value found in the field — the id that was not found in `refTo`. */
    readonly refId: unknown;
    /** The enforcement mode declared for this reference. */
    readonly mode: RefMode;
}
|
|
239
1252
|
|
|
240
1253
|
/** In-memory representation of an unlocked keyring. */
|
|
241
1254
|
interface UnlockedKeyring {
|
|
@@ -282,8 +1295,394 @@ declare function diff(oldObj: unknown, newObj: unknown, basePath?: string): Diff
|
|
|
282
1295
|
/** Format a diff as a human-readable string. */
declare function formatDiff(changes: DiffEntry[]): string;
|
|
284
1297
|
|
|
1298
|
+
/**
 * Operator implementations for the query DSL.
 *
 * All predicates run client-side, AFTER decryption — they never see ciphertext.
 * This file is dependency-free and tree-shakeable.
 */
/** Comparison operators supported by the where() builder. */
type Operator = '==' | '!=' | '<' | '<=' | '>' | '>=' | 'in' | 'contains' | 'startsWith' | 'between';
|
|
1306
|
+
/**
 * A single field comparison clause inside a query plan.
 * Plans are JSON-serializable, so this type uses primitives only.
 */
interface FieldClause {
    readonly type: 'field';
    /** Field path on the record, possibly dotted (see `readPath`). */
    readonly field: string;
    /** The comparison operator to apply. */
    readonly op: Operator;
    /** The comparison operand (right-hand side). */
    readonly value: unknown;
}
|
|
1316
|
+
/**
 * A user-supplied predicate function escape hatch. Not serializable.
 *
 * The predicate accepts `unknown` at the type level so the surrounding
 * Clause type can stay non-parametric — this keeps Collection<T> covariant
 * in T at the public API surface. Builder methods cast user predicates
 * (typed `(record: T) => boolean`) into this shape on the way in.
 */
interface FilterClause {
    readonly type: 'filter';
    readonly fn: (record: unknown) => boolean;
}
|
|
1328
|
+
/** A logical group of clauses combined by AND or OR. */
interface GroupClause {
    readonly type: 'group';
    readonly op: 'and' | 'or';
    readonly clauses: readonly Clause[];
}
/** Any node in a query plan: a field comparison, a predicate, or a group. */
type Clause = FieldClause | FilterClause | GroupClause;
|
|
1335
|
+
/**
 * Read a possibly nested field path like "address.city" from a record.
 * Returns undefined if any segment is missing.
 *
 * @param record - The record to read from; treated as an opaque value.
 * @param path - Dot-separated path of object keys.
 */
declare function readPath(record: unknown, path: string): unknown;
|
|
1340
|
+
/**
 * Evaluate a single field clause against a record.
 * Returns false on type mismatches rather than throwing — query results
 * exclude non-matching records by definition.
 */
declare function evaluateFieldClause(record: unknown, clause: FieldClause): boolean;
|
|
1346
|
+
/**
 * Evaluate any clause (field / filter / group) against a record.
 * The recursion depth is bounded by the user's query expression — no risk of
 * blowing the stack on a 50K-record collection.
 */
declare function evaluateClause(record: unknown, clause: Clause): boolean;
|
|
1352
|
+
|
|
1353
|
+
/**
 * Secondary indexes for the query DSL.
 *
 * v0.3 ships **in-memory hash indexes**:
 * - Built during `Collection.ensureHydrated()` from the decrypted cache
 * - Maintained incrementally on `put` and `delete`
 * - Consulted by the query executor for `==` and `in` operators on
 *   indexed fields, falling back to a linear scan otherwise
 * - Live entirely in memory — no adapter writes for the index itself
 *
 * Persistent encrypted index blobs (the spec's "store as a separate
 * AES-256-GCM blob" note) are deferred to a follow-up issue. The reasons
 * are documented in the v0.3 PR body — short version: at the v0.3 target
 * scale of 1K–50K records, building the index during hydrate is free,
 * so persistence buys nothing measurable.
 */
/**
 * Index declaration accepted by `Collection`'s constructor.
 *
 * Today an index declaration is simply the name of the field to index
 * (a plain string), and only single-field hash indexes are supported.
 * Future shapes (composite, sorted, unique constraints) will widen
 * this alias — likely into a discriminated union — as additive,
 * non-breaking variants alongside the existing string form.
 */
type IndexDef = string;
|
|
1377
|
+
/**
 * Internal representation of a built hash index.
 *
 * Maps stringified field values to the set of record ids whose value
 * for that field matches. Stringification keeps the index simple and
 * works uniformly for primitives (`'open'`, `'42'`, `'true'`).
 *
 * Records whose indexed field is `undefined` or `null` are NOT inserted
 * — `query().where('field', '==', undefined)` falls back to a linear
 * scan, which is the conservative behavior.
 */
interface HashIndex {
    /** The indexed field name. */
    readonly field: string;
    /** stringified field value → ids of records holding that value. */
    readonly buckets: Map<string, Set<string>>;
}
|
|
1392
|
+
/**
|
|
1393
|
+
* Container for all indexes on a single collection.
|
|
1394
|
+
*
|
|
1395
|
+
* Methods are pure with respect to the in-memory `buckets` Map — they
|
|
1396
|
+
* never touch the adapter or the keyring. The Collection class owns
|
|
1397
|
+
* lifecycle (build on hydrate, maintain on put/delete).
|
|
1398
|
+
*/
|
|
1399
|
+
declare class CollectionIndexes {
|
|
1400
|
+
private readonly indexes;
|
|
1401
|
+
/**
|
|
1402
|
+
* Declare an index. Subsequent record additions are tracked under it.
|
|
1403
|
+
* Calling this twice for the same field is a no-op (idempotent).
|
|
1404
|
+
*/
|
|
1405
|
+
declare(field: string): void;
|
|
1406
|
+
/** True if the given field has a declared index. */
|
|
1407
|
+
has(field: string): boolean;
|
|
1408
|
+
/** All declared field names, in declaration order. */
|
|
1409
|
+
fields(): string[];
|
|
1410
|
+
/**
|
|
1411
|
+
* Build all declared indexes from a snapshot of records.
|
|
1412
|
+
* Called once per hydration. O(N × indexes.size).
|
|
1413
|
+
*/
|
|
1414
|
+
build<T>(records: ReadonlyArray<{
|
|
1415
|
+
id: string;
|
|
1416
|
+
record: T;
|
|
1417
|
+
}>): void;
|
|
1418
|
+
/**
|
|
1419
|
+
* Insert or update a single record across all indexes.
|
|
1420
|
+
* Called by `Collection.put()` after the encrypted write succeeds.
|
|
1421
|
+
*
|
|
1422
|
+
* If `previousRecord` is provided, the record is removed from any old
|
|
1423
|
+
* buckets first — this is the update path. Pass `null` for fresh adds.
|
|
1424
|
+
*/
|
|
1425
|
+
upsert<T>(id: string, newRecord: T, previousRecord: T | null): void;
|
|
1426
|
+
/**
|
|
1427
|
+
* Remove a record from all indexes. Called by `Collection.delete()`
|
|
1428
|
+
* (and as the first half of `upsert` for the update path).
|
|
1429
|
+
*/
|
|
1430
|
+
remove<T>(id: string, record: T): void;
|
|
1431
|
+
/** Drop all index data. Called when the collection is invalidated. */
|
|
1432
|
+
clear(): void;
|
|
1433
|
+
/**
|
|
1434
|
+
* Equality lookup: return the set of record ids whose `field` matches
|
|
1435
|
+
* the given value. Returns `null` if no index covers the field — the
|
|
1436
|
+
* caller should fall back to a linear scan.
|
|
1437
|
+
*
|
|
1438
|
+
* The returned Set is a reference to the index's internal storage —
|
|
1439
|
+
* callers must NOT mutate it.
|
|
1440
|
+
*/
|
|
1441
|
+
lookupEqual(field: string, value: unknown): ReadonlySet<string> | null;
|
|
1442
|
+
/**
|
|
1443
|
+
* Set lookup: return the union of record ids whose `field` matches any
|
|
1444
|
+
* of the given values. Returns `null` if no index covers the field.
|
|
1445
|
+
*/
|
|
1446
|
+
lookupIn(field: string, values: readonly unknown[]): ReadonlySet<string> | null;
|
|
1447
|
+
}
|
|
1448
|
+
|
|
1449
|
+
/**
|
|
1450
|
+
* Chainable, immutable query builder.
|
|
1451
|
+
*
|
|
1452
|
+
* Each builder operation returns a NEW Query — the underlying plan is never
|
|
1453
|
+
* mutated. This makes plans safe to share, cache, and serialize.
|
|
1454
|
+
*/
|
|
1455
|
+
|
|
1456
|
+
interface OrderBy {
|
|
1457
|
+
readonly field: string;
|
|
1458
|
+
readonly direction: 'asc' | 'desc';
|
|
1459
|
+
}
|
|
1460
|
+
/**
|
|
1461
|
+
* A complete query plan: zero-or-more clauses, optional ordering, pagination.
|
|
1462
|
+
* Plans are JSON-serializable as long as no FilterClause is present.
|
|
1463
|
+
*
|
|
1464
|
+
* Plans are intentionally NOT parametric on T — see `predicate.ts` FilterClause
|
|
1465
|
+
* for the variance reasoning. The public `Query<T>` API attaches the type tag.
|
|
1466
|
+
*/
|
|
1467
|
+
interface QueryPlan {
|
|
1468
|
+
readonly clauses: readonly Clause[];
|
|
1469
|
+
readonly orderBy: readonly OrderBy[];
|
|
1470
|
+
readonly limit: number | undefined;
|
|
1471
|
+
readonly offset: number;
|
|
1472
|
+
}
|
|
1473
|
+
/**
|
|
1474
|
+
* Source of records that a query executes against.
|
|
1475
|
+
*
|
|
1476
|
+
* The interface is non-parametric to keep variance friendly: callers cast
|
|
1477
|
+
* their typed source (e.g. `QuerySource<Invoice>`) into this opaque shape.
|
|
1478
|
+
*
|
|
1479
|
+
* `getIndexes` and `lookupById` are optional fast-path hooks. When both are
|
|
1480
|
+
* present and a where clause matches an indexed field, the executor uses
|
|
1481
|
+
* the index to skip a linear scan. Sources without these methods (or with
|
|
1482
|
+
* `getIndexes` returning `null`) always fall back to a linear scan.
|
|
1483
|
+
*/
|
|
1484
|
+
interface QuerySource<T> {
|
|
1485
|
+
/** Snapshot of all current records. The query never mutates this array. */
|
|
1486
|
+
snapshot(): readonly T[];
|
|
1487
|
+
/** Subscribe to mutations; returns an unsubscribe function. */
|
|
1488
|
+
subscribe?(cb: () => void): () => void;
|
|
1489
|
+
/** Index store for the indexed-fast-path. Optional. */
|
|
1490
|
+
getIndexes?(): CollectionIndexes | null;
|
|
1491
|
+
/** O(1) record lookup by id, used to materialize index hits. */
|
|
1492
|
+
lookupById?(id: string): T | undefined;
|
|
1493
|
+
}
|
|
1494
|
+
/**
|
|
1495
|
+
* The chainable builder. All methods return a new Query — the original
|
|
1496
|
+
* remains unchanged. Terminal methods (`toArray`, `first`, `count`,
|
|
1497
|
+
* `subscribe`) execute the plan against the source.
|
|
1498
|
+
*
|
|
1499
|
+
* Type parameter T flows through the public API for ergonomics, but the
|
|
1500
|
+
* internal storage uses `unknown` so Collection<T> stays covariant.
|
|
1501
|
+
*/
|
|
1502
|
+
declare class Query<T> {
|
|
1503
|
+
private readonly source;
|
|
1504
|
+
private readonly plan;
|
|
1505
|
+
constructor(source: QuerySource<T>, plan?: QueryPlan);
|
|
1506
|
+
/** Add a field comparison. Multiple where() calls are AND-combined. */
|
|
1507
|
+
where(field: string, op: Operator, value: unknown): Query<T>;
|
|
1508
|
+
/**
|
|
1509
|
+
* Logical OR group. Pass a callback that builds a sub-query.
|
|
1510
|
+
* Each clause inside the callback is OR-combined; the group itself
|
|
1511
|
+
* joins the parent plan with AND.
|
|
1512
|
+
*/
|
|
1513
|
+
or(builder: (q: Query<T>) => Query<T>): Query<T>;
|
|
1514
|
+
/**
|
|
1515
|
+
* Logical AND group. Same shape as `or()` but every clause inside the group
|
|
1516
|
+
* must match. Useful for explicit grouping inside a larger OR.
|
|
1517
|
+
*/
|
|
1518
|
+
and(builder: (q: Query<T>) => Query<T>): Query<T>;
|
|
1519
|
+
/** Escape hatch: add an arbitrary predicate function. Not serializable. */
|
|
1520
|
+
filter(fn: (record: T) => boolean): Query<T>;
|
|
1521
|
+
/** Sort by a field. Subsequent calls are tie-breakers. */
|
|
1522
|
+
orderBy(field: string, direction?: 'asc' | 'desc'): Query<T>;
|
|
1523
|
+
/** Cap the result size. */
|
|
1524
|
+
limit(n: number): Query<T>;
|
|
1525
|
+
/** Skip the first N matching records (after ordering). */
|
|
1526
|
+
offset(n: number): Query<T>;
|
|
1527
|
+
/** Execute the plan and return the matching records. */
|
|
1528
|
+
toArray(): T[];
|
|
1529
|
+
/** Return the first matching record, or null. */
|
|
1530
|
+
first(): T | null;
|
|
1531
|
+
/** Return the number of matching records (after where/filter, before limit). */
|
|
1532
|
+
count(): number;
|
|
1533
|
+
/**
|
|
1534
|
+
* Re-run the query whenever the source notifies of changes.
|
|
1535
|
+
* Returns an unsubscribe function. The callback receives the latest result.
|
|
1536
|
+
* Throws if the source does not support subscriptions.
|
|
1537
|
+
*/
|
|
1538
|
+
subscribe(cb: (result: T[]) => void): () => void;
|
|
1539
|
+
/**
|
|
1540
|
+
* Return the plan as a JSON-friendly object. FilterClause entries are
|
|
1541
|
+
* stripped (their `fn` cannot be serialized) and replaced with
|
|
1542
|
+
* { type: 'filter', fn: '[function]' } so devtools can still see them.
|
|
1543
|
+
*/
|
|
1544
|
+
toPlan(): unknown;
|
|
1545
|
+
}
|
|
1546
|
+
/**
|
|
1547
|
+
* Execute a plan against a snapshot of records.
|
|
1548
|
+
* Pure function — same input, same output, no side effects.
|
|
1549
|
+
*
|
|
1550
|
+
* Records are typed as `unknown` because plans are non-parametric; callers
|
|
1551
|
+
* cast the return type at the API surface (see `Query.toArray()`).
|
|
1552
|
+
*/
|
|
1553
|
+
declare function executePlan(records: readonly unknown[], plan: QueryPlan): unknown[];
|
|
1554
|
+
|
|
1555
|
+
interface LruOptions {
|
|
1556
|
+
/** Maximum number of entries before eviction. Required if `maxBytes` is unset. */
|
|
1557
|
+
maxRecords?: number;
|
|
1558
|
+
/** Maximum total bytes before eviction. Computed from per-entry `size`. */
|
|
1559
|
+
maxBytes?: number;
|
|
1560
|
+
}
|
|
1561
|
+
interface LruStats {
|
|
1562
|
+
/** Total cache hits since construction (or `resetStats()`). */
|
|
1563
|
+
hits: number;
|
|
1564
|
+
/** Total cache misses since construction (or `resetStats()`). */
|
|
1565
|
+
misses: number;
|
|
1566
|
+
/** Total entries evicted since construction (or `resetStats()`). */
|
|
1567
|
+
evictions: number;
|
|
1568
|
+
/** Current number of cached entries. */
|
|
1569
|
+
size: number;
|
|
1570
|
+
/** Current sum of cached entry sizes (in bytes, approximate). */
|
|
1571
|
+
bytes: number;
|
|
1572
|
+
}
|
|
1573
|
+
/**
|
|
1574
|
+
* O(1) LRU cache. Both `get()` and `set()` promote the touched entry to
|
|
1575
|
+
* the most-recently-used end. Eviction happens after every insert and
|
|
1576
|
+
* walks the front of the Map iterator dropping entries until both
|
|
1577
|
+
* budgets are satisfied.
|
|
1578
|
+
*/
|
|
1579
|
+
declare class Lru<K, V> {
|
|
1580
|
+
private readonly entries;
|
|
1581
|
+
private readonly maxRecords;
|
|
1582
|
+
private readonly maxBytes;
|
|
1583
|
+
private currentBytes;
|
|
1584
|
+
private hits;
|
|
1585
|
+
private misses;
|
|
1586
|
+
private evictions;
|
|
1587
|
+
constructor(options: LruOptions);
|
|
1588
|
+
/**
|
|
1589
|
+
* Look up a key. Hits promote the entry to most-recently-used; misses
|
|
1590
|
+
* return undefined. Both update the running stats counters.
|
|
1591
|
+
*/
|
|
1592
|
+
get(key: K): V | undefined;
|
|
1593
|
+
/**
|
|
1594
|
+
* Insert or update a key. If the key already exists, its size is
|
|
1595
|
+
* accounted for and the entry is promoted to MRU. After insertion,
|
|
1596
|
+
* eviction runs to maintain both budgets.
|
|
1597
|
+
*/
|
|
1598
|
+
set(key: K, value: V, size: number): void;
|
|
1599
|
+
/**
|
|
1600
|
+
* Remove a key without affecting hit/miss stats. Used by `Collection.delete()`.
|
|
1601
|
+
* Returns true if the key was present.
|
|
1602
|
+
*/
|
|
1603
|
+
remove(key: K): boolean;
|
|
1604
|
+
/** True if the cache currently holds an entry for the given key. */
|
|
1605
|
+
has(key: K): boolean;
|
|
1606
|
+
/**
|
|
1607
|
+
* Drop every entry. Stats counters survive — call `resetStats()` if you
|
|
1608
|
+
* want a clean slate. Used by `Collection.invalidate()` on key rotation.
|
|
1609
|
+
*/
|
|
1610
|
+
clear(): void;
|
|
1611
|
+
/** Reset hit/miss/eviction counters to zero. Does NOT touch entries. */
|
|
1612
|
+
resetStats(): void;
|
|
1613
|
+
/** Snapshot of current cache statistics. Cheap — no copying. */
|
|
1614
|
+
stats(): LruStats;
|
|
1615
|
+
/**
|
|
1616
|
+
* Iterate over all currently-cached values. Order is least-recently-used
|
|
1617
|
+
* first. Used by tests and devtools — production callers should use
|
|
1618
|
+
* `Collection.scan()` instead.
|
|
1619
|
+
*/
|
|
1620
|
+
values(): IterableIterator<V>;
|
|
1621
|
+
/**
|
|
1622
|
+
* Walk the cache from the LRU end and drop entries until both budgets
|
|
1623
|
+
* are satisfied. Called after every `set()`. Single pass — entries are
|
|
1624
|
+
* never re-promoted during eviction.
|
|
1625
|
+
*/
|
|
1626
|
+
private evictUntilUnderBudget;
|
|
1627
|
+
private overBudget;
|
|
1628
|
+
}
|
|
1629
|
+
|
|
1630
|
+
/**
|
|
1631
|
+
* Cache policy helpers — parse human-friendly byte budgets into raw numbers.
|
|
1632
|
+
*
|
|
1633
|
+
* Accepted shapes (case-insensitive on suffix):
|
|
1634
|
+
* number — interpreted as raw bytes
|
|
1635
|
+
* '1024' — string of digits, raw bytes
|
|
1636
|
+
* '50KB' — kilobytes (×1024)
|
|
1637
|
+
* '50MB' — megabytes (×1024²)
|
|
1638
|
+
* '1GB' — gigabytes (×1024³)
|
|
1639
|
+
*
|
|
1640
|
+
* Decimals are accepted (`'1.5GB'` → 1610612736 bytes).
|
|
1641
|
+
*
|
|
1642
|
+
* Anything else throws — better to fail loud at construction time than
|
|
1643
|
+
* to silently treat a typo as 0 bytes (which would evict everything).
|
|
1644
|
+
*/
|
|
1645
|
+
/** Parse a byte budget into a positive integer number of bytes. */
|
|
1646
|
+
declare function parseBytes(input: number | string): number;
|
|
1647
|
+
/**
|
|
1648
|
+
* Estimate the in-memory byte size of a decrypted record.
|
|
1649
|
+
*
|
|
1650
|
+
* Uses `JSON.stringify().length` as a stand-in for actual heap usage.
|
|
1651
|
+
* It's a deliberate approximation: real V8 heap size includes pointer
|
|
1652
|
+
* overhead, hidden classes, and string interning that we can't measure
|
|
1653
|
+
* from JavaScript. The JSON length is a stable, monotonic proxy that
|
|
1654
|
+
* costs O(record size) per insert — fine when records are typically
|
|
1655
|
+
* < 1 KB and the cache eviction is the slow path anyway.
|
|
1656
|
+
*
|
|
1657
|
+
* Returns `0` (and the caller must treat it as 1 for accounting) if
|
|
1658
|
+
* stringification throws on circular references; this is documented
|
|
1659
|
+
* but in practice records always come from JSON-decoded envelopes.
|
|
1660
|
+
*/
|
|
1661
|
+
declare function estimateRecordBytes(record: unknown): number;
|
|
1662
|
+
|
|
285
1663
|
/** Callback for dirty tracking (sync engine integration). */
|
|
286
1664
|
type OnDirtyCallback = (collection: string, id: string, action: 'put' | 'delete', version: number) => Promise<void>;
|
|
1665
|
+
/**
|
|
1666
|
+
* Per-collection cache configuration. Only meaningful when paired with
|
|
1667
|
+
* `prefetch: false` (lazy mode); eager mode keeps the entire decrypted
|
|
1668
|
+
* cache in memory and ignores these bounds.
|
|
1669
|
+
*/
|
|
1670
|
+
interface CacheOptions {
|
|
1671
|
+
/** Maximum number of records to keep in memory before LRU eviction. */
|
|
1672
|
+
maxRecords?: number;
|
|
1673
|
+
/**
|
|
1674
|
+
* Maximum total decrypted byte size before LRU eviction. Accepts a raw
|
|
1675
|
+
* number or a human-friendly string: `'50KB'`, `'50MB'`, `'1GB'`.
|
|
1676
|
+
* Eviction picks the least-recently-used entry until both budgets
|
|
1677
|
+
* (maxRecords AND maxBytes, if both are set) are satisfied.
|
|
1678
|
+
*/
|
|
1679
|
+
maxBytes?: number | string;
|
|
1680
|
+
}
|
|
1681
|
+
/** Statistics exposed via `Collection.cacheStats()`. */
|
|
1682
|
+
interface CacheStats extends LruStats {
|
|
1683
|
+
/** True if this collection is in lazy mode. */
|
|
1684
|
+
lazy: boolean;
|
|
1685
|
+
}
|
|
287
1686
|
/** A typed collection of records within a compartment. */
|
|
288
1687
|
declare class Collection<T> {
|
|
289
1688
|
private readonly adapter;
|
|
@@ -297,6 +1696,86 @@ declare class Collection<T> {
|
|
|
297
1696
|
private readonly historyConfig;
|
|
298
1697
|
private readonly cache;
|
|
299
1698
|
private hydrated;
|
|
1699
|
+
/**
|
|
1700
|
+
* Lazy mode flag. `true` when constructed with `prefetch: false`.
|
|
1701
|
+
* In lazy mode the cache is bounded by an LRU and `list()`/`query()`
|
|
1702
|
+
* throw — callers must use `scan()` or per-id `get()` instead.
|
|
1703
|
+
*/
|
|
1704
|
+
private readonly lazy;
|
|
1705
|
+
/**
|
|
1706
|
+
* LRU cache for lazy mode. Only allocated when `prefetch: false` is set.
|
|
1707
|
+
* Stores `{ record, version }` entries the same shape as `this.cache`.
|
|
1708
|
+
* Tree-shaking note: importing Collection without setting `prefetch:false`
|
|
1709
|
+
* still pulls in the Lru class today; future bundle-size work could
|
|
1710
|
+
* lazy-import the cache module.
|
|
1711
|
+
*/
|
|
1712
|
+
private readonly lru;
|
|
1713
|
+
/**
|
|
1714
|
+
* In-memory secondary indexes for the query DSL.
|
|
1715
|
+
*
|
|
1716
|
+
* Built during `ensureHydrated()` and maintained on every put/delete.
|
|
1717
|
+
* The query executor consults these for `==` and `in` operators on
|
|
1718
|
+
* indexed fields, falling back to a linear scan for unindexed fields
|
|
1719
|
+
* or unsupported operators.
|
|
1720
|
+
*
|
|
1721
|
+
* v0.3 ships in-memory only — persistence as encrypted blobs is a
|
|
1722
|
+
* follow-up. See `query/indexes.ts` for the design rationale.
|
|
1723
|
+
*
|
|
1724
|
+
* Indexes are INCOMPATIBLE with lazy mode in v0.3 — the constructor
|
|
1725
|
+
* rejects the combination because evicted records would silently
|
|
1726
|
+
* disappear from the index without notification.
|
|
1727
|
+
*/
|
|
1728
|
+
private readonly indexes;
|
|
1729
|
+
/**
|
|
1730
|
+
* Optional Standard Schema v1 validator. When set, every `put()` runs
|
|
1731
|
+
* the input through `validateSchemaInput` before encryption, and every
|
|
1732
|
+
* record coming OUT of `decryptRecord` runs through
|
|
1733
|
+
* `validateSchemaOutput`. A rejected input throws
|
|
1734
|
+
* `SchemaValidationError` with `direction: 'input'`; drifted stored
|
|
1735
|
+
* data throws with `direction: 'output'`. Both carry the rich issue
|
|
1736
|
+
* list from the validator so UI code can render field-level messages.
|
|
1737
|
+
*
|
|
1738
|
+
* The schema is stored as `StandardSchemaV1<unknown, T>` because the
|
|
1739
|
+
* collection type parameter `T` is the OUTPUT type — whatever the
|
|
1740
|
+
* validator produces after transforms and coercion. Users who pass a
|
|
1741
|
+
* schema to `defineNoydbStore` (or `Collection.constructor`) get their
|
|
1742
|
+
* `T` inferred automatically via `InferOutput<Schema>`.
|
|
1743
|
+
*/
|
|
1744
|
+
private readonly schema;
|
|
1745
|
+
/**
|
|
1746
|
+
* Optional reference to the compartment-level hash-chained audit
|
|
1747
|
+
* log. When present, every successful `put()` and `delete()` appends
|
|
1748
|
+
* an entry to the ledger AFTER the adapter write succeeds (so a
|
|
1749
|
+
* failed adapter write never produces an orphan ledger entry).
|
|
1750
|
+
*
|
|
1751
|
+
* The ledger is always a compartment-wide singleton — all
|
|
1752
|
+
* collections in the same compartment share the same LedgerStore.
|
|
1753
|
+
* Compartment.ledger() does the lazy init; this field just holds
|
|
1754
|
+
* the reference so Collection doesn't need to reach back up to the
|
|
1755
|
+
* compartment on every mutation.
|
|
1756
|
+
*
|
|
1757
|
+
* `undefined` means "no ledger attached" — supported for tests that
|
|
1758
|
+
* construct a Collection directly without a compartment, and for
|
|
1759
|
+
* future backwards-compat scenarios. Production usage always has a
|
|
1760
|
+
* ledger because Compartment.collection() passes one through.
|
|
1761
|
+
*/
|
|
1762
|
+
private readonly ledger;
|
|
1763
|
+
/**
|
|
1764
|
+
* Optional back-reference to the owning compartment's ref
|
|
1765
|
+
* enforcer. When present, `Collection.put` calls
|
|
1766
|
+
* `refEnforcer.enforceRefsOnPut(name, record)` before the adapter
|
|
1767
|
+
* write, and `Collection.delete` calls
|
|
1768
|
+
* `refEnforcer.enforceRefsOnDelete(name, id)` before its own
|
|
1769
|
+
* adapter delete. The Compartment handles the actual registry
|
|
1770
|
+
* lookup and cross-collection enforcement — Collection just
|
|
1771
|
+
* notifies it at the right points in the lifecycle.
|
|
1772
|
+
*
|
|
1773
|
+
* Typed as a structural interface rather than `Compartment`
|
|
1774
|
+
* directly to avoid a circular import. Compartment implements
|
|
1775
|
+
* these two methods; any other object with the same shape would
|
|
1776
|
+
* work too (used only in unit tests).
|
|
1777
|
+
*/
|
|
1778
|
+
private readonly refEnforcer;
|
|
300
1779
|
constructor(opts: {
|
|
301
1780
|
adapter: NoydbAdapter;
|
|
302
1781
|
compartment: string;
|
|
@@ -307,6 +1786,46 @@ declare class Collection<T> {
|
|
|
307
1786
|
getDEK: (collectionName: string) => Promise<CryptoKey>;
|
|
308
1787
|
historyConfig?: HistoryConfig | undefined;
|
|
309
1788
|
onDirty?: OnDirtyCallback | undefined;
|
|
1789
|
+
indexes?: IndexDef[] | undefined;
|
|
1790
|
+
/**
|
|
1791
|
+
* Hydration mode. `'eager'` (default) loads everything into memory on
|
|
1792
|
+
* first access — matches v0.2 behavior exactly. `'lazy'` defers loads
|
|
1793
|
+
* to per-id `get()` calls and bounds memory via the `cache` option.
|
|
1794
|
+
*/
|
|
1795
|
+
prefetch?: boolean;
|
|
1796
|
+
/**
|
|
1797
|
+
* LRU cache options. Only meaningful when `prefetch: false`. At least
|
|
1798
|
+
* one of `maxRecords` or `maxBytes` must be set in lazy mode — an
|
|
1799
|
+
* unbounded lazy cache defeats the purpose.
|
|
1800
|
+
*/
|
|
1801
|
+
cache?: CacheOptions | undefined;
|
|
1802
|
+
/**
|
|
1803
|
+
* Optional Standard Schema v1 validator (Zod, Valibot, ArkType,
|
|
1804
|
+
* Effect Schema, etc.). When set, every `put()` is validated before
|
|
1805
|
+
* encryption and every read is validated after decryption. See the
|
|
1806
|
+
* `schema` field docstring for the error semantics.
|
|
1807
|
+
*/
|
|
1808
|
+
schema?: StandardSchemaV1<unknown, T> | undefined;
|
|
1809
|
+
/**
|
|
1810
|
+
* Optional reference to the compartment's hash-chained ledger.
|
|
1811
|
+
* When present, successful mutations append a ledger entry via
|
|
1812
|
+
* `LedgerStore.append()`. Constructed at the Compartment level and
|
|
1813
|
+
* threaded through — see the Compartment.collection() source for
|
|
1814
|
+
* the wiring.
|
|
1815
|
+
*/
|
|
1816
|
+
ledger?: LedgerStore | undefined;
|
|
1817
|
+
/**
|
|
1818
|
+
* Optional back-reference to the owning compartment's ref
|
|
1819
|
+
* enforcer (v0.4 #45 — foreign-key references via `ref()`).
|
|
1820
|
+
* Collection.put calls `enforceRefsOnPut` before the adapter
|
|
1821
|
+
* write; Collection.delete calls `enforceRefsOnDelete` before
|
|
1822
|
+
* its own adapter delete. See the `refEnforcer` field docstring
|
|
1823
|
+
* for the full protocol.
|
|
1824
|
+
*/
|
|
1825
|
+
refEnforcer?: {
|
|
1826
|
+
enforceRefsOnPut(collectionName: string, record: unknown): Promise<void>;
|
|
1827
|
+
enforceRefsOnDelete(collectionName: string, id: string): Promise<void>;
|
|
1828
|
+
} | undefined;
|
|
310
1829
|
});
|
|
311
1830
|
/** Get a single record by ID. Returns null if not found. */
|
|
312
1831
|
get(id: string): Promise<T | null>;
|
|
@@ -314,13 +1833,57 @@ declare class Collection<T> {
|
|
|
314
1833
|
put(id: string, record: T): Promise<void>;
|
|
315
1834
|
/** Delete a record by ID. */
|
|
316
1835
|
delete(id: string): Promise<void>;
|
|
317
|
-
/**
|
|
1836
|
+
/**
|
|
1837
|
+
* List all records in the collection.
|
|
1838
|
+
*
|
|
1839
|
+
* Throws in lazy mode — bulk listing defeats the purpose of lazy
|
|
1840
|
+
* hydration. Use `scan()` to iterate over the full collection
|
|
1841
|
+
* page-by-page without holding more than `pageSize` records in memory.
|
|
1842
|
+
*/
|
|
318
1843
|
list(): Promise<T[]>;
|
|
319
|
-
/**
|
|
1844
|
+
/**
|
|
1845
|
+
* Build a chainable query against the collection. Returns a `Query<T>`
|
|
1846
|
+
* builder when called with no arguments.
|
|
1847
|
+
*
|
|
1848
|
+
* Backward-compatible overload: passing a predicate function returns
|
|
1849
|
+
* the filtered records directly (the v0.2 API). Prefer the chainable
|
|
1850
|
+
* form for new code.
|
|
1851
|
+
*
|
|
1852
|
+
* @example
|
|
1853
|
+
* ```ts
|
|
1854
|
+
* // New chainable API:
|
|
1855
|
+
* const overdue = invoices.query()
|
|
1856
|
+
* .where('status', '==', 'open')
|
|
1857
|
+
* .where('dueDate', '<', new Date())
|
|
1858
|
+
* .orderBy('dueDate')
|
|
1859
|
+
* .toArray();
|
|
1860
|
+
*
|
|
1861
|
+
* // Legacy predicate form (still supported):
|
|
1862
|
+
* const drafts = invoices.query(i => i.status === 'draft');
|
|
1863
|
+
* ```
|
|
1864
|
+
*/
|
|
1865
|
+
query(): Query<T>;
|
|
320
1866
|
query(predicate: (record: T) => boolean): T[];
|
|
1867
|
+
/**
|
|
1868
|
+
* Cache statistics — useful for devtools, monitoring, and verifying
|
|
1869
|
+
* that LRU eviction is happening as expected in lazy mode.
|
|
1870
|
+
*
|
|
1871
|
+
* In eager mode, returns size only (no hits/misses are tracked because
|
|
1872
|
+
* every read is a cache hit by construction). In lazy mode, returns
|
|
1873
|
+
* the full LRU stats: `{ hits, misses, evictions, size, bytes }`.
|
|
1874
|
+
*/
|
|
1875
|
+
cacheStats(): CacheStats;
|
|
321
1876
|
/** Get version history for a record, newest first. */
|
|
322
1877
|
history(id: string, options?: HistoryOptions): Promise<HistoryEntry<T>[]>;
|
|
323
|
-
/**
|
|
1878
|
+
/**
|
|
1879
|
+
* Get a specific past version of a record.
|
|
1880
|
+
*
|
|
1881
|
+
* History reads intentionally **skip schema validation** — historical
|
|
1882
|
+
* records predate the current schema by definition, so validating them
|
|
1883
|
+
* against today's shape would be a false positive on any schema
|
|
1884
|
+
* evolution. If a caller needs validated history, they should filter
|
|
1885
|
+
* and re-put the records through the normal `put()` path.
|
|
1886
|
+
*/
|
|
324
1887
|
getVersion(id: string, version: number): Promise<T | null>;
|
|
325
1888
|
/** Revert a record to a past version. Creates a new version with the old content. */
|
|
326
1889
|
revert(id: string, version: number): Promise<void>;
|
|
@@ -337,15 +1900,95 @@ declare class Collection<T> {
|
|
|
337
1900
|
pruneRecordHistory(id: string | undefined, options: PruneOptions): Promise<number>;
|
|
338
1901
|
/** Clear all history for this collection (or a specific record). */
|
|
339
1902
|
clearHistory(id?: string): Promise<number>;
|
|
340
|
-
/**
|
|
1903
|
+
/**
|
|
1904
|
+
* Count records in the collection.
|
|
1905
|
+
*
|
|
1906
|
+
* In eager mode this returns the in-memory cache size (instant). In
|
|
1907
|
+
* lazy mode it asks the adapter via `list()` to enumerate ids — slower
|
|
1908
|
+
* but still correct, and avoids loading any record bodies into memory.
|
|
1909
|
+
*/
|
|
341
1910
|
count(): Promise<number>;
|
|
1911
|
+
/**
|
|
1912
|
+
* Fetch a single page of records via the adapter's optional `listPage`
|
|
1913
|
+
* extension. Returns the decrypted records for this page plus an opaque
|
|
1914
|
+
* cursor for the next page.
|
|
1915
|
+
*
|
|
1916
|
+
* Pass `cursor: undefined` (or omit it) to start from the beginning.
|
|
1917
|
+
* The final page returns `nextCursor: null`.
|
|
1918
|
+
*
|
|
1919
|
+
* If the adapter does NOT implement `listPage`, this falls back to a
|
|
1920
|
+
* synthetic implementation: it loads all ids via `list()`, sorts them,
|
|
1921
|
+
* and slices a window. The first call emits a one-time console.warn so
|
|
1922
|
+
* developers can spot adapters that should opt into the fast path.
|
|
1923
|
+
*/
|
|
1924
|
+
listPage(opts?: {
|
|
1925
|
+
cursor?: string;
|
|
1926
|
+
limit?: number;
|
|
1927
|
+
}): Promise<{
|
|
1928
|
+
items: T[];
|
|
1929
|
+
nextCursor: string | null;
|
|
1930
|
+
}>;
|
|
1931
|
+
/**
|
|
1932
|
+
* Stream every record in the collection page-by-page, yielding decrypted
|
|
1933
|
+
* records as an `AsyncIterable<T>`. The whole point: process collections
|
|
1934
|
+
* larger than RAM without ever holding more than `pageSize` records
|
|
1935
|
+
* decrypted at once.
|
|
1936
|
+
*
|
|
1937
|
+
* @example
|
|
1938
|
+
* ```ts
|
|
1939
|
+
* for await (const record of invoices.scan({ pageSize: 500 })) {
|
|
1940
|
+
* await processOne(record)
|
|
1941
|
+
* }
|
|
1942
|
+
* ```
|
|
1943
|
+
*
|
|
1944
|
+
* Uses `adapter.listPage` when available; otherwise falls back to the
|
|
1945
|
+
* synthetic pagination path with the same one-time warning.
|
|
1946
|
+
*/
|
|
1947
|
+
scan(opts?: {
|
|
1948
|
+
pageSize?: number;
|
|
1949
|
+
}): AsyncIterableIterator<T>;
|
|
1950
|
+
/** Decrypt a page of envelopes returned by `adapter.listPage`. */
|
|
1951
|
+
private decryptPage;
|
|
342
1952
|
/** Load all records from adapter into memory cache. */
|
|
343
1953
|
private ensureHydrated;
|
|
344
1954
|
/** Hydrate from a pre-loaded snapshot (used by Compartment). */
|
|
345
1955
|
hydrateFromSnapshot(records: Record<string, EncryptedEnvelope>): Promise<void>;
|
|
1956
|
+
/**
|
|
1957
|
+
* Rebuild secondary indexes from the current in-memory cache.
|
|
1958
|
+
*
|
|
1959
|
+
* Called after any bulk hydration. Incremental put/delete updates
|
|
1960
|
+
* are handled by `indexes.upsert()` / `indexes.remove()` directly,
|
|
1961
|
+
* so this only fires for full reloads.
|
|
1962
|
+
*
|
|
1963
|
+
* Synchronous and O(N × indexes.size); for the v0.3 target scale of
|
|
1964
|
+
* 1K–50K records this completes in single-digit milliseconds.
|
|
1965
|
+
*/
|
|
1966
|
+
private rebuildIndexes;
|
|
1967
|
+
/**
|
|
1968
|
+
* Get the in-memory index store. Used by `Query` to short-circuit
|
|
1969
|
+
* `==` and `in` lookups when an index covers the where clause.
|
|
1970
|
+
*
|
|
1971
|
+
* Returns `null` if no indexes are declared on this collection.
|
|
1972
|
+
*/
|
|
1973
|
+
getIndexes(): CollectionIndexes | null;
|
|
346
1974
|
/** Get all records as encrypted envelopes (for dump). */
|
|
347
1975
|
dumpEnvelopes(): Promise<Record<string, EncryptedEnvelope>>;
|
|
348
1976
|
private encryptRecord;
|
|
1977
|
+
/**
|
|
1978
|
+
* Decrypt an envelope into a record of type `T`.
|
|
1979
|
+
*
|
|
1980
|
+
* When a schema is attached, the decrypted value is validated before
|
|
1981
|
+
* being returned. A divergence between the stored bytes and the
|
|
1982
|
+
* current schema throws `SchemaValidationError` with
|
|
1983
|
+
* `direction: 'output'` — silently returning drifted data would
|
|
1984
|
+
* propagate garbage into the UI and break the whole point of having
|
|
1985
|
+
* a schema.
|
|
1986
|
+
*
|
|
1987
|
+
* `skipValidation` exists for history reads: when calling
|
|
1988
|
+
* `getVersion()` the caller is explicitly asking for an old snapshot
|
|
1989
|
+
* that may predate a schema change, so validating it would be a
|
|
1990
|
+
* false positive. Every non-history read leaves this flag `false`.
|
|
1991
|
+
*/
|
|
349
1992
|
private decryptRecord;
|
|
350
1993
|
}
|
|
351
1994
|
|
|
@@ -353,13 +1996,62 @@ declare class Collection<T> {
|
|
|
353
1996
|
declare class Compartment {
|
|
354
1997
|
private readonly adapter;
|
|
355
1998
|
private readonly name;
|
|
356
|
-
|
|
1999
|
+
/**
|
|
2000
|
+
* The active in-memory keyring. NOT readonly because `load()`
|
|
2001
|
+
* needs to refresh it after restoring a different keyring file —
|
|
2002
|
+
* otherwise the in-memory DEKs (from the pre-load session) and
|
|
2003
|
+
* the on-disk wrapped DEKs (from the loaded backup) drift apart
|
|
2004
|
+
* and every subsequent decrypt fails with TamperedError.
|
|
2005
|
+
*/
|
|
2006
|
+
private keyring;
|
|
357
2007
|
private readonly encrypted;
|
|
358
2008
|
private readonly emitter;
|
|
359
2009
|
private readonly onDirty;
|
|
360
2010
|
private readonly historyConfig;
|
|
361
|
-
private
|
|
2011
|
+
private getDEK;
|
|
2012
|
+
/**
|
|
2013
|
+
* Optional callback that re-derives an UnlockedKeyring from the
|
|
2014
|
+
* adapter using the active user's passphrase. Called by `load()`
|
|
2015
|
+
* after the on-disk keyring file has been replaced — refreshes
|
|
2016
|
+
* `this.keyring` so the next DEK access uses the loaded wrapped
|
|
2017
|
+
* DEKs instead of the stale pre-load ones.
|
|
2018
|
+
*
|
|
2019
|
+
* Provided by Noydb at openCompartment() time. Tests that
|
|
2020
|
+
* construct Compartment directly can pass `undefined`; load()
|
|
2021
|
+
* skips the refresh in that case (which is fine for plaintext
|
|
2022
|
+
* compartments — there's nothing to re-unwrap).
|
|
2023
|
+
*/
|
|
2024
|
+
private readonly reloadKeyring;
|
|
362
2025
|
private readonly collectionCache;
|
|
2026
|
+
/**
|
|
2027
|
+
* Per-compartment ledger store. Lazy-initialized on first
|
|
2028
|
+
* `collection()` call (which passes it through to the Collection)
|
|
2029
|
+
* or on first `ledger()` call from user code.
|
|
2030
|
+
*
|
|
2031
|
+
* One LedgerStore is shared across all collections in a compartment
|
|
2032
|
+
* because the hash chain is compartment-scoped: the chain head is a
|
|
2033
|
+
* single "what did this compartment do last" identifier, not a
|
|
2034
|
+
* per-collection one. Two collections appending concurrently is the
|
|
2035
|
+
* single-writer concurrency concern documented in the LedgerStore
|
|
2036
|
+
* docstring.
|
|
2037
|
+
*/
|
|
2038
|
+
private ledgerStore;
|
|
2039
|
+
/**
|
|
2040
|
+
* Per-compartment foreign-key reference registry. Collections
|
|
2041
|
+
* register their `refs` option here on construction; the
|
|
2042
|
+
* compartment uses the registry on every put/delete/checkIntegrity
|
|
2043
|
+
* call. One instance lives for the compartment's lifetime.
|
|
2044
|
+
*/
|
|
2045
|
+
private readonly refRegistry;
|
|
2046
|
+
/**
|
|
2047
|
+
* Set of collection record-ids currently being deleted as part of
|
|
2048
|
+
* a cascade. Populated on entry to `enforceRefsOnDelete` and
|
|
2049
|
+
* drained on exit. Used to break mutual-cascade cycles: deleting
|
|
2050
|
+
* A → cascade to B → cascade back to A would otherwise recurse
|
|
2051
|
+
* forever, so we short-circuit when we see an already-in-progress
|
|
2052
|
+
* delete on the same (collection, id) pair.
|
|
2053
|
+
*/
|
|
2054
|
+
private readonly cascadeInProgress;
|
|
363
2055
|
constructor(opts: {
|
|
364
2056
|
adapter: NoydbAdapter;
|
|
365
2057
|
name: string;
|
|
@@ -368,15 +2060,178 @@ declare class Compartment {
|
|
|
368
2060
|
emitter: NoydbEventEmitter;
|
|
369
2061
|
onDirty?: OnDirtyCallback | undefined;
|
|
370
2062
|
historyConfig?: HistoryConfig | undefined;
|
|
2063
|
+
reloadKeyring?: (() => Promise<UnlockedKeyring>) | undefined;
|
|
371
2064
|
});
|
|
372
|
-
/**
|
|
373
|
-
|
|
2065
|
+
/**
|
|
2066
|
+
* Construct (or reconstruct) the lazy DEK resolver. Captures the
|
|
2067
|
+
* CURRENT value of `this.keyring` and `this.adapter` in a closure,
|
|
2068
|
+
* memoizing the inner getDEKFn after first use so subsequent
|
|
2069
|
+
* lookups are O(1).
|
|
2070
|
+
*
|
|
2071
|
+
* `load()` calls this after refreshing `this.keyring` to discard
|
|
2072
|
+
* the prior session's cached DEKs.
|
|
2073
|
+
*/
|
|
2074
|
+
private makeGetDEK;
|
|
2075
|
+
/**
|
|
2076
|
+
* Open a typed collection within this compartment.
|
|
2077
|
+
*
|
|
2078
|
+
* - `options.indexes` declares secondary indexes for the query DSL.
|
|
2079
|
+
* Indexes are computed in memory after decryption; adapters never
|
|
2080
|
+
* see plaintext index data.
|
|
2081
|
+
* - `options.prefetch` (default `true`) controls hydration. Eager mode
|
|
2082
|
+
* loads everything on first access; lazy mode (`prefetch: false`)
|
|
2083
|
+
* loads records on demand and bounds memory via the LRU cache.
|
|
2084
|
+
* - `options.cache` configures the LRU bounds. Required in lazy mode.
|
|
2085
|
+
* Accepts `{ maxRecords, maxBytes: '50MB' | 1024 }`.
|
|
2086
|
+
* - `options.schema` attaches a Standard Schema v1 validator (Zod,
|
|
2087
|
+
* Valibot, ArkType, Effect Schema, etc.). Every `put()` is validated
|
|
2088
|
+
* before encryption; every read is validated after decryption.
|
|
2089
|
+
* Failing records throw `SchemaValidationError`.
|
|
2090
|
+
*
|
|
2091
|
+
* Lazy mode + indexes is rejected at construction time — see the
|
|
2092
|
+
* Collection constructor for the rationale.
|
|
2093
|
+
*/
|
|
2094
|
+
collection<T>(collectionName: string, options?: {
|
|
2095
|
+
indexes?: IndexDef[];
|
|
2096
|
+
prefetch?: boolean;
|
|
2097
|
+
cache?: CacheOptions;
|
|
2098
|
+
schema?: StandardSchemaV1<unknown, T>;
|
|
2099
|
+
refs?: Record<string, RefDescriptor>;
|
|
2100
|
+
}): Collection<T>;
|
|
2101
|
+
/**
|
|
2102
|
+
* Enforce strict outbound refs on a `put()`. Called by Collection
|
|
2103
|
+
* just before it writes to the adapter. For every strict ref
|
|
2104
|
+
* declared on the collection, check that the target id exists in
|
|
2105
|
+
* the target collection; throw `RefIntegrityError` if not.
|
|
2106
|
+
*
|
|
2107
|
+
* `warn` and `cascade` modes don't affect put semantics — they're
|
|
2108
|
+
* enforced at delete time or via `checkIntegrity()`.
|
|
2109
|
+
*/
|
|
2110
|
+
enforceRefsOnPut(collectionName: string, record: unknown): Promise<void>;
|
|
2111
|
+
/**
|
|
2112
|
+
* Enforce inbound ref modes on a `delete()`. Called by Collection
|
|
2113
|
+
* just before it deletes from the adapter. Walks every inbound
|
|
2114
|
+
* ref that targets this (collection, id) and:
|
|
2115
|
+
*
|
|
2116
|
+
* - `strict`: throws if any referencing records exist
|
|
2117
|
+
* - `cascade`: deletes every referencing record
|
|
2118
|
+
* - `warn`: no-op (checkIntegrity picks it up)
|
|
2119
|
+
*
|
|
2120
|
+
* Cascade cycles are broken via `cascadeInProgress` — re-entering
|
|
2121
|
+
* for the same (collection, id) returns immediately so two
|
|
2122
|
+
* mutually-cascading collections don't recurse forever.
|
|
2123
|
+
*/
|
|
2124
|
+
enforceRefsOnDelete(collectionName: string, id: string): Promise<void>;
|
|
2125
|
+
/**
|
|
2126
|
+
* Walk every collection that has declared refs, load its records,
|
|
2127
|
+
* and report any reference whose target id is missing. Modes are
|
|
2128
|
+
* reported alongside each violation so the caller can distinguish
|
|
2129
|
+
* "this is a warning the user asked for" from "this should never
|
|
2130
|
+
* have happened" (strict violations produced by out-of-band
|
|
2131
|
+
* writes).
|
|
2132
|
+
*
|
|
2133
|
+
* Returns `{ violations: [...] }` instead of throwing — the whole
|
|
2134
|
+
* point of `checkIntegrity()` is to surface a list for display
|
|
2135
|
+
* or repair, not to fail noisily.
|
|
2136
|
+
*/
|
|
2137
|
+
checkIntegrity(): Promise<{
|
|
2138
|
+
violations: RefViolation[];
|
|
2139
|
+
}>;
|
|
2140
|
+
/**
|
|
2141
|
+
* Return this compartment's hash-chained audit log.
|
|
2142
|
+
*
|
|
2143
|
+
* The ledger is lazy-initialized on first access and cached for the
|
|
2144
|
+
* lifetime of the Compartment instance. Every LedgerStore instance
|
|
2145
|
+
* shares the same adapter and DEK resolver, so `compartment.ledger()`
|
|
2146
|
+
* can be called repeatedly without performance cost.
|
|
2147
|
+
*
|
|
2148
|
+
* The LedgerStore itself is the public API: consumers call
|
|
2149
|
+
* `.append()` (via Collection internals), `.head()`, `.verify()`,
|
|
2150
|
+
* and `.entries({ from, to })`. See the LedgerStore docstring for
|
|
2151
|
+
* the full surface and the concurrency caveats.
|
|
2152
|
+
*/
|
|
2153
|
+
ledger(): LedgerStore;
|
|
374
2154
|
/** List all collection names in this compartment. */
|
|
375
2155
|
collections(): Promise<string[]>;
|
|
376
|
-
/**
|
|
2156
|
+
/**
|
|
2157
|
+
* Dump compartment as a verifiable encrypted JSON backup string.
|
|
2158
|
+
*
|
|
2159
|
+
* v0.4 backups embed the current ledger head and the full
|
|
2160
|
+
* `_ledger` + `_ledger_deltas` internal collections so the
|
|
2161
|
+
* receiver can run `verifyBackupIntegrity()` after `load()` and
|
|
2162
|
+
* detect any tampering between dump and restore. Pre-v0.4 callers
|
|
2163
|
+
* who didn't have a ledger get a backup without these fields, and
|
|
2164
|
+
* the corresponding `load()` skips the integrity check with a
|
|
2165
|
+
* warning — both modes round-trip cleanly.
|
|
2166
|
+
*/
|
|
377
2167
|
dump(): Promise<string>;
|
|
378
|
-
/**
|
|
2168
|
+
/**
|
|
2169
|
+
* Restore a compartment from a verifiable backup.
|
|
2170
|
+
*
|
|
2171
|
+
* After loading, runs `verifyBackupIntegrity()` to confirm:
|
|
2172
|
+
* 1. The hash chain is intact (no `prevHash` mismatches)
|
|
2173
|
+
* 2. The chain head matches the embedded `ledgerHead.hash`
|
|
2174
|
+
* from the backup
|
|
2175
|
+
* 3. Every data envelope's `payloadHash` matches the
|
|
2176
|
+
* corresponding ledger entry — i.e. nobody swapped
|
|
2177
|
+
* ciphertext between dump and restore
|
|
2178
|
+
*
|
|
2179
|
+
* On any failure, throws `BackupLedgerError` (chain or head
|
|
2180
|
+
* mismatch) or `BackupCorruptedError` (data envelope mismatch).
|
|
2181
|
+
* The compartment state on the adapter has already been written
|
|
2182
|
+
* by the time we throw, so the caller is responsible for either
|
|
2183
|
+
* accepting the suspect state or wiping it and trying a different
|
|
2184
|
+
* backup.
|
|
2185
|
+
*
|
|
2186
|
+
* Pre-v0.4 backups (no `ledgerHead` field, no `_internal`) load
|
|
2187
|
+
* with a console warning and skip the integrity check entirely
|
|
2188
|
+
* — there's no chain to verify against.
|
|
2189
|
+
*/
|
|
379
2190
|
load(backupJson: string): Promise<void>;
|
|
2191
|
+
/**
|
|
2192
|
+
* End-to-end backup integrity check. Runs both:
|
|
2193
|
+
*
|
|
2194
|
+
* 1. `ledger.verify()` — walks the hash chain and confirms
|
|
2195
|
+
* every `prevHash` matches the recomputed hash of its
|
|
2196
|
+
* predecessor.
|
|
2197
|
+
*
|
|
2198
|
+
* 2. **Data envelope cross-check** — for every (collection, id)
|
|
2199
|
+
* that has a current value, find the most recent ledger
|
|
2200
|
+
* entry recording a `put` for that pair, recompute the
|
|
2201
|
+
* sha256 of the stored envelope's `_data`, and compare to
|
|
2202
|
+
* the entry's `payloadHash`. Any mismatch means an
|
|
2203
|
+
* out-of-band write modified the data without updating the
|
|
2204
|
+
* ledger.
|
|
2205
|
+
*
|
|
2206
|
+
* Returns a discriminated union so callers can handle the two
|
|
2207
|
+
* failure modes differently:
|
|
2208
|
+
* - `{ ok: true, head, length }` — chain verified and all
|
|
2209
|
+
* data matches; safe to use.
|
|
2210
|
+
* - `{ ok: false, kind: 'chain', divergedAt, message }` — the
|
|
2211
|
+
* chain itself is broken at the given index.
|
|
2212
|
+
* - `{ ok: false, kind: 'data', collection, id, message }` —
|
|
2213
|
+
* a specific data envelope doesn't match its ledger entry.
|
|
2214
|
+
*
|
|
2215
|
+
* This method is exposed so users can call it any time, not just
|
|
2216
|
+
* during `load()`. A scheduled background check is the simplest
|
|
2217
|
+
* way to detect tampering of an in-place compartment.
|
|
2218
|
+
*/
|
|
2219
|
+
verifyBackupIntegrity(): Promise<{
|
|
2220
|
+
readonly ok: true;
|
|
2221
|
+
readonly head: string;
|
|
2222
|
+
readonly length: number;
|
|
2223
|
+
} | {
|
|
2224
|
+
readonly ok: false;
|
|
2225
|
+
readonly kind: 'chain';
|
|
2226
|
+
readonly divergedAt: number;
|
|
2227
|
+
readonly message: string;
|
|
2228
|
+
} | {
|
|
2229
|
+
readonly ok: false;
|
|
2230
|
+
readonly kind: 'data';
|
|
2231
|
+
readonly collection: string;
|
|
2232
|
+
readonly id: string;
|
|
2233
|
+
readonly message: string;
|
|
2234
|
+
}>;
|
|
380
2235
|
/** Export compartment as decrypted JSON (owner only). */
|
|
381
2236
|
export(): Promise<string>;
|
|
382
2237
|
}
|
|
@@ -523,4 +2378,4 @@ declare function validatePassphrase(passphrase: string): void;
|
|
|
523
2378
|
*/
|
|
524
2379
|
declare function estimateEntropy(passphrase: string): number;
|
|
525
2380
|
|
|
526
|
-
export { type BiometricCredential, type ChangeEvent, type ChangeType, Collection, Compartment, type CompartmentBackup, type CompartmentSnapshot, type Conflict, ConflictError, type ConflictStrategy, DecryptionError, type DiffEntry, type DirtyEntry, type EncryptedEnvelope, type GrantOptions, type HistoryConfig, type HistoryEntry, type HistoryOptions, InvalidKeyError, type KeyringFile, NOYDB_BACKUP_VERSION, NOYDB_FORMAT_VERSION, NOYDB_KEYRING_VERSION, NOYDB_SYNC_VERSION, NetworkError, NoAccessError, NotFoundError, Noydb, type NoydbAdapter, NoydbError, type NoydbEventMap, type NoydbOptions, type Permission, PermissionDeniedError, type Permissions, type PruneOptions, type PullResult, type PushResult, ReadOnlyError, type RevokeOptions, type Role, SyncEngine, type SyncMetadata, type SyncStatus, TamperedError, type UserInfo, ValidationError, createNoydb, defineAdapter, diff, enrollBiometric, estimateEntropy, formatDiff, isBiometricAvailable, loadBiometric, removeBiometric, saveBiometric, unlockBiometric, validatePassphrase };
|
|
2381
|
+
export { type AppendInput, BackupCorruptedError, BackupLedgerError, type BiometricCredential, type CacheOptions, type CacheStats, type ChangeEvent, type ChangeType, type Clause, Collection, CollectionIndexes, Compartment, type CompartmentBackup, type CompartmentSnapshot, type Conflict, ConflictError, type ConflictStrategy, DecryptionError, type DiffEntry, type DirtyEntry, type EncryptedEnvelope, type FieldClause, type FilterClause, type GrantOptions, type GroupClause, type HashIndex, type HistoryConfig, type HistoryEntry, type HistoryOptions, type IndexDef, type InferOutput, InvalidKeyError, type JsonPatch, type JsonPatchOp, type KeyringFile, LEDGER_COLLECTION, LEDGER_DELTAS_COLLECTION, type LedgerEntry, LedgerStore, type ListPageResult, Lru, type LruOptions, type LruStats, NOYDB_BACKUP_VERSION, NOYDB_FORMAT_VERSION, NOYDB_KEYRING_VERSION, NOYDB_SYNC_VERSION, NetworkError, NoAccessError, NotFoundError, Noydb, type NoydbAdapter, NoydbError, type NoydbEventMap, type NoydbOptions, type Operator, type OrderBy, type Permission, PermissionDeniedError, type Permissions, type PruneOptions, type PullResult, type PushResult, Query, type QueryPlan, type QuerySource, ReadOnlyError, type RefDescriptor, RefIntegrityError, type RefMode, RefRegistry, RefScopeError, type RefViolation, type RevokeOptions, type Role, SchemaValidationError, type StandardSchemaV1, type StandardSchemaV1Issue, type StandardSchemaV1SyncResult, SyncEngine, type SyncMetadata, type SyncStatus, TamperedError, type UserInfo, ValidationError, type VerifyResult, applyPatch, canonicalJson, computePatch, createNoydb, defineAdapter, diff, enrollBiometric, envelopePayloadHash, estimateEntropy, estimateRecordBytes, evaluateClause, evaluateFieldClause, executePlan, formatDiff, hashEntry, isBiometricAvailable, loadBiometric, paddedIndex, parseBytes, parseIndex, readPath, ref, removeBiometric, saveBiometric, sha256Hex, unlockBiometric, validatePassphrase, validateSchemaInput, validateSchemaOutput };
|