@noy-db/hub 0.1.0-pre.3 → 0.1.0-pre.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/blobs/index.cjs.map +1 -1
- package/dist/blobs/index.d.cts +2 -2
- package/dist/blobs/index.d.ts +2 -2
- package/dist/blobs/index.js +2 -2
- package/dist/bundle/index.d.cts +2 -2
- package/dist/bundle/index.d.ts +2 -2
- package/dist/{chunk-EXQRC2L4.js → chunk-AVWFLPNR.js} +2 -2
- package/dist/{chunk-ZLMV3TUA.js → chunk-EARQCIL7.js} +3 -3
- package/dist/{chunk-4OWFYIDQ.js → chunk-L77MEFCH.js} +3 -3
- package/dist/{chunk-5AATM2M2.js → chunk-LSZHBNDG.js} +2 -2
- package/dist/{chunk-XHFOENR2.js → chunk-NK2NSXXK.js} +2 -2
- package/dist/{chunk-ZRG4V3F5.js → chunk-O5GK62FJ.js} +1 -1
- package/dist/chunk-O5GK62FJ.js.map +1 -0
- package/dist/{chunk-M2F2JAWB.js → chunk-PSHTHSIX.js} +2 -2
- package/dist/{chunk-UQFSPSWG.js → chunk-QZIACZZU.js} +2 -2
- package/dist/consent/index.d.cts +2 -2
- package/dist/consent/index.d.ts +2 -2
- package/dist/{dev-unlock-CeXic1xC.d.cts → dev-unlock-5SmCVGyx.d.cts} +1 -1
- package/dist/{dev-unlock-KrKkcqD3.d.ts → dev-unlock-XOUecfQ9.d.ts} +1 -1
- package/dist/{hash-ChfJjRjQ.d.ts → hash-Bxud16vM.d.ts} +1 -1
- package/dist/{hash-9KO1BGxh.d.cts → hash-CvuKN2gH.d.cts} +1 -1
- package/dist/history/index.cjs.map +1 -1
- package/dist/history/index.d.cts +3 -3
- package/dist/history/index.d.ts +3 -3
- package/dist/history/index.js +2 -2
- package/dist/i18n/index.cjs.map +1 -1
- package/dist/i18n/index.d.cts +2 -2
- package/dist/i18n/index.d.ts +2 -2
- package/dist/i18n/index.js +3 -3
- package/dist/{index-DhjMjz7L.d.cts → index-BvUiM47h.d.cts} +1 -1
- package/dist/{index-C8kQtmOk.d.ts → index-Cy-MKrdK.d.ts} +1 -1
- package/dist/index.cjs +11 -3
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +5 -5
- package/dist/index.d.ts +5 -5
- package/dist/index.js +19 -11
- package/dist/index.js.map +1 -1
- package/dist/{ledger-2NX4L7PN.js → ledger-HWXYGUIQ.js} +3 -3
- package/dist/periods/index.cjs.map +1 -1
- package/dist/periods/index.d.cts +2 -2
- package/dist/periods/index.d.ts +2 -2
- package/dist/periods/index.js +3 -3
- package/dist/session/index.d.cts +3 -3
- package/dist/session/index.d.ts +3 -3
- package/dist/shadow/index.d.cts +2 -2
- package/dist/shadow/index.d.ts +2 -2
- package/dist/store/index.d.cts +2 -2
- package/dist/store/index.d.ts +2 -2
- package/dist/sync/index.cjs.map +1 -1
- package/dist/sync/index.d.cts +1 -1
- package/dist/sync/index.d.ts +1 -1
- package/dist/sync/index.js +2 -2
- package/dist/team/index.cjs.map +1 -1
- package/dist/team/index.d.cts +2 -2
- package/dist/team/index.d.ts +2 -2
- package/dist/team/index.js +4 -4
- package/dist/tx/index.d.cts +2 -2
- package/dist/tx/index.d.ts +2 -2
- package/dist/{types-Bfs0qr5F.d.cts → types-BVSfkYg6.d.cts} +40 -1
- package/dist/{types-BZpCZB8N.d.ts → types-Dmi7nrC9.d.ts} +40 -1
- package/package.json +1 -1
- package/dist/chunk-ZRG4V3F5.js.map +0 -1
- /package/dist/{chunk-EXQRC2L4.js.map → chunk-AVWFLPNR.js.map} +0 -0
- /package/dist/{chunk-ZLMV3TUA.js.map → chunk-EARQCIL7.js.map} +0 -0
- /package/dist/{chunk-4OWFYIDQ.js.map → chunk-L77MEFCH.js.map} +0 -0
- /package/dist/{chunk-5AATM2M2.js.map → chunk-LSZHBNDG.js.map} +0 -0
- /package/dist/{chunk-XHFOENR2.js.map → chunk-NK2NSXXK.js.map} +0 -0
- /package/dist/{chunk-M2F2JAWB.js.map → chunk-PSHTHSIX.js.map} +0 -0
- /package/dist/{chunk-UQFSPSWG.js.map → chunk-QZIACZZU.js.map} +0 -0
- /package/dist/{ledger-2NX4L7PN.js.map → ledger-HWXYGUIQ.js.map} +0 -0
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["../../src/history/index.ts","../../src/history/history.ts","../../src/history/diff.ts","../../src/types.ts","../../src/errors.ts","../../src/crypto.ts","../../src/history/ledger/entry.ts","../../src/history/ledger/patch.ts","../../src/history/ledger/constants.ts","../../src/history/ledger/hash.ts","../../src/history/ledger/store.ts","../../src/history/time-machine.ts","../../src/history/active.ts"],"sourcesContent":["/**\n * `@noy-db/hub/history` — subpath export for record-history / diff /\n * ledger-based audit trail + point-in-time primitives.\n *\n * Solo apps that don't track versioning, don't need a hash-chained\n * audit log, and don't restore to earlier points in time can exclude\n * this entire surface. Bundle savings ~1,880 LOC.\n *\n * The strategy seam is `withHistory()` — pass the returned\n * strategy to `createNoydb({ historyStrategy: ... })`. Without it,\n * the core `NO_HISTORY` stub no-ops snapshots/prune/clear and throws\n * on read APIs (`history`, `getVersion`, `diff`, `vault.at`,\n * `vault.ledger`).\n *\n * Named re-exports (not `export *`) so tsup keeps the barrel\n * populated even with `sideEffects: false`.\n */\n\n// ─── Strategy seam ─────────────────────────────────────\nexport { withHistory } from './active.js'\nexport type { HistoryStrategy } from './strategy.js'\n\n// ─── Per-record history ──────────────────────────────────\nexport {\n saveHistory,\n getHistory,\n getVersionEnvelope,\n pruneHistory,\n clearHistory,\n} from './history.js'\n\n// ─── Diff ────────────────────────────────────────────────\nexport { diff, formatDiff } from './diff.js'\nexport type { ChangeType, DiffEntry } from './diff.js'\n\n// ─── Hash-chained ledger ─────────────────────────────────\nexport {\n LedgerStore,\n LEDGER_COLLECTION,\n LEDGER_DELTAS_COLLECTION,\n envelopePayloadHash,\n} from './ledger/store.js'\nexport type { AppendInput, VerifyResult } from './ledger/store.js'\n\nexport {\n canonicalJson,\n sha256Hex,\n hashEntry,\n paddedIndex,\n parseIndex,\n} from './ledger/entry.js'\nexport type { LedgerEntry } from './ledger/entry.js'\n\n// ─── JSON Patch — delta history ──────────────────────────\nexport { computePatch, applyPatch } from './ledger/patch.js'\nexport type { JsonPatch, JsonPatchOp } from './ledger/patch.js'\n\n// ─── Time-machine queries ──────────────────────────────\nexport { VaultInstant, CollectionInstant } from './time-machine.js'\nexport type { VaultEngine } from './time-machine.js'\n","import type { NoydbStore, EncryptedEnvelope, HistoryOptions, PruneOptions } from '../types.js'\n\n/**\n * History storage convention:\n * Collection: `_history`\n * ID format: `{collection}:{recordId}:{paddedVersion}`\n * Version is zero-padded to 10 digits for lexicographic sorting.\n */\n\nconst HISTORY_COLLECTION = '_history'\nconst VERSION_PAD = 10\n\nfunction historyId(collection: string, recordId: string, version: number): string {\n return `${collection}:${recordId}:${String(version).padStart(VERSION_PAD, '0')}`\n}\n\n// Unused today, kept for future history-id parsing utilities.\n// eslint-disable-next-line @typescript-eslint/no-unused-vars\nfunction parseHistoryId(id: string): { collection: string; recordId: string; version: number } | null {\n const lastColon = id.lastIndexOf(':')\n if (lastColon < 0) return null\n const versionStr = id.slice(lastColon + 1)\n const rest = id.slice(0, lastColon)\n const firstColon = rest.indexOf(':')\n if (firstColon < 0) return null\n return {\n collection: rest.slice(0, firstColon),\n recordId: 
rest.slice(firstColon + 1),\n version: parseInt(versionStr, 10),\n }\n}\n\nfunction matchesPrefix(id: string, collection: string, recordId?: string): boolean {\n if (recordId) {\n return id.startsWith(`${collection}:${recordId}:`)\n }\n return id.startsWith(`${collection}:`)\n}\n\n/** Save a history entry (a complete encrypted envelope snapshot). */\nexport async function saveHistory(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string,\n envelope: EncryptedEnvelope,\n): Promise<void> {\n const id = historyId(collection, recordId, envelope._v)\n await adapter.put(vault, HISTORY_COLLECTION, id, envelope)\n}\n\n/** Get history entries for a record, sorted newest-first. */\nexport async function getHistory(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string,\n options?: HistoryOptions,\n): Promise<EncryptedEnvelope[]> {\n const allIds = await adapter.list(vault, HISTORY_COLLECTION)\n const matchingIds = allIds\n .filter(id => matchesPrefix(id, collection, recordId))\n .sort()\n .reverse() // newest first\n\n const entries: EncryptedEnvelope[] = []\n\n for (const id of matchingIds) {\n const envelope = await adapter.get(vault, HISTORY_COLLECTION, id)\n if (!envelope) continue\n\n // Apply time filters\n if (options?.from && envelope._ts < options.from) continue\n if (options?.to && envelope._ts > options.to) continue\n\n entries.push(envelope)\n\n if (options?.limit && entries.length >= options.limit) break\n }\n\n return entries\n}\n\n/** Get a specific version's envelope from history. */\nexport async function getVersionEnvelope(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string,\n version: number,\n): Promise<EncryptedEnvelope | null> {\n const id = historyId(collection, recordId, version)\n return adapter.get(vault, HISTORY_COLLECTION, id)\n}\n\n/** Prune history entries. Returns the number of entries deleted. */\nexport async function pruneHistory(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string | undefined,\n options: PruneOptions,\n): Promise<number> {\n const allIds = await adapter.list(vault, HISTORY_COLLECTION)\n const matchingIds = allIds\n .filter(id => recordId ? matchesPrefix(id, collection, recordId) : matchesPrefix(id, collection))\n .sort()\n\n let toDelete: string[] = []\n\n if (options.keepVersions !== undefined) {\n // Keep only the N most recent, delete the rest\n const keep = options.keepVersions\n if (matchingIds.length > keep) {\n toDelete = matchingIds.slice(0, matchingIds.length - keep)\n }\n }\n\n if (options.beforeDate) {\n // Delete entries older than the specified date\n for (const id of matchingIds) {\n if (toDelete.includes(id)) continue\n const envelope = await adapter.get(vault, HISTORY_COLLECTION, id)\n if (envelope && envelope._ts < options.beforeDate) {\n toDelete.push(id)\n }\n }\n }\n\n // Deduplicate\n const uniqueDeletes = [...new Set(toDelete)]\n\n for (const id of uniqueDeletes) {\n await adapter.delete(vault, HISTORY_COLLECTION, id)\n }\n\n return uniqueDeletes.length\n}\n\n/** Clear all history for a vault, optionally scoped to a collection or record. 
*/\nexport async function clearHistory(\n adapter: NoydbStore,\n vault: string,\n collection?: string,\n recordId?: string,\n): Promise<number> {\n const allIds = await adapter.list(vault, HISTORY_COLLECTION)\n let toDelete: string[]\n\n if (collection && recordId) {\n toDelete = allIds.filter(id => matchesPrefix(id, collection, recordId))\n } else if (collection) {\n toDelete = allIds.filter(id => matchesPrefix(id, collection))\n } else {\n toDelete = allIds\n }\n\n for (const id of toDelete) {\n await adapter.delete(vault, HISTORY_COLLECTION, id)\n }\n\n return toDelete.length\n}\n","/**\n * Zero-dependency JSON diff.\n * Produces a flat list of changes between two plain objects.\n */\n\nexport type ChangeType = 'added' | 'removed' | 'changed'\n\nexport interface DiffEntry {\n /** Dot-separated path to the changed field (e.g. \"address.city\"). */\n readonly path: string\n /** Type of change. */\n readonly type: ChangeType\n /** Previous value (undefined for 'added'). */\n readonly from?: unknown\n /** New value (undefined for 'removed'). */\n readonly to?: unknown\n}\n\n/**\n * Compute differences between two objects.\n * Returns an array of DiffEntry describing each changed field.\n * Returns empty array if objects are identical.\n */\nexport function diff(oldObj: unknown, newObj: unknown, basePath = ''): DiffEntry[] {\n const changes: DiffEntry[] = []\n\n // Both primitives or nulls\n if (oldObj === newObj) return changes\n\n // One is null/undefined\n if (oldObj == null && newObj != null) {\n return [{ path: basePath || '(root)', type: 'added', to: newObj }]\n }\n if (oldObj != null && newObj == null) {\n return [{ path: basePath || '(root)', type: 'removed', from: oldObj }]\n }\n\n // Different types\n if (typeof oldObj !== typeof newObj) {\n return [{ path: basePath || '(root)', type: 'changed', from: oldObj, to: newObj }]\n }\n\n // Both primitives (and not equal — checked above)\n if (typeof oldObj !== 'object') {\n return [{ path: basePath || '(root)', type: 'changed', from: oldObj, to: newObj }]\n }\n\n // Both arrays\n if (Array.isArray(oldObj) && Array.isArray(newObj)) {\n const maxLen = Math.max(oldObj.length, newObj.length)\n for (let i = 0; i < maxLen; i++) {\n const p = basePath ? `${basePath}[${i}]` : `[${i}]`\n if (i >= oldObj.length) {\n changes.push({ path: p, type: 'added', to: newObj[i] })\n } else if (i >= newObj.length) {\n changes.push({ path: p, type: 'removed', from: oldObj[i] })\n } else {\n changes.push(...diff(oldObj[i], newObj[i], p))\n }\n }\n return changes\n }\n\n // Both objects\n const oldRecord = oldObj as Record<string, unknown>\n const newRecord = newObj as Record<string, unknown>\n const allKeys = new Set([...Object.keys(oldRecord), ...Object.keys(newRecord)])\n\n for (const key of allKeys) {\n const p = basePath ? `${basePath}.${key}` : key\n if (!(key in oldRecord)) {\n changes.push({ path: p, type: 'added', to: newRecord[key] })\n } else if (!(key in newRecord)) {\n changes.push({ path: p, type: 'removed', from: oldRecord[key] })\n } else {\n changes.push(...diff(oldRecord[key], newRecord[key], p))\n }\n }\n\n return changes\n}\n\n/** Format a diff as a human-readable string. 
*/\nexport function formatDiff(changes: DiffEntry[]): string {\n if (changes.length === 0) return '(no changes)'\n return changes.map(c => {\n switch (c.type) {\n case 'added':\n return `+ ${c.path}: ${JSON.stringify(c.to)}`\n case 'removed':\n return `- ${c.path}: ${JSON.stringify(c.from)}`\n case 'changed':\n return `~ ${c.path}: ${JSON.stringify(c.from)} → ${JSON.stringify(c.to)}`\n }\n }).join('\\n')\n}\n","/**\n * Core types — the {@link NoydbStore} interface, envelope format, roles, and\n * all configuration shapes consumed by {@link createNoydb}.\n *\n * ## What lives here\n *\n * - **{@link NoydbStore}** — the 6-method contract every backend must implement\n * (`get`, `put`, `delete`, `list`, `loadAll`, `saveAll`).\n * - **{@link EncryptedEnvelope}** — the wire format stored by backends:\n * `{ _noydb, _v, _ts, _iv, _data }`. Backends only ever see this shape.\n * - **{@link Role} / {@link Permission}** — the access-control vocabulary\n * (`owner`, `admin`, `operator`, `viewer`, `client`).\n * - **{@link NoydbOptions}** — the full configuration object passed to\n * {@link createNoydb}.\n *\n * ## Extending the store interface\n *\n * All optional store capabilities (`ping`, `listPage`, `listSince`,\n * `presencePublish`, `presenceSubscribe`, `listVaults`) are additive extensions\n * discovered via `'method' in store`. Implementing them unlocks features but\n * is never required — core always falls back to the 6-method baseline.\n *\n * @module\n */\n\nimport type { StandardSchemaV1 } from './schema.js'\nimport type { SyncPolicy } from './store/sync-policy.js'\nimport type { BlobStrategy } from './blobs/strategy.js'\nimport type { IndexStrategy } from './indexing/strategy.js'\nimport type { AggregateStrategy } from './aggregate/strategy.js'\nimport type { CrdtStrategy } from './crdt/strategy.js'\nimport type { ConsentStrategy } from './consent/strategy.js'\nimport type { PeriodsStrategy } from './periods/strategy.js'\nimport type { ShadowStrategy } from './shadow/strategy.js'\nimport type { TxStrategy } from './tx/strategy.js'\nimport type { HistoryStrategy } from './history/strategy.js'\nimport type { I18nStrategy } from './i18n/strategy.js'\nimport type { SessionStrategy } from './session/strategy.js'\nimport type { SyncStrategy } from './team/sync-strategy.js'\n\n/** Format version for encrypted record envelopes. */\nexport const NOYDB_FORMAT_VERSION = 1 as const\n\n/** Format version for keyring files. */\nexport const NOYDB_KEYRING_VERSION = 1 as const\n\n/** Format version for backup files. */\nexport const NOYDB_BACKUP_VERSION = 1 as const\n\n/** Format version for sync metadata. 
*/\nexport const NOYDB_SYNC_VERSION = 1 as const\n\n// ─── Roles & Permissions ───────────────────────────────────────────────\n\n/**\n * Access role assigned to a user within a vault.\n *\n * Roles control both the operations a user can perform and which DEKs\n * they receive in their keyring:\n *\n * | Role | Collections | Can grant/revoke | Can export |\n * |------------|-----------------|:----------------:|:----------:|\n * | `owner` | all (rw) | Yes (all roles) | Yes |\n * | `admin` | all (rw) | Yes (≤ admin) | Yes |\n * | `operator` | explicit (rw) | No | ACL-scoped |\n * | `viewer` | all (ro) | No | Yes |\n * | `client` | explicit (ro) | No | ACL-scoped |\n */\nexport type Role = 'owner' | 'admin' | 'operator' | 'viewer' | 'client'\n\n/**\n * Read-write or read-only access on a collection.\n * Stored per-collection in the user's keyring.\n */\nexport type Permission = 'rw' | 'ro'\n\n/**\n * Map of collection name → permission level for a user's keyring entry.\n * `'*'` is the wildcard collection matching all collections in the vault.\n */\nexport type Permissions = Record<string, Permission>\n\n// ─── Encrypted Envelope ────────────────────────────────────────────────\n\n/** The encrypted wrapper stored by adapters. Adapters only ever see this. */\nexport interface EncryptedEnvelope {\n readonly _noydb: typeof NOYDB_FORMAT_VERSION\n readonly _v: number\n readonly _ts: string\n readonly _iv: string\n readonly _data: string\n /** User who created this version (unencrypted metadata). */\n readonly _by?: string\n /**\n * Hierarchical access tier. Omitted → tier 0.\n *\n * Unencrypted on purpose — the store reads it to route the envelope\n * to the right DEK slot without having to try-decrypt against every\n * tier. Only leaks the tier of each record, not any value\n * equivalence.\n */\n readonly _tier?: number\n /**\n * User id who last elevated this record. Used by\n * `demote()` to gate the reverse operation: only the original\n * elevator or an owner can demote a record back down. Cleared on\n * every successful demote so a later re-elevate requires the new\n * actor to own the demotion right.\n */\n readonly _elevatedBy?: string\n /**\n * Deterministic-encryption index. Map of field name →\n * base64 deterministic ciphertext. Present only when the collection\n * declares `deterministicFields` and the feature is acknowledged. The\n * field names are unencrypted (they're the index keys); the values\n * are AES-GCM ciphertext with an HKDF-derived deterministic IV.\n *\n * Enables blind equality search (`collection.findByDet(field,\n * value)`) without decrypting every record. Leaks equality as a known\n * side channel.\n */\n readonly _det?: Record<string, string>\n}\n\n/**\n * Placeholder returned by `getAtTier()` in `'ghost'` mode when a\n * record is at a tier the caller cannot decrypt. Record existence is\n * advertised — the id and tier are visible — but contents are\n * withheld. `canElevateFrom` lists user ids authorized to elevate\n * access for this caller when known; absent when the workflow is\n * not configured.\n */\nexport interface GhostRecord {\n readonly _ghost: true\n readonly _tier: number\n readonly canElevateFrom?: readonly string[]\n}\n\n/** Control what lower-tier reads see above their clearance. */\nexport type TierMode = 'invisibility' | 'ghost'\n\n/**\n * Event emitted when a record at a tier above the caller's inherent\n * clearance is read or written successfully (via elevation or\n * delegation). 
Always written to the ledger; subscribers get a\n * real-time feed.\n */\nexport interface CrossTierAccessEvent {\n readonly actor: string\n readonly collection: string\n readonly id: string\n readonly tier: number\n /** How the caller gained tier access: they elevated it, or a delegation is active. */\n readonly authorization: 'elevation' | 'delegation' | 'inherent'\n readonly op: 'get' | 'put' | 'elevate' | 'demote'\n readonly ts: string\n /**\n * When `authorization === 'elevation'`, the audit reason string the\n * caller passed to `vault.elevate(...)`. Empty for inherent /\n * delegation paths.\n */\n readonly reason?: string\n /**\n * When `authorization === 'elevation'`, the tier the caller's\n * keyring effectively held BEFORE elevation. Useful for audit\n * dashboards distinguishing \"operator elevating to 2\" from\n * \"inherent tier-2 write.\"\n */\n readonly elevatedFrom?: number\n}\n\n/**\n * A single deterministic-ciphertext index slot on an envelope. Stored\n * as `iv:data` (both base64, colon-separated) so a single string per\n * field keeps the envelope compact.\n */\nexport type DeterministicCipher = string\n\n// ─── Vault Snapshot ──────────────────────────────────────────────\n\n/** All records across all collections for a compartment. */\nexport type VaultSnapshot = Record<string, Record<string, EncryptedEnvelope>>\n\n/**\n * Result of a single page fetch via the optional `listPage` adapter extension.\n *\n * `items` carries the actual encrypted envelopes (not just ids) so the\n * caller can decrypt and emit a single record without an extra `get()`\n * round-trip per id. `nextCursor` is `null` on the final page.\n */\nexport interface ListPageResult {\n /** Encrypted envelopes for this page, in adapter-defined order. */\n items: Array<{ id: string; envelope: EncryptedEnvelope }>\n /** Opaque cursor for the next page, or `null` if this was the last page. */\n nextCursor: string | null\n}\n\n// ─── Store Interface ───────────────────────────────────────────────────\n\nexport interface NoydbStore {\n /**\n * Optional human-readable adapter name (e.g. 'memory', 'file', 'dynamo').\n * Used in diagnostic messages and the listPage fallback warning. Adapters\n * are encouraged to set this so logs are clearer about which backend is\n * involved when something goes wrong.\n */\n name?: string\n\n /** Get a single record. Returns null if not found. */\n get(vault: string, collection: string, id: string): Promise<EncryptedEnvelope | null>\n\n /** Put a record. Throws ConflictError if expectedVersion doesn't match. */\n put(\n vault: string,\n collection: string,\n id: string,\n envelope: EncryptedEnvelope,\n expectedVersion?: number,\n ): Promise<void>\n\n /** Delete a record. */\n delete(vault: string, collection: string, id: string): Promise<void>\n\n /** List all record IDs in a collection. */\n list(vault: string, collection: string): Promise<string[]>\n\n /** Load all records for a vault (initial hydration). */\n loadAll(vault: string): Promise<VaultSnapshot>\n\n /** Save all records for a vault (bulk write / restore). */\n saveAll(vault: string, data: VaultSnapshot): Promise<void>\n\n /** Optional connectivity check for sync engine. */\n ping?(): Promise<boolean>\n\n /**\n * Optional: list record IDs in a collection that have `_ts` after `since`.\n * Used by partial sync (`pull({ modifiedSince })`). 
Adapters that omit this\n * fall back to a full `loadAll` + client-side timestamp filter.\n */\n listSince?(vault: string, collection: string, since: string): Promise<string[]>\n\n /**\n * Optional pagination extension. Adapters that implement `listPage` get\n * the streaming `Collection.scan()` fast path; adapters that don't are\n * silently fallen back to a full `loadAll()` + slice (with a one-time\n * console.warn).\n *\n * `cursor` is opaque to the core — each adapter encodes its own paging\n * state (DynamoDB: base64 LastEvaluatedKey JSON; S3: ContinuationToken;\n * memory/file/browser: numeric offset of a sorted id list). Pass\n * `undefined` to start from the beginning.\n *\n * `limit` is a soft upper bound on `items.length`. Adapters MAY return\n * fewer items even when more exist (e.g. if the underlying store has\n * its own page size cap), and MUST signal \"no more pages\" by returning\n * `nextCursor: null`.\n *\n * The 6-method core contract is unchanged — this is an additive\n * extension discovered via `'listPage' in adapter`.\n */\n listPage?(\n vault: string,\n collection: string,\n cursor?: string,\n limit?: number,\n ): Promise<ListPageResult>\n\n /**\n * Optional pub/sub for real-time presence.\n * Publish an encrypted payload to a presence channel.\n * Falls back to storage-based polling when absent.\n */\n presencePublish?(channel: string, payload: string): Promise<void>\n\n /**\n * Optional pub/sub for real-time presence.\n * Subscribe to a presence channel. Returns an unsubscribe function.\n * Falls back to storage-based polling when absent.\n */\n presenceSubscribe?(channel: string, callback: (payload: string) => void): () => void\n\n /**\n * Optional cross-vault enumeration extension.\n *\n * Returns the names of every top-level vault the store\n * currently stores. Used by `Noydb.listAccessibleVaults()` to\n * enumerate the universe of vaults before filtering down to\n * the ones the calling principal can actually unwrap.\n *\n * **Why this is optional:** the storage shape of compartments\n * differs across backends. Memory and file stores store\n * vaults as top-level keys / directories and can enumerate\n * them in O(1) calls. DynamoDB stores everything in a single table\n * keyed by `(compartment#collection, id)` — enumerating compartments\n * requires either a Scan (expensive, eventually consistent, leaks\n * ciphertext metadata) or a dedicated GSI that the consumer\n * provisioned. S3 needs a prefix list (cheap if enabled, ACL-sensitive\n * otherwise). Browser localStorage can scan keys by prefix.\n *\n * Stores that cannot implement `listVaults` cheaply or\n * cleanly should omit it. Core surfaces a `StoreCapabilityError`\n * with a clear message when a caller invokes\n * `listAccessibleVaults()` against a store that doesn't\n * provide this method, so consumers know to either upgrade their\n * store, provide a candidate list explicitly to `queryAcross()`,\n * or fall back to maintaining the compartment index out of band.\n *\n * **Privacy note:** `listVaults` returns *every* compartment\n * the store has, not just the ones the caller can access. The\n * existence-leak filtering (returning only compartments whose\n * keyring the caller can unwrap) happens in core, not in the\n * store. The store is trusted to know its own contents — that\n * is not a leak in the threat model. 
The leak the API guards\n * against is the *return value* of `listAccessibleVaults()`\n * exposing existence to a downstream observer who only sees that\n * function's output.\n *\n * The 6-method core contract is unchanged — this is an additive\n * extension discovered via `'listVaults' in store`.\n */\n listVaults?(): Promise<string[]>\n\n /**\n * Optional: generate a presigned URL for direct client download.\n * Only meaningful for object stores (S3, GCS) that support URL signing.\n * Returns a time-limited URL that fetches the encrypted envelope directly.\n * The caller must decrypt client-side (the URL returns ciphertext).\n */\n presignUrl?(vault: string, collection: string, id: string, expiresInSeconds?: number): Promise<string>\n\n /**\n * Optional: estimate current storage usage.\n * Returns `{ usedBytes, quotaBytes }` or null if the store cannot estimate.\n * Used by quota-aware routing to detect overflow conditions.\n */\n estimateUsage?(): Promise<{ usedBytes: number; quotaBytes: number } | null>\n\n /**\n * Optional multi-record atomic write.\n *\n * When present, `db.transaction(async (tx) => { ... })` uses this to\n * commit every staged op in one storage-layer transaction — either\n * all ops land or none do, regardless of which records they touch.\n * Every `TxOp.expectedVersion` (when set) must be honored atomically\n * alongside the write; any violation throws `ConflictError` and the\n * whole batch fails.\n *\n * Stores that omit this fall through to the hub's per-record OCC\n * fallback: pre-flight CAS check, then sequential `put`/`delete`\n * with best-effort unwind on mid-batch failure (see\n * `runTransaction` for the exact semantics and crash window).\n *\n * Native implementations: `to-memory` (single Map mutation),\n * `to-dynamo` (`TransactWriteItems`), `to-browser-idb` (one\n * `readwrite` transaction). File / S3 cannot implement this\n * atomically and should omit the method.\n */\n tx?(ops: readonly TxOp[]): Promise<void>\n}\n\n/**\n * A single staged operation inside a `db.transaction(fn)` commit. The\n * hub assembles `TxOp[]` from the user's `tx.collection().put/delete`\n * calls, encrypts any `record` values into `envelope`, and hands the\n * array to `NoydbStore.tx()` when the store supports atomic batch\n * writes. Stores that implement `tx()` MUST honor every\n * `expectedVersion` atomically against the stored envelope version.\n */\nexport interface TxOp {\n readonly type: 'put' | 'delete'\n readonly vault: string\n readonly collection: string\n readonly id: string\n /** Populated for `type: 'put'` — the encrypted envelope to write. */\n readonly envelope?: EncryptedEnvelope\n /** Optional per-record CAS. Mismatch must throw `ConflictError`. */\n readonly expectedVersion?: number\n}\n\n// ─── Store Factory Helper ──────────────────────────────────────────────\n\n/** Type-safe helper for creating store factories. */\nexport function createStore<TOptions>(\n factory: (options: TOptions) => NoydbStore,\n): (options: TOptions) => NoydbStore {\n return factory\n}\n\n// ─── Keyring ───────────────────────────────────────────────────────────\n\n/**\n * Interchange formats `@noy-db/as-*` packages can produce. 
`'*'` is a\n * wildcard granting every current + future plaintext format.\n */\nexport type ExportFormat =\n | 'xlsx'\n | 'csv'\n | 'json'\n | 'ndjson'\n | 'xml'\n | 'sql'\n | 'pdf'\n | 'blob'\n | 'zip'\n | '*'\n\n/**\n * Owner-granted export capability on a keyring.\n *\n * Two independent dimensions:\n *\n * - `plaintext` — per-format allowlist for record formatters + blob\n * extractors that emit plaintext bytes (`as-xlsx`, `as-csv`,\n * `as-blob`, `as-zip`, …). **Defaults to empty** for every role;\n * the owner/admin must positively grant per-format (or `'*'`).\n * - `bundle` — boolean for `.noydb` encrypted container export\n * (`as-noydb`). **Default policy: on for owner/admin, off for\n * operator/viewer/client** — applied when the field is absent or\n * undefined (see `hasExportCapability`).\n */\nexport interface ExportCapability {\n readonly plaintext?: readonly ExportFormat[]\n readonly bundle?: boolean\n}\n\n/**\n * Owner-granted import capability on a keyring (sibling of\n * `ExportCapability`, issue ).\n *\n * Two independent dimensions:\n *\n * - `plaintext` — per-format allowlist for `as-*` readers that ingest\n * plaintext bytes (`as-csv`, `as-json`, `as-ndjson`, `as-zip`, …).\n * Defaults to empty for every role; the owner/admin must positively\n * grant per-format (or `'*'`).\n * - `bundle` — boolean gate for `.noydb` bundle import. **Defaults to\n * `false` for every role**, including owner/admin. Import is more\n * dangerous than export (corrupts vs leaks), so the policy is\n * default-closed across the board — the owner explicitly opts a\n * keyring in via `db.grant({ importCapability: { bundle: true } })`.\n */\nexport interface ImportCapability {\n readonly plaintext?: readonly ExportFormat[]\n readonly bundle?: boolean\n}\n\nexport interface KeyringFile {\n readonly _noydb_keyring: typeof NOYDB_KEYRING_VERSION\n readonly user_id: string\n readonly display_name: string\n readonly role: Role\n readonly permissions: Permissions\n readonly deks: Record<string, string>\n readonly salt: string\n readonly created_at: string\n readonly granted_by: string\n /**\n * Optional — authorization spec capability bits. Absent on keyrings written\n * before the RFC implementation. Loading falls back to role-based\n * defaults (owner/admin get bundle-on, everyone else off).\n */\n readonly export_capability?: ExportCapability\n /**\n * Optional bundle-slot expiry. ISO-8601 timestamp; past\n * the cutoff `loadKeyring` throws `KeyringExpiredError` before any\n * DEK unwrap is attempted. Useful for time-boxed audit access:\n * \"this slot works for 30 days then becomes opaque to its holder.\"\n *\n * Absent on live keyrings written via `db.grant()` — the field is\n * meaningful for `BundleRecipient` slots produced by\n * `writeNoydbBundle({ recipients: [...] })`. Setting it on a live\n * keyring is allowed but unusual.\n */\n readonly expires_at?: string\n /**\n * Optional — issue import-capability bits. Absent on keyrings\n * written before landed. Loading falls back to default-closed\n * for every role and every format.\n */\n readonly import_capability?: ImportCapability\n /**\n * hierarchical access clearance. Absent → 0 (advisory;\n * the real check is whether the DEK map carries a `collection#tier`\n * entry for the requested tier). 
Owners and admins default to the\n * highest tier they have DEKs for at grant time.\n */\n readonly clearance?: number\n}\n\n// ─── Backup ────────────────────────────────────────────────────────────\n\nexport interface VaultBackup {\n readonly _noydb_backup: typeof NOYDB_BACKUP_VERSION\n readonly _compartment: string\n readonly _exported_at: string\n readonly _exported_by: string\n readonly keyrings: Record<string, KeyringFile>\n readonly collections: VaultSnapshot\n /**\n * Internal collections (`_ledger`, `_ledger_deltas`, `_history`, `_sync`, …)\n * captured alongside the data collections. Optional for backwards\n * compat with backups, which only stored data collections —\n * loading a backup leaves the ledger empty (and `verifyBackupIntegrity`\n * skips the chain check, surfacing only a console warning).\n */\n readonly _internal?: VaultSnapshot\n /**\n * Verifiable-backup metadata. Embeds the ledger head at\n * dump time so `load()` can cross-check that the loaded chain matches\n * exactly what was exported. A backup whose chain has been tampered\n * with — either by modifying ledger entries or by modifying data\n * envelopes that the chain references — fails this check.\n *\n * Optional for backwards compat with backups; missing means\n * \"legacy backup, load with a warning, no integrity check\".\n */\n readonly ledgerHead?: {\n /** Hex sha256 of the canonical JSON of the last ledger entry. */\n readonly hash: string\n /** Sequential index of the last ledger entry. */\n readonly index: number\n /** ISO timestamp captured at dump time. */\n readonly ts: string\n }\n}\n\n// ─── Export ────────────────────────────────────────────────────────────\n\n/**\n * Options for `Vault.exportStream()` and `Vault.exportJSON()`.\n *\n * The defaults match the most common consumer pattern: one chunk per\n * collection, no ledger metadata. Per-record streaming and ledger-head\n * inclusion are opt-in because both add structure most consumers don't\n * need.\n */\nexport interface ExportStreamOptions {\n /**\n * `'collection'` (default) yields one chunk per collection with all\n * records bundled in `chunk.records`. `'record'` yields one chunk per\n * record, useful for arbitrarily large collections that should never\n * be materialized as a single array.\n */\n readonly granularity?: 'collection' | 'record'\n\n /**\n * When `true`, every chunk includes the current compartment ledger\n * head under `chunk.ledgerHead`. The value is identical across every\n * chunk in a single export (one ledger per compartment). Forward-\n * compatible with future partition work where the head would become\n * per-partition. Default: `false`.\n */\n readonly withLedgerHead?: boolean\n /**\n * When set to a BCP 47 locale string (e.g. `'th'`), `exportJSON()`\n * resolves all `dictKey` labels to that locale and omits the raw\n * `dictionaries` snapshot from the output. Has no effect\n * on `exportStream()` — format packages use the `chunk.dictionaries`\n * snapshot directly and apply their own locale strategy.\n *\n * Default: `undefined` — embed the raw snapshot under `_dictionaries`.\n */\n readonly resolveLabels?: string\n}\n\n/**\n * One chunk yielded by `Vault.exportStream()`.\n *\n * `granularity: 'collection'` yields one chunk per collection with the\n * full record array in `records`. 
`granularity: 'record'` yields one\n * chunk per record with `records` containing exactly one element — the\n * `schema` and `refs` metadata is repeated on every chunk so consumers\n * doing per-record streaming don't have to thread state across yields.\n */\nexport interface ExportChunk<T = unknown> {\n /** Collection name (no leading underscore — internal collections are filtered out). */\n readonly collection: string\n\n /**\n * Standard Schema validator attached to the collection at `collection()`\n * construction time, or `null` if no schema was provided. Surfaced so\n * downstream serializers (`@noy-db/as-*` packages, custom\n * exporters) can produce schema-aware output (typed CSV headers, XSD\n * generation, etc.) without poking at collection internals.\n */\n readonly schema: StandardSchemaV1<unknown, T> | null\n\n /**\n * Foreign-key references declared on the collection via the `refs`\n * option, as the `{ field → { target, mode } }` map produced by\n * `RefRegistry.getOutbound`. Empty object when no refs were declared.\n */\n readonly refs: Record<string, { readonly target: string; readonly mode: 'strict' | 'warn' | 'cascade' }>\n\n /**\n * Decrypted, ACL-scoped, schema-validated records. Length 1 in\n * `granularity: 'record'` mode, full collection in `granularity: 'collection'`\n * mode. Records are returned by reference from the collection's eager\n * cache where applicable — consumers must treat them as immutable.\n */\n readonly records: T[]\n\n /**\n * Dictionary snapshots for every `dictKey` field declared on this\n * collection. Captured once at stream-start and held\n * constant across all chunks within the same export — a rename\n * mid-export does not change the snapshot. `undefined` when the\n * collection has no `dictKeyFields`.\n *\n * Shape: `{ [fieldName]: { [stableKey]: { [locale]: label } } }`\n *\n * @example\n * ```ts\n * chunk.dictionaries?.status?.paid?.th // → 'ชำระแล้ว'\n * ```\n */\n readonly dictionaries?: Record<\n string, // field name\n Record<string, Record<string, string>> // stable key → locale → label\n >\n\n /**\n * Vault ledger head at export time. Present only when\n * `exportStream({ withLedgerHead: true })` was called. 
Identical\n * across every chunk in the same export — included on every chunk\n * for forward-compatibility with future per-partition ledgers, where\n * the value will differ per chunk.\n */\n readonly ledgerHead?: {\n readonly hash: string\n readonly index: number\n readonly ts: string\n }\n}\n\n// ─── Sync ──────────────────────────────────────────────────────────────\n\nexport interface DirtyEntry {\n readonly vault: string\n readonly collection: string\n readonly id: string\n readonly action: 'put' | 'delete'\n readonly version: number\n readonly timestamp: string\n}\n\nexport interface SyncMetadata {\n readonly _noydb_sync: typeof NOYDB_SYNC_VERSION\n readonly last_push: string | null\n readonly last_pull: string | null\n readonly dirty: DirtyEntry[]\n}\n\nexport interface Conflict {\n readonly vault: string\n readonly collection: string\n readonly id: string\n readonly local: EncryptedEnvelope\n readonly remote: EncryptedEnvelope\n readonly localVersion: number\n readonly remoteVersion: number\n /**\n * Present only when the collection uses `conflictPolicy: 'manual'`.\n * Call `resolve(winner)` to commit the winning envelope, or\n * `resolve(null)` to defer (conflict stays queued for the next sync).\n * Called synchronously inside the `sync:conflict` event handler.\n */\n readonly resolve?: (winner: EncryptedEnvelope | null) => void\n}\n\nexport type ConflictStrategy =\n | 'local-wins'\n | 'remote-wins'\n | 'version'\n | ((conflict: Conflict) => 'local' | 'remote')\n\n/**\n * Collection-level conflict policy.\n * Overrides the db-level `conflict` option for the specific collection.\n *\n * - `'last-writer-wins'` — higher `_ts` wins (timestamp LWW).\n * - `'first-writer-wins'` — lower `_v` wins (earlier version is preserved).\n * - `'manual'` — emits `sync:conflict` with a `resolve` callback. Call\n * `resolve(winner)` synchronously to commit or `resolve(null)` to defer.\n * - Custom fn — synchronous `(local: T, remote: T) => T`. Must be pure.\n */\nexport type ConflictPolicy<T> =\n | 'last-writer-wins'\n | 'first-writer-wins'\n | 'manual'\n | ((local: T, remote: T) => T)\n\n/**\n * Envelope-level resolver registered per collection with the SyncEngine.\n * Receives the `id` of the conflicting record and both envelopes.\n * Returns the winning envelope, or `null` to defer resolution.\n * @internal\n */\nexport type CollectionConflictResolver = (\n id: string,\n local: EncryptedEnvelope,\n remote: EncryptedEnvelope,\n) => Promise<EncryptedEnvelope | null>\n\n/** Options for targeted push operations. */\nexport interface PushOptions {\n /** Only push records belonging to these collections. Omit to push all dirty. */\n collections?: string[]\n}\n\n/** Options for targeted pull operations. */\nexport interface PullOptions {\n /** Only pull these collections. Omit to pull all. */\n collections?: string[]\n /**\n * Only pull records with `_ts` strictly after this ISO timestamp.\n * Adapters that implement `listSince` use it directly; others fall back\n * to a full scan with client-side filtering.\n */\n modifiedSince?: string\n}\n\nexport interface PushResult {\n readonly pushed: number\n readonly conflicts: Conflict[]\n readonly errors: Error[]\n}\n\nexport interface PullResult {\n readonly pulled: number\n readonly conflicts: Conflict[]\n readonly errors: Error[]\n}\n\n/** Result of a sync transaction commit. 
*/\nexport interface SyncTransactionResult {\n readonly status: 'committed' | 'conflict'\n readonly pushed: number\n readonly conflicts: Conflict[]\n}\n\nexport interface SyncStatus {\n readonly dirty: number\n readonly lastPush: string | null\n readonly lastPull: string | null\n readonly online: boolean\n}\n\n// ─── Sync Target ─────────────────────────────────────────\n\nexport type SyncTargetRole = 'sync-peer' | 'backup' | 'archive'\n\n/**\n * A sync target with role and optional per-target policy.\n *\n * | Role | Direction | Conflict resolution | Typical use |\n * |-------------|---------------|---------------------|--------------------------|\n * | `sync-peer` | Bidirectional | ConflictStrategy | DynamoDB live sync |\n * | `backup` | Push-only | N/A (receives merged)| S3 dump, Google Drive |\n * | `archive` | Push-only | N/A | IPFS, Git tags, S3 Lock |\n */\nexport interface SyncTarget {\n /** The store to sync with. */\n readonly store: NoydbStore\n /** Role determines sync direction and conflict handling. */\n readonly role: SyncTargetRole\n /** Per-target sync policy. Inherits store-category default when absent. */\n readonly policy?: SyncPolicy\n /** Human-readable label for DevTools and audit logs. */\n readonly label?: string\n}\n\n// ─── Events ────────────────────────────────────────────────────────────\n\nexport interface ChangeEvent {\n readonly vault: string\n readonly collection: string\n readonly id: string\n readonly action: 'put' | 'delete'\n}\n\nexport interface NoydbEventMap {\n 'change': ChangeEvent\n 'error': Error\n 'sync:push': PushResult\n 'sync:pull': PullResult\n 'sync:conflict': Conflict\n 'sync:online': void\n 'sync:offline': void\n 'sync:backup-error': { vault: string; target: string; error: Error }\n 'history:save': { vault: string; collection: string; id: string; version: number }\n 'history:prune': { vault: string; collection: string; id: string; pruned: number }\n /**\n * Emitted when a persisted-index side-car put/delete fails after the\n * main record write already succeeded. The main record is durable; the\n * index mirror may have drifted. Operators reconcile via\n * `collection.reconcileIndex(field)`.\n */\n 'index:write-partial': {\n vault: string\n collection: string\n id: string\n action: 'put' | 'delete'\n error: Error\n }\n /**\n * emitted by `Collection.ensurePersistedIndexesLoaded()`\n * once per field on first lazy-mode query when\n * `reconcileOnOpen: 'auto' | 'dry-run'` is configured. `applied` is\n * `0` in `'dry-run'` mode. `skipped` is reserved for a future\n * drift-stamp optimization that short-circuits the reconcile when\n * the mirror version matches what's on disk — currently always\n * `false` (the full reconcile runs every session).\n */\n 'index:reconciled': {\n vault: string\n collection: string\n field: string\n missing: readonly string[]\n stale: readonly string[]\n applied: number\n skipped: boolean\n }\n}\n\n// ─── Grant / Revoke ────────────────────────────────────────────────────\n\nexport interface GrantOptions {\n readonly userId: string\n readonly displayName: string\n readonly role: Role\n readonly passphrase: string\n readonly permissions?: Permissions\n /**\n * Optional `@noy-db/as-*` export capability. Omit or\n * leave undefined to apply role-based defaults (see\n * `hasExportCapability` and `ExportCapability`).\n */\n readonly exportCapability?: ExportCapability\n /**\n * Optional `@noy-db/as-*` import capability (issue ). 
Omit or\n * leave undefined for default-closed semantics — no plaintext format\n * is grantable until positively listed; bundle import is denied.\n */\n readonly importCapability?: ImportCapability\n}\n\nexport interface RevokeOptions {\n readonly userId: string\n readonly rotateKeys?: boolean\n\n /**\n * Cascade behavior when the revoked user is an admin who has granted\n * other admins.\n *\n * - `'strict'` (default) — recursively revoke every admin that the\n * target (transitively) granted. The cascade walks the\n * `granted_by` field on each keyring file and stops at non-admin\n * leaves. All affected collections are accumulated and rotated in\n * a single pass at the end, so cascade cost is O(records in\n * affected collections), not O(records × cascade depth).\n *\n * - `'warn'` — leave the descendant admins in place but emit a\n * `console.warn` listing them. Useful for diagnostic dry runs and\n * for environments where the operator wants to clean up the\n * delegation tree manually.\n *\n * No effect when the target is not an admin (operators, viewers, and\n * clients cannot grant other users, so they have no delegation\n * subtree to cascade through). Defaults to `'strict'`.\n */\n readonly cascade?: 'strict' | 'warn'\n}\n\n// ─── Cross-vault queries ──────────────────────────────\n\n/**\n * One entry returned by `Noydb.listAccessibleVaults()`. Carries\n * the compartment id and the role the calling principal holds in it,\n * so the consumer can decide how to fan out without re-checking\n * permissions per vault.\n */\nexport interface AccessibleVault {\n readonly id: string\n readonly role: Role\n}\n\n/**\n * Options for `Noydb.listAccessibleVaults()`.\n */\nexport interface ListAccessibleVaultsOptions {\n /**\n * Minimum role the caller must hold to include a compartment in the\n * result. Compartments where the caller's role is strictly *below*\n * this threshold are silently excluded. Defaults to `'client'`,\n * which means \"every vault I can unwrap is returned.\" Set to\n * `'admin'` for \"vaults where I can grant/revoke,\" or\n * `'owner'` for \"vaults I own.\"\n *\n * The privilege ordering used:\n * `client (1) < viewer (2) < operator (3) < admin (4) < owner (5)`\n *\n * Note: `viewer` and `client` are conceptually peers in the ACL\n * (neither can grant), but `viewer` has read-all access while\n * `client` has only explicit-collection read. The numeric order\n * reflects \"how much can this principal see,\" not \"how much can\n * this principal modify.\"\n */\n readonly minRole?: Role\n}\n\n/**\n * Options for `Noydb.queryAcross()`.\n */\nexport interface QueryAcrossOptions {\n /**\n * Maximum number of compartments to process in parallel. Defaults\n * to `1` (sequential) — conservative because the per-compartment\n * callback typically does its own I/O and an unbounded fan-out can\n * exhaust adapter connections (DynamoDB throughput, S3 socket\n * limits, browser fetch concurrency).\n *\n * Set to `4` or `8` for cloud-backed compartments where parallelism\n * is the whole point of fanning out. Set to `1` (default) for local\n * adapters where the disk I/O serializes anyway.\n */\n readonly concurrency?: number\n}\n\n/**\n * One entry in the array returned by `Noydb.queryAcross()`. 
Either\n * `result` is set (callback succeeded for this compartment) or\n * `error` is set (callback threw, or compartment failed to open).\n *\n * Per-compartment errors do **not** abort the overall fan-out — every\n * compartment is given a chance to run its callback, and the\n * partition between success and failure is exposed in the return\n * value. Consumers that want fail-fast semantics can check\n * `r.error !== undefined` and short-circuit themselves.\n */\nexport type QueryAcrossResult<T> =\n | { readonly vault: string; readonly result: T; readonly error?: undefined }\n | { readonly vault: string; readonly result?: undefined; readonly error: Error }\n\n// ─── User Info ─────────────────────────────────────────────────────────\n\nexport interface UserInfo {\n readonly userId: string\n readonly displayName: string\n readonly role: Role\n readonly permissions: Permissions\n readonly createdAt: string\n readonly grantedBy: string\n}\n\n// ─── Session ───────────────────────────────────────────────\n\n/**\n * Operations that a session policy can require re-authentication for.\n * Passed as the `requireReAuthFor` array in `SessionPolicy`.\n */\nexport type ReAuthOperation = 'export' | 'grant' | 'revoke' | 'rotate' | 'changeSecret'\n\n/**\n * Session policy controlling lifetime, re-auth requirements, and\n * background-lock behavior.\n *\n * All timeout values are in milliseconds. `undefined` means \"no limit.\"\n * The policy is evaluated lazily — it does not start timers itself;\n * enforcement happens at the Noydb call site.\n */\nexport interface SessionPolicy {\n /**\n * Idle timeout in ms. If no NOYDB operation is performed for this\n * duration, the session is revoked on the next operation attempt\n * (which will throw `SessionExpiredError`). The idle clock resets\n * on every successful operation.\n *\n * Default: `undefined` (no idle timeout).\n */\n readonly idleTimeoutMs?: number\n\n /**\n * Absolute timeout in ms from session creation. After this duration\n * the session is unconditionally revoked regardless of activity.\n *\n * Default: `undefined` (no absolute timeout).\n */\n readonly absoluteTimeoutMs?: number\n\n /**\n * Operations that require the user to re-authenticate (re-enter their\n * passphrase or perform a fresh WebAuthn assertion) before proceeding,\n * even if the session is still alive.\n *\n * Common pattern: `requireReAuthFor: ['export', 'grant']` — allow\n * read/write operations in the background but demand a fresh credential\n * for high-risk mutations.\n *\n * Default: `[]` (no extra re-auth requirements).\n */\n readonly requireReAuthFor?: readonly ReAuthOperation[]\n\n /**\n * If `true`, the session is revoked when the page goes to the background\n * (visibilitychange event, `document.hidden === true`). Useful for\n * high-sensitivity deployments where leaving the tab is treated as\n * a session boundary.\n *\n * No-op in non-browser environments (Node.js, workers without document).\n * Default: `false`.\n */\n readonly lockOnBackground?: boolean\n}\n\n// ─── i18n / Locale ─────────────────────────────────────\n\n/**\n * Locale-aware read options. Pass to `Collection.get()`, `list()`,\n * `query()`, and `scan()` to trigger per-record locale resolution for\n * `dictKey` and `i18nText` fields.\n *\n * - **`locale: 'raw'`** — skip resolution for `i18nText` fields and\n * return the full `{ [locale]: string }` map. Dict key fields still\n * return the stable key (no `<field>Label` added).\n * - **`fallback`** — single locale code or ordered list. 
Use `'any'` as\n * the last element to fall back to any present translation.\n *\n * When neither the call-level locale nor the compartment's default locale\n * is set, reading a record with `i18nText` fields throws\n * `LocaleNotSpecifiedError`.\n */\nexport interface LocaleReadOptions {\n /**\n * The target locale code (e.g. `'th'`), or `'raw'` to return the full\n * language map without resolution.\n */\n readonly locale?: string\n /**\n * Fallback locale or ordered fallback chain. Use `'any'` as the last\n * element to fall back to any present translation.\n */\n readonly fallback?: string | readonly string[]\n}\n\n// ─── plaintextTranslator hook ──────────────────────────────\n\n/**\n * Context passed to the consumer-supplied `plaintextTranslator` function.\n * The hook receives the source text plus enough metadata to route it to the\n * right translation service and record what it did.\n */\nexport interface PlaintextTranslatorContext {\n /** The plaintext string to translate. */\n readonly text: string\n /** BCP 47 source locale (the locale the text is written in). */\n readonly from: string\n /** BCP 47 target locale to translate into. */\n readonly to: string\n /** The schema field name that triggered the translation. */\n readonly field: string\n /** The collection the record is being put into. */\n readonly collection: string\n}\n\n/**\n * A consumer-supplied async function that translates a single string\n * from one locale to another. noy-db ships no built-in translator.\n *\n * **Security:** this function receives plaintext. The consumer is\n * responsible for the data policy of whatever service it calls. See\n * `NOYDB_SPEC.md § Zero-Knowledge Storage` and the `plaintextTranslator`\n * JSDoc on `NoydbOptions` for the full invariant statement.\n */\nexport type PlaintextTranslatorFn = (\n ctx: PlaintextTranslatorContext,\n) => Promise<string>\n\n/**\n * One entry in the in-process translator audit log. Cleared when\n * `db.close()` is called — same lifetime as the KEK and DEKs.\n *\n * Deliberately omits any content hash or translated-text fingerprint\n * to prevent correlation attacks on the audit trail.\n */\nexport interface TranslatorAuditEntry {\n readonly type: 'translator-invocation'\n /** Schema field name that was translated. */\n readonly field: string\n /** Collection the record belongs to. */\n readonly collection: string\n /** Source locale. */\n readonly fromLocale: string\n /** Target locale. */\n readonly toLocale: string\n /**\n * Consumer-provided translator name from\n * `NoydbOptions.plaintextTranslatorName`. Defaults to `'anonymous'`\n * when not supplied.\n */\n readonly translatorName: string\n /** ISO 8601 timestamp of the invocation. */\n readonly timestamp: string\n /**\n * `true` when the result was served from the in-process cache rather\n * than by calling the translator function. Present only on cache hits\n * so the absence of the field also communicates a cache miss.\n */\n readonly cached?: true\n}\n\n// ─── Presence ─────────────────────────────────────────────\n\n/**\n * A presence peer entry. `lastSeen` is an ISO timestamp set by core on each\n * `update()` call. 
Stale entries (lastSeen older than `staleMs`) are filtered\n * before delivering to the subscriber callback.\n */\nexport interface PresencePeer<P> {\n readonly userId: string\n readonly payload: P\n readonly lastSeen: string\n}\n\n// ─── CRDT ─────────────────────────────────────────────────\n\n// Re-exported from crdt.ts so consumers only need one import path.\nexport type { CrdtMode, CrdtState, LwwMapState, RgaState, YjsState } from './crdt/crdt.js'\n\n// ─── Blob / Attachment Store ────────────────────────\n\n/**\n * Second store shape for blob-store backends (Drive, WebDAV, Git, iCloud)\n * that operate on whole-vault bundles rather than per-record KV.\n *\n * Implement `readBundle` / `writeBundle` instead of the six-method KV\n * contract. Use `wrapBundleStore()` from `@noy-db/hub` to convert to a\n * `NoydbStore` that the rest of the API consumes transparently.\n *\n * Named `NoydbBundleStore` (not `NoydbBundleAdapter`) for consistency\n * with the hub / to-* / in-* rename. Concrete implementations ship\n * in `@noy-db/to-*` packages starting in.\n */\nexport interface NoydbBundleStore {\n /** Discriminant for engine auto-detection of store shape. */\n readonly kind: 'bundle'\n /** Human-readable name for diagnostics (e.g. `'drive'`, `'webdav'`). */\n readonly name?: string\n /**\n * Read the entire vault as raw bytes. Returns `null` if no bundle exists\n * yet (first open of a brand-new vault).\n */\n readBundle(vaultId: string): Promise<{ bytes: Uint8Array; version: string } | null>\n /**\n * Write the entire vault as raw bytes. `expectedVersion` is the version\n * token from the last `readBundle` (or `null` for a first write).\n * Implementations MUST reject the write if the stored version has advanced\n * past `expectedVersion` — throw `BundleVersionConflictError`.\n * Returns the new version token on success.\n */\n writeBundle(\n vaultId: string,\n bytes: Uint8Array,\n expectedVersion: string | null,\n ): Promise<{ version: string }>\n /** Delete a vault bundle. Idempotent — no-op if the bundle does not exist. */\n deleteBundle(vaultId: string): Promise<void>\n /** List all vault bundles managed by this store. */\n listBundles(): Promise<Array<{ vaultId: string; version: string; size: number }>>\n}\n\n/**\n * Content-addressed blob object stored in the vault-level blob index.\n * Identified by HMAC-SHA-256(blobDEK, plaintext) — opaque to the store.\n *\n * Shared across all collections within a vault for deduplication: two\n * records that attach identical byte content reference the same `eTag`\n * and share a single set of encrypted chunks in `_blob_chunks`.\n */\nexport interface BlobObject {\n /** HMAC-SHA-256 hex of the original plaintext bytes, keyed by `_blob` DEK. */\n readonly eTag: string\n /** Original uncompressed size in bytes. */\n readonly size: number\n /** Compressed size in bytes (the payload that is actually encrypted and chunked). */\n readonly compressedSize: number\n /** Compression algorithm applied before encryption. */\n readonly compression: 'gzip' | 'none'\n /** Raw chunk size in bytes used at write time. Readers MUST use this value. */\n readonly chunkSize: number\n /** Total number of chunks written. Reader expects exactly this many. */\n readonly chunkCount: number\n /** MIME type if provided or auto-detected at upload time. */\n readonly mimeType?: string\n /** ISO timestamp of first upload. */\n readonly createdAt: string\n /** Live reference count — slots + published versions pointing to this blob. 
*/\n readonly refCount: number\n /**\n * Hint indicating which store holds the chunk data.\n * Used by `routeStore` size-tiered routing: `'default'` for small blobs\n * stored inline (e.g. DynamoDB), `'blobs'` for large blobs in the overflow\n * store (e.g. S3). Absent when no routing is configured.\n */\n readonly storeHint?: 'default' | 'blobs'\n}\n\n// ─── Attachment types ─────────────────────────────────────────\n\n/** Single attachment metadata entry stored inside a record's attachment envelope. */\nexport interface AttachmentEntry {\n /** Content-addressed identifier (HMAC-SHA-256 of plaintext). */\n readonly eTag: string\n /** User-visible filename for the slot. */\n readonly filename: string\n /** Original uncompressed size in bytes. */\n readonly size: number\n /** MIME type, if provided or auto-detected at upload time. */\n readonly mimeType?: string\n /** ISO timestamp of the upload. */\n readonly uploadedAt: string\n /** User ID of the uploader, if available. */\n readonly uploadedBy?: string\n}\n\n/** Attachment entry annotated with its slot name, as returned by `AttachmentHandle.list()`. */\nexport type AttachmentInfo = AttachmentEntry & { readonly name: string }\n\n/** Options for `AttachmentHandle.put()`. */\nexport interface AttachmentPutOptions {\n /** Compress the attachment with gzip before encryption. Default: `true`. */\n compress?: boolean\n /** Chunk size in bytes. Default: `DEFAULT_CHUNK_SIZE` (256 KB). */\n chunkSize?: number\n /** MIME type to store with the attachment. Auto-detected from magic bytes if omitted. */\n mimeType?: string\n /** User ID to record as the uploader. Falls back to the active user's ID. */\n uploadedBy?: string\n}\n\n/** Options for `AttachmentHandle.response()`. */\nexport interface AttachmentResponseOptions {\n /**\n * Set `Content-Disposition: inline` so the browser renders the file\n * instead of downloading it. Default: `false` (attachment disposition).\n */\n inline?: boolean\n}\n\n/**\n * Slot record — mutable metadata linking a named slot on a record\n * to a `BlobObject` via its eTag.\n *\n * Multiple slots (even across different records) may reference the same\n * `eTag` — the underlying chunks are shared. Updating metadata creates\n * a new envelope version (`_v++`) while the blob data is unchanged.\n */\nexport interface SlotRecord {\n /** Reference to the `BlobObject` in `_blob_index`. */\n readonly eTag: string\n /** User-visible filename for the slot. */\n readonly filename: string\n /** Original uncompressed size in bytes (denormalized from `BlobObject`). */\n readonly size: number\n /** MIME type. Takes precedence over the MIME type stored in `BlobObject`. */\n readonly mimeType?: string\n /** ISO timestamp of the upload that set this slot. */\n readonly uploadedAt: string\n /** User ID of the uploader, if available. */\n readonly uploadedBy?: string\n}\n\n/** Result of `BlobSet.list()` — slot record plus its named slot key. */\nexport interface SlotInfo extends SlotRecord {\n /** The slot name (key in the record's slot map). */\n readonly name: string\n}\n\n/**\n * Explicitly published version snapshot — an independent reference to a\n * blob at a specific point in time.\n */\nexport interface VersionRecord {\n /** User-defined label (e.g. `'issued-2025-01'`, `'amendment-2025-02'`). */\n readonly label: string\n /** eTag of the blob snapshot at publish time — independent of the current slot. */\n readonly eTag: string\n /** ISO timestamp when the version was published. 
*/\n readonly publishedAt: string\n /** User ID of the publisher, if available. */\n readonly publishedBy?: string\n}\n\n/** Options for `BlobSet.put()`. */\nexport interface BlobPutOptions {\n /** MIME type hint. If omitted, auto-detected from magic bytes. */\n mimeType?: string\n /**\n * Raw chunk size in bytes. Priority: this value > store.maxBlobBytes > 256 KB.\n */\n chunkSize?: number\n /**\n * Whether to gzip-compress bytes before encrypting. Default: `true`.\n * Auto-set to `false` for pre-compressed MIME types (JPEG, PNG, ZIP, etc.).\n */\n compress?: boolean\n /** User ID to record as `uploadedBy`. Defaults to the Noydb session user. */\n uploadedBy?: string\n}\n\n/** Options for `BlobSet.response()` and `BlobSet.responseVersion()`. */\nexport interface BlobResponseOptions {\n /**\n * When `true`, sets `Content-Disposition: inline; filename=\"...\"` so\n * the browser renders the file in the tab. Default (`false`) sets\n * `attachment; filename=\"...\"` which triggers a download.\n */\n inline?: boolean\n /** Override the filename in the Content-Disposition header. */\n filename?: string\n}\n\n// ─── Store Capabilities ─────────────────────────────\n\nexport type StoreAuthKind =\n | 'none'\n | 'filesystem'\n | 'api-key'\n | 'iam'\n | 'oauth'\n | 'kerberos'\n | 'browser-origin'\n\nexport interface StoreAuth {\n kind: StoreAuthKind | StoreAuthKind[]\n required: boolean\n flow: 'static' | 'oauth' | 'kerberos' | 'implicit'\n}\n\nexport interface StoreCapabilities {\n /**\n * true — the store's expectedVersion check and write are atomic at the\n * storage layer. Two concurrent puts with the same expectedVersion will\n * produce exactly one success and one ConflictError.\n * false — check and write are separate operations with a race window.\n */\n casAtomic: boolean\n auth: StoreAuth\n /**\n * true — the store implements {@link NoydbStore.tx} and commits\n * every op atomically at the storage layer. The hub's\n * `db.transaction(fn)` will delegate to `tx(ops)` and surface a\n * single pass/fail outcome. false (or absent) — no native\n * multi-record atomicity; the hub falls back to per-record OCC\n * with best-effort unwind on partial failure.\n */\n txAtomic?: boolean\n /**\n * Maximum raw bytes per blob chunk record.\n * `undefined` — no limit (S3, file, IDB); blob stored as single chunk.\n * `256 * 1024` — DynamoDB (400 KB item limit minus envelope overhead).\n * `5 * 1024 * 1024` — localStorage quota safety.\n */\n maxBlobBytes?: number\n}\n\n// ─── Factory Options ───────────────────────────────────────────────────\n\nexport interface NoydbOptions {\n /** Primary store (local storage). */\n readonly store: NoydbStore\n /**\n * tree-shake seam — optional blob strategy. Pass `withBlobs()`\n * from `@noy-db/hub/blobs` to enable `collection.blob(id)` storage.\n * When omitted, hub's blob machinery stays out of the bundle (ESM\n * tree-shaking) and `collection.blob(id)` throws with a pointer at\n * the subpath. `BlobStrategy` is `@internal` — users only construct\n * it via the subpath factory.\n *\n * @internal\n */\n readonly blobStrategy?: BlobStrategy\n /**\n * tree-shake seam — optional indexing strategy. Pass\n * `withIndexing()` from `@noy-db/hub/indexing` to enable eager-mode\n * `==/in` fast-paths, lazy-mode `.lazyQuery()`, rebuild/reconcile,\n * and auto-reconcile. 
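Wiring is a\n * single option at construction time. A hedged sketch (whether the factory\n * takes configuration is not shown here; `store` and `passphrase` are\n * placeholders):\n *\n * ```ts\n * import { createNoydb } from '@noy-db/hub'\n * import { withIndexing } from '@noy-db/hub/indexing'\n *\n * const db = await createNoydb({\n *   store,\n *   user: 'alice',\n *   secret: passphrase,\n *   indexStrategy: withIndexing(),\n * })\n * ```\n *\n * 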
When omitted, indexing code never reaches the\n * bundle; `.lazyQuery()` throws with a pointer at the subpath, and\n * eager-mode collections fall back to linear scans regardless of\n * `indexes: [...]` declarations. `IndexStrategy` is `@internal` —\n * users only construct it via the subpath factory.\n *\n * @internal\n */\n readonly indexStrategy?: IndexStrategy\n /**\n * tree-shake seam — optional aggregate strategy. Pass\n * `withAggregate()` from `@noy-db/hub/aggregate` to enable\n * `.aggregate()` and `.groupBy()` on Query. When omitted, those\n * methods throw with a pointer at the subpath; the ~886 LOC of\n * Aggregation + GroupedQuery machinery never reaches the bundle.\n * Streaming `scan().aggregate()` works independently of this\n * strategy — it doesn't use the `Aggregation` class.\n *\n * @internal\n */\n readonly aggregateStrategy?: AggregateStrategy\n /**\n * tree-shake seam — optional CRDT strategy. Required when\n * any collection is declared with `crdt: 'lww-map' | 'rga' | 'yjs'`;\n * otherwise the first put/sync-merge hitting the CRDT path throws.\n * When omitted, ~221 LOC of LWW-Map / RGA / merge helpers never\n * reach the bundle.\n *\n * @internal\n */\n readonly crdtStrategy?: CrdtStrategy\n /**\n * tree-shake seam — optional consent-audit strategy. Pass\n * `withConsent()` from `@noy-db/hub/consent` to enable per-op audit\n * writes into `_consent_audit` when a consent scope is active.\n * When omitted, `vault.consentAudit()` returns `[]` and writes are\n * no-ops; the consent module's ~194 LOC never reaches the bundle.\n *\n * @internal\n */\n readonly consentStrategy?: ConsentStrategy\n /**\n * tree-shake seam — optional periods strategy. Pass\n * `withPeriods()` from `@noy-db/hub/periods` to enable\n * `vault.closePeriod()` / `.openPeriod()` / write-guard on closed\n * periods. When omitted, `vault.listPeriods()` returns `[]` and\n * the write-guard is a no-op; the ~363 LOC of period validation +\n * ledger appending stay out of the bundle.\n *\n * @internal\n */\n readonly periodsStrategy?: PeriodsStrategy\n /**\n * tree-shake seam — optional VaultFrame strategy. Pass\n * `withShadow()` from `@noy-db/hub/shadow` to enable\n * `vault.frame()`. Without it, calling `vault.frame()` throws.\n *\n * @internal\n */\n readonly shadowStrategy?: ShadowStrategy\n /**\n * tree-shake seam — optional multi-record transactions. Pass\n * `withTransactions()` from `@noy-db/hub/tx` to enable\n * `db.transaction(fn)`. Without it, calling the method throws.\n *\n * @internal\n */\n readonly txStrategy?: TxStrategy\n /**\n * tree-shake seam — optional history + ledger + time-machine.\n * Pass `withHistory()` from `@noy-db/hub/history` to enable\n * per-record version snapshots, the hash-chained audit ledger, JSON\n * Patch deltas, `vault.ledger()`, `vault.at()`, and the\n * `collection.history()` / `getVersion()` / `revert()` / `diff()` /\n * `clearHistory()` / `pruneRecordHistory()` read APIs. When omitted,\n * snapshots/prune/clear are silent no-ops, the read APIs throw with\n * a pointer at the subpath, and ~1,880 LOC stay out of the bundle.\n *\n * @internal\n */\n readonly historyStrategy?: HistoryStrategy\n /**\n * tree-shake seam — optional i18n strategy. Pass `withI18n()`\n * from `@noy-db/hub/i18n` to enable `i18nText`/`dictKey` field\n * resolution on reads, `i18nText` validation on writes, and\n * `vault.dictionary(name)`. 
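With the\n * strategy wired, reads accept `LocaleReadOptions`. A sketch (how the vault\n * handle is obtained is elided; the collection name and record shape are\n * illustrative):\n *\n * ```ts\n * import { withI18n } from '@noy-db/hub/i18n'\n *\n * const db = await createNoydb({ store, user, secret, i18nStrategy: withI18n() })\n * const notes = vault.collection('notes')\n * // Resolve to Thai, then English, then any present translation.\n * const note = await notes.get(id, { locale: 'th', fallback: ['en', 'any'] })\n * ```\n *\n * 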
When omitted, locale resolution is the\n * identity (raw values returned), the validators throw with a\n * pointer to the subpath, and ~854 LOC of dictionary + locale\n * machinery stay out of the bundle.\n *\n * @internal\n */\n readonly i18nStrategy?: I18nStrategy\n /**\n * tree-shake seam — optional session-policy strategy. Pass\n * `withSession()` from `@noy-db/hub/session` to enable\n * `sessionPolicy` validation, `PolicyEnforcer` lifecycle (idle /\n * absolute timeouts, lockOnBackground), and global session-token\n * revocation. When omitted, setting `sessionPolicy` throws at\n * `createNoydb()` time, and ~495 LOC of policy + token machinery\n * stay out of the bundle.\n *\n * @internal\n */\n readonly sessionStrategy?: SessionStrategy\n /**\n * tree-shake seam — optional sync engine + presence strategy.\n * Pass `withSync()` from `@noy-db/hub/sync` to enable\n * `db.push()` / `pull()` / replication, `db.transaction(vault)`\n * for sync-aware transactions, and `collection.presence()`. When\n * omitted, configuring `sync` / calling these surfaces throws with\n * a pointer at the subpath, and ~856 LOC of replication + presence\n * machinery stay out of the bundle. Keyring stays core; grant/\n * revoke/magic-link/delegation tree-shake via direct imports.\n *\n * @internal\n */\n readonly syncStrategy?: SyncStrategy\n /** Optional remote store(s) for sync. Accepts a single store, a SyncTarget, or an array. */\n readonly sync?: NoydbStore | SyncTarget | SyncTarget[]\n /** User identifier. */\n readonly user: string\n /** Passphrase for key derivation. Required unless encrypt is false. */\n readonly secret?: string\n /** Auth method. Default: 'passphrase'. */\n readonly auth?: 'passphrase' | 'biometric'\n /** Enable encryption. Default: true. */\n readonly encrypt?: boolean\n /** Conflict resolution strategy. Default: 'version'. */\n readonly conflict?: ConflictStrategy\n /**\n * Sync scheduling policy. Controls when push/pull fire.\n * Default inferred from store category: per-record → `on-change`,\n * bundle → `debounce 30s`.\n */\n readonly syncPolicy?: SyncPolicy\n /**\n * @deprecated Use `syncPolicy` instead. Kept for backward compatibility.\n * When both are supplied, `syncPolicy` takes precedence.\n */\n readonly autoSync?: boolean\n /**\n * @deprecated Use `syncPolicy` instead. Kept for backward compatibility.\n */\n readonly syncInterval?: number\n /**\n * Session timeout in ms. Clears keys after inactivity. Default: none.\n * @deprecated Use `sessionPolicy.idleTimeoutMs` instead. This field is\n * still honored for backwards compatibility but `sessionPolicy` takes\n * precedence when both are supplied.\n */\n readonly sessionTimeout?: number\n /**\n * Session policy controlling lifetime, re-auth requirements, and\n * background-lock behavior. When supplied, replaces the\n * legacy `sessionTimeout` field.\n */\n readonly sessionPolicy?: SessionPolicy\n /** Validate passphrase strength on creation. Default: true. */\n readonly validatePassphrase?: boolean\n /** Audit history configuration. */\n readonly history?: HistoryConfig\n /**\n * Consumer-supplied translation function for `i18nText` fields with\n * `autoTranslate: true`.\n *\n * ⚠ **`plaintextTranslator` receives unencrypted text.** Configuring\n * this hook causes plaintext to leave noy-db's zero-knowledge boundary\n * over whatever channel the consumer's implementation uses. 
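A hedged\n * wiring sketch, where `callTranslationService` is a hypothetical\n * consumer-owned client and not part of noy-db:\n *\n * ```ts\n * const db = await createNoydb({\n *   store,\n *   user: 'alice',\n *   secret: passphrase,\n *   plaintextTranslatorName: 'deepl-pro-with-dpa',\n *   plaintextTranslator: async ({ text, from, to }) => {\n *     // `text` is PLAINTEXT at this point; it leaves the zero-knowledge\n *     // boundary over whatever channel this call uses.\n *     return callTranslationService(text, { from, to }) // hypothetical\n *   },\n * })\n * ```\n *\n * 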
noy-db ships\n * no built-in translator and adds no translator SDKs as dependencies.\n * The consumer chooses and owns the data policy of the external service.\n *\n * Per-field opt-in via `autoTranslate: true` on `i18nText()`. Calling\n * `put()` on a collection with `autoTranslate: true` fields while this\n * option is absent throws `TranslatorNotConfiguredError`.\n *\n * See `NOYDB_SPEC.md § Zero-Knowledge Storage` for the invariant text.\n */\n readonly plaintextTranslator?: PlaintextTranslatorFn\n /**\n * Human-readable name for the translator, recorded in the in-process\n * audit log (e.g. `'deepl-pro-with-dpa'`, `'self-hosted-llama-7b'`).\n * Defaults to `'anonymous'` when not supplied.\n */\n readonly plaintextTranslatorName?: string\n}\n\n// ─── History / Audit Trail ─────────────────────────────────────────────\n\n/** History configuration. */\nexport interface HistoryConfig {\n /** Enable history tracking. Default: true. */\n readonly enabled?: boolean\n /** Maximum history entries per record. Oldest pruned on overflow. Default: unlimited. */\n readonly maxVersions?: number\n}\n\n/** Options for querying history. */\nexport interface HistoryOptions {\n /** Start date (inclusive), ISO 8601. */\n readonly from?: string\n /** End date (inclusive), ISO 8601. */\n readonly to?: string\n /** Maximum entries to return. */\n readonly limit?: number\n}\n\n/** Options for pruning history. */\nexport interface PruneOptions {\n /** Keep only the N most recent versions. */\n readonly keepVersions?: number\n /** Delete versions older than this date, ISO 8601. */\n readonly beforeDate?: string\n}\n\n/** A decrypted history entry. */\nexport interface HistoryEntry<T> {\n readonly version: number\n readonly timestamp: string\n readonly userId: string\n readonly record: T\n}\n\n// ─── Bulk operations ──────────────────────────────────────\n\n/** Per-item options for `Collection.putMany()`. */\nexport interface PutManyItemOptions {\n /**\n * Optimistic-concurrency check: fail this item if the stored version\n * is not `expectedVersion`. Honored only in `atomic: true` mode;\n * ignored in the default best-effort loop.\n */\n readonly expectedVersion?: number\n}\n\n/**\n * Batch-level options for `Collection.putMany()` and `deleteMany()`.\n *\n * `atomic: true` switches the call from best-effort loop\n * to all-or-nothing: a pre-flight CAS check runs first, then every op\n * is executed; any mid-batch failure triggers a best-effort revert.\n * On failure in atomic mode the whole call throws — you won't get a\n * partial `PutManyResult`. On success the result mirrors the default\n * loop's shape.\n */\nexport interface PutManyOptions {\n readonly atomic?: boolean\n}\n\n/** Result of `Collection.putMany()`. */\nexport interface PutManyResult {\n /** `true` iff every entry succeeded. */\n readonly ok: boolean\n /** IDs that were successfully written. */\n readonly success: readonly string[]\n /** Entries that failed, with the error that prevented each write. */\n readonly failures: ReadonlyArray<{ readonly id: string; readonly error: Error }>\n}\n\n/** Result of `Collection.deleteMany()`. Same shape as `PutManyResult`. 
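A hedged\n * sketch of consuming the shared result shape (the argument shapes passed\n * to the batch calls are assumptions):\n *\n * ```ts\n * const res = await tasks.deleteMany(ids) // default: best-effort loop\n * if (!res.ok) {\n *   for (const f of res.failures) console.warn(f.id, f.error.message)\n * }\n * await tasks.deleteMany(ids, { atomic: true }) // all-or-nothing: throws on failure\n * ```\n 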
*/\nexport interface DeleteManyResult {\n readonly ok: boolean\n readonly success: readonly string[]\n readonly failures: ReadonlyArray<{ readonly id: string; readonly error: Error }>\n}\n","/**\n * All NOYDB error classes — a single import surface for `catch` blocks and\n * `instanceof` checks.\n *\n * ## Class hierarchy\n *\n * ```\n * Error\n * └─ NoydbError (code: string)\n * ├─ Crypto errors\n * │ ├─ DecryptionError — AES-GCM tag failure\n * │ ├─ TamperedError — ciphertext modified after write\n * │ └─ InvalidKeyError — wrong passphrase / corrupt keyring\n * ├─ Access errors\n * │ ├─ NoAccessError — no DEK for this collection\n * │ ├─ ReadOnlyError — ro permission, write attempted\n * │ ├─ PermissionDeniedError — role too low for operation\n * │ ├─ PrivilegeEscalationError — grant wider than grantor holds\n * │ └─ StoreCapabilityError — optional store method missing\n * ├─ Sync errors\n * │ ├─ ConflictError — optimistic-lock version mismatch\n * │ ├─ BundleVersionConflictError — bundle push rejected by remote\n * │ └─ NetworkError — push/pull network failure\n * ├─ Data errors\n * │ ├─ NotFoundError — get(id) on missing record\n * │ ├─ ValidationError — application-level guard failed\n * │ └─ SchemaValidationError — Standard Schema v1 rejection\n * ├─ Query errors\n * │ ├─ JoinTooLargeError — join row ceiling exceeded\n * │ ├─ DanglingReferenceError — strict ref() points at nothing\n * │ ├─ GroupCardinalityError — groupBy bucket cap exceeded\n * │ ├─ IndexRequiredError — lazy-mode query touches unindexed field\n * │ └─ IndexWriteFailureError — index side-car put/delete failed post-main\n * ├─ i18n / Dictionary errors\n * │ ├─ ReservedCollectionNameError\n * │ ├─ DictKeyMissingError\n * │ ├─ DictKeyInUseError\n * │ ├─ MissingTranslationError\n * │ ├─ LocaleNotSpecifiedError\n * │ └─ TranslatorNotConfiguredError\n * ├─ Backup errors\n * │ ├─ BackupLedgerError — hash-chain verification failed\n * │ └─ BackupCorruptedError — envelope hash mismatch in dump\n * ├─ Bundle errors\n * │ └─ BundleIntegrityError — .noydb body sha256 mismatch\n * └─ Session errors\n * ├─ SessionExpiredError\n * ├─ SessionNotFoundError\n * └─ SessionPolicyError\n * ```\n *\n * ## Catching all NOYDB errors\n *\n * ```ts\n * import { NoydbError, InvalidKeyError, ConflictError } from '@noy-db/hub'\n *\n * try {\n * await vault.unlock(passphrase)\n * } catch (e) {\n * if (e instanceof InvalidKeyError) { showBadPassphraseUI(); return }\n * if (e instanceof NoydbError) { logToSentry(e.code, e); return }\n * throw e // unexpected — re-throw\n * }\n * ```\n *\n * @module\n */\n\n/**\n * Base class for all NOYDB errors.\n *\n * Every error thrown by `@noy-db/hub` extends this class, so consumers can\n * catch all NOYDB errors in a single `catch (e) { if (e instanceof NoydbError) ... }`\n * block. The `code` field is a machine-readable string (e.g. `'DECRYPTION_FAILED'`)\n * suitable for `switch` statements and logging pipelines.\n */\nexport class NoydbError extends Error {\n /** Machine-readable error code. Stable across library versions. 
*/\n readonly code: string\n\n constructor(code: string, message: string) {\n super(message)\n this.name = 'NoydbError'\n this.code = code\n }\n}\n\n// ─── Crypto Errors ─────────────────────────────────────────────────────\n\n/**\n * Thrown when AES-GCM decryption fails.\n *\n * The most common cause is a wrong passphrase or a corrupted ciphertext.\n * A `DecryptionError` at the wrong passphrase level is caught internally\n * and re-thrown as `InvalidKeyError` — so in practice this surfaces for\n * per-record corruption rather than authentication failures.\n */\nexport class DecryptionError extends NoydbError {\n constructor(message = 'Decryption failed') {\n super('DECRYPTION_FAILED', message)\n this.name = 'DecryptionError'\n }\n}\n\n/**\n * Thrown when GCM tag verification fails, indicating the ciphertext was\n * modified after encryption.\n *\n * AES-256-GCM is authenticated encryption — the tag over the ciphertext\n * is checked on every decrypt. If any byte was flipped (accidental\n * corruption or deliberate tampering), decryption throws this error.\n * Treat it as a security alert: the stored bytes are not what NOYDB wrote.\n */\nexport class TamperedError extends NoydbError {\n constructor(message = 'Data integrity check failed — record may have been tampered with') {\n super('TAMPERED', message)\n this.name = 'TamperedError'\n }\n}\n\n/**\n * Thrown when key unwrapping fails, typically because the passphrase is wrong\n * or the keyring file is corrupted.\n *\n * NOYDB uses AES-KW (RFC 3394) to wrap DEKs with the KEK. If AES-KW\n * unwrapping fails, it means either the KEK was derived from the wrong\n * passphrase (PBKDF2 with 600K iterations) or the keyring bytes are\n * corrupted. This is the error shown to the user on a failed unlock attempt.\n */\nexport class InvalidKeyError extends NoydbError {\n constructor(message = 'Invalid key — wrong passphrase or corrupted keyring') {\n super('INVALID_KEY', message)\n this.name = 'InvalidKeyError'\n }\n}\n\n// ─── Access Errors ─────────────────────────────────────────────────────\n\n/**\n * Thrown when the authenticated user does not have a DEK for the requested\n * collection — i.e. the collection is not in their keyring at all.\n *\n * This is the \"no key for this door\" error. It is different from\n * `ReadOnlyError` (user has a key but it only grants ro) and from\n * `PermissionDeniedError` (user's role doesn't allow the operation).\n */\nexport class NoAccessError extends NoydbError {\n constructor(message = 'No access — user does not have a key for this collection') {\n super('NO_ACCESS', message)\n this.name = 'NoAccessError'\n }\n}\n\n/**\n * Thrown when a user with read-only (`ro`) permission attempts a write\n * operation (`put` or `delete`) on a collection.\n *\n * The user has a DEK for the collection (they can decrypt and read), but\n * their keyring grants only `ro`. To fix: re-grant the user with `rw`\n * permission, or do not attempt writes as a viewer/client role.\n */\nexport class ReadOnlyError extends NoydbError {\n constructor(message = 'Read-only — user has ro permission on this collection') {\n super('READ_ONLY', message)\n this.name = 'ReadOnlyError'\n }\n}\n\n/**\n * Thrown when a write is attempted against a historical view produced\n * by `vault.at(timestamp)`. 
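A\n * defensive sketch (the historical view is assumed to mirror the live\n * vault's collection surface; `showReadOnlyBanner` is a hypothetical UI\n * helper):\n *\n * ```ts\n * const past = await vault.at('2026-01-01T00:00:00Z')\n * const entry = await past.collection('ledger').get(id) // reads work\n * try {\n *   await past.collection('ledger').put(id, entry) // any write throws\n * } catch (e) {\n *   if (e instanceof ReadOnlyAtInstantError) showReadOnlyBanner(e)\n * }\n * ```\n *\n * 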
Time-machine views are read-only by\n * contract — mutating the past would require either the shadow-vault\n * mechanism or a ledger-history rewrite (which breaks\n * the tamper-evidence guarantee).\n *\n * Distinct from {@link ReadOnlyError} (keyring-level) and\n * {@link PermissionDeniedError} (role-level): this error is about the\n * *view* being historical, independent of the caller's permissions.\n */\nexport class ReadOnlyAtInstantError extends NoydbError {\n constructor(operation: string, timestamp: string) {\n super(\n 'READ_ONLY_AT_INSTANT',\n `Cannot ${operation}() on a vault view anchored at ${timestamp} — time-machine views are read-only`,\n )\n this.name = 'ReadOnlyAtInstantError'\n }\n}\n\n/**\n * Thrown when a write is attempted against a shadow-vault frame\n * produced by `vault.frame()`. Frames are read-only by contract —\n * the use case is screen-sharing / demos / compliance review where\n * the operator wants to prevent accidental edits.\n *\n * Behavioural enforcement only — the underlying keyring still holds\n * write-capable DEKs. See {@link VaultFrame} for the full caveat.\n */\nexport class ReadOnlyFrameError extends NoydbError {\n constructor(operation: string) {\n super(\n 'READ_ONLY_FRAME',\n `Cannot ${operation}() on a vault frame — frames are read-only presentations of the current vault`,\n )\n this.name = 'ReadOnlyFrameError'\n }\n}\n\n/**\n * Thrown when the authenticated user's role does not permit the requested\n * operation — e.g. a `viewer` calling `grantAccess()`, or an `operator`\n * calling `rotateKeys()`.\n *\n * This is a role-level check (what the user's role allows), distinct from\n * `NoAccessError` (collection not in keyring) and `ReadOnlyError` (in\n * keyring, but write not allowed).\n */\nexport class PermissionDeniedError extends NoydbError {\n constructor(message = 'Permission denied — insufficient role for this operation') {\n super('PERMISSION_DENIED', message)\n this.name = 'PermissionDeniedError'\n }\n}\n\n/**\n * Thrown when an `@noy-db/as-*` export is attempted without the\n * required capability bit on the invoking keyring.\n *\n * Two sub-cases discriminated by the `tier` field:\n *\n * - `tier: 'plaintext'` — a plaintext-tier export (`as-xlsx`,\n * `as-csv`, `as-blob`, `as-zip`, …) was attempted but the\n * keyring's `exportCapability.plaintext` does not include the\n * requested `format` (nor the `'*'` wildcard). Default for every\n * role is `plaintext: []` — the owner must positively grant.\n * - `tier: 'bundle'` — an encrypted `as-noydb` bundle export was\n * attempted but the keyring's `exportCapability.bundle` is\n * `false`. Default for `owner`/`admin` is `true`; for\n * `operator`/`viewer`/`client` it is `false`.\n *\n * Distinct from `PermissionDeniedError` (role-level check) and\n * `NoAccessError` (collection not readable). Surfaces separately so\n * UI layers can show a \"request the export capability from your\n * admin\" flow rather than a generic permission error.\n */\nexport class ExportCapabilityError extends NoydbError {\n readonly tier: 'plaintext' | 'bundle'\n readonly format?: string\n readonly userId: string\n\n constructor(opts: {\n tier: 'plaintext' | 'bundle'\n userId: string\n format?: string\n message?: string\n }) {\n const msg =\n opts.message ??\n (opts.tier === 'plaintext'\n ? `Export capability denied — keyring \"${opts.userId}\" is not granted plaintext-export capability for format \"${opts.format ?? '<unknown>'}\". 
Ask a vault owner or admin to grant it via vault.grant({ exportCapability: { plaintext: ['${opts.format ?? '<format>'}'] } }).`\n : `Export capability denied — keyring \"${opts.userId}\" is not granted encrypted-bundle export capability. Ask a vault owner or admin to grant it via vault.grant({ exportCapability: { bundle: true } }).`)\n super('EXPORT_CAPABILITY', msg)\n this.name = 'ExportCapabilityError'\n this.tier = opts.tier\n this.userId = opts.userId\n if (opts.format !== undefined) this.format = opts.format\n }\n}\n\n/**\n * Thrown when a keyring file's `expires_at` cutoff has passed.\n * Surfaced by `loadKeyring` before any DEK unwrap is attempted —\n * past the cutoff the slot refuses to open even with the right\n * passphrase. Distinct from PBKDF2 / unwrap errors so consumer code\n * can show a precise \"this bundle slot has expired\" message instead\n * of the generic decryption-failure UX.\n *\n * Used predominantly on `BundleRecipient` slots produced by\n * `writeNoydbBundle({ recipients: [...] })` to time-box audit access.\n */\nexport class KeyringExpiredError extends NoydbError {\n readonly userId: string\n readonly expiresAt: string\n constructor(opts: { userId: string; expiresAt: string }) {\n super(\n 'KEYRING_EXPIRED',\n `Keyring \"${opts.userId}\" expired at ${opts.expiresAt}. ` +\n 'The slot refuses to unlock past its expiry timestamp.',\n )\n this.name = 'KeyringExpiredError'\n this.userId = opts.userId\n this.expiresAt = opts.expiresAt\n }\n}\n\n/**\n * Thrown when an `@noy-db/as-*` import is attempted but the invoking\n * keyring lacks the required import-capability bit (issue ).\n *\n * - `tier: 'plaintext'` — a plaintext-tier import (`as-csv`, `as-json`,\n * `as-ndjson`, `as-zip`, …) was attempted but the keyring's\n * `importCapability.plaintext` does not include the requested\n * `format` (nor the `'*'` wildcard).\n * - `tier: 'bundle'` — a `.noydb` bundle import was attempted but the\n * keyring's `importCapability.bundle` is not `true`.\n *\n * Default for every role on every dimension is closed — owners and\n * admins must positively grant the capability. Distinct from\n * `PermissionDeniedError` and `NoAccessError` so UI layers can show a\n * specific \"request the import capability\" flow.\n */\nexport class ImportCapabilityError extends NoydbError {\n readonly tier: 'plaintext' | 'bundle'\n readonly format?: string\n readonly userId: string\n\n constructor(opts: {\n tier: 'plaintext' | 'bundle'\n userId: string\n format?: string\n message?: string\n }) {\n const msg =\n opts.message ??\n (opts.tier === 'plaintext'\n ? `Import capability denied — keyring \"${opts.userId}\" is not granted plaintext-import capability for format \"${opts.format ?? '<unknown>'}\". Ask a vault owner or admin to grant it via vault.grant({ importCapability: { plaintext: ['${opts.format ?? '<format>'}'] } }).`\n : `Import capability denied — keyring \"${opts.userId}\" is not granted encrypted-bundle import capability. 
Ask a vault owner or admin to grant it via vault.grant({ importCapability: { bundle: true } }).`)\n    super('IMPORT_CAPABILITY', msg)\n    this.name = 'ImportCapabilityError'\n    this.tier = opts.tier\n    this.userId = opts.userId\n    if (opts.format !== undefined) this.format = opts.format\n  }\n}\n\n/**\n * Thrown when a caller invokes an API that requires an optional\n * store capability the active store does not implement.\n *\n * Today the only call site is `Noydb.listAccessibleVaults()`,\n * which depends on the optional `NoydbStore.listVaults()`\n * method. The error message names the missing method and the calling\n * API so consumers know exactly which combination is unsupported,\n * and the `capability` field is machine-readable so library code can\n * pattern-match in catch blocks (e.g. fall back to a candidate-list\n * shape).\n *\n * The class lives in `errors.ts` rather than as a generic\n * `ValidationError` because the diagnostic shape is different: a\n * `ValidationError` says \"the inputs you passed are wrong\"; this\n * error says \"the inputs are fine, but the store you wired up\n * doesn't support what you're asking for.\" Different fix, different\n * documentation.\n */\nexport class StoreCapabilityError extends NoydbError {\n  /** The store method/capability that was missing. */\n  readonly capability: string\n\n  constructor(capability: string, callerApi: string, storeName?: string) {\n    super(\n      'STORE_CAPABILITY',\n      `${callerApi} requires the optional store capability \"${capability}\" ` +\n        `but the active store${storeName ? ` (${storeName})` : ''} does not implement it. ` +\n        `Use a store that supports \"${capability}\" (store-memory, store-file) or pass an explicit ` +\n        `vault list to bypass enumeration.`,\n    )\n    this.name = 'StoreCapabilityError'\n    this.capability = capability\n  }\n}\n\n/**\n * Thrown when a grant would give the grantee a permission the grantor\n * does not themselves hold — the \"admin cannot grant what admin cannot\n * do\" rule from the admin-delegation work.\n *\n * Distinct from `PermissionDeniedError` so callers can tell the two\n * cases apart in logs and tests:\n *\n * - `PermissionDeniedError` — \"you are not allowed to perform this\n *   operation at all\" (wrong role).\n * - `PrivilegeEscalationError` — \"you are allowed to grant, but not\n *   with these specific permissions\" (widening attempt).\n *\n * Under the admin model the grantee of an admin-grants-admin call\n * inherits the caller's entire DEK set by construction, so this error\n * is structurally unreachable in typical flows. The check and error\n * class exist so that future per-collection admin scoping cannot\n * accidentally bypass the subset rule — the guard is already wired in.\n *\n * `offendingCollection` carries the first collection name that failed\n * the subset check, to make the violation actionable in error output.\n */\nexport class PrivilegeEscalationError extends NoydbError {\n  readonly offendingCollection: string\n\n  constructor(offendingCollection: string, message?: string) {\n    super(\n      'PRIVILEGE_ESCALATION',\n      message ??\n        `Privilege escalation: grantor has no DEK for collection \"${offendingCollection}\" and cannot grant access to it.`,\n    )\n    this.name = 'PrivilegeEscalationError'\n    this.offendingCollection = offendingCollection\n  }\n}\n\n/**\n * Thrown by `Collection.put` / `.delete` when the target record's\n * envelope `_ts` falls within a closed accounting period.\n *\n * Distinct from `ReadOnlyError` (keyring-level), `ReadOnlyAtInstantError`\n * (historical view), and `ReadOnlyFrameError` (shadow vault): this\n * error is about the STORED RECORD being sealed by an operator call\n * to `vault.closePeriod()`, independent of caller permissions or\n * view type. The `periodName` and `endDate` fields name the sealing\n * period so audit UIs can surface a \"this record is locked in\n * FY2026-Q1 (closed 2026-03-31)\" message without parsing the error\n * string.\n *\n * To apply a correction after close, book a compensating entry in a\n * new period rather than unlocking the old one. Re-opening a closed\n * period is deliberately unsupported.\n */\nexport class PeriodClosedError extends NoydbError {\n  readonly periodName: string\n  readonly endDate: string\n  readonly recordTs: string\n\n  constructor(periodName: string, endDate: string, recordTs: string) {\n    super(\n      'PERIOD_CLOSED',\n      `Cannot modify record (last written ${recordTs}) — sealed by closed period ` +\n        `\"${periodName}\" (endDate: ${endDate}). Post a compensating entry in a ` +\n        `new period instead.`,\n    )\n    this.name = 'PeriodClosedError'\n    this.periodName = periodName\n    this.endDate = endDate\n    this.recordTs = recordTs\n  }\n}\n\n// ─── Hierarchical Access Errors ─────────────────────\n\n/**\n * Thrown when a user tries to act at a tier they are not cleared for.\n *\n * This is the umbrella error for tier write refusals:\n * - `put({ tier: N })` when the user's keyring lacks tier-N DEK.\n * - `elevate(id, N)` when the caller cannot reach tier N.\n *\n * Distinct from `TierAccessDeniedError` which covers *read* refusals on\n * the invisibility/ghost path.\n */\nexport class TierNotGrantedError extends NoydbError {\n  readonly tier: number\n  readonly collection: string\n\n  constructor(collection: string, tier: number) {\n    super(\n      'TIER_NOT_GRANTED',\n      `User has no DEK for tier ${tier} in collection \"${collection}\"`,\n    )\n    this.name = 'TierNotGrantedError'\n    this.collection = collection\n    this.tier = tier\n  }\n}\n\n/**\n * Thrown when an elevated-handle operation runs after the elevation's\n * TTL expired. Reads continue at the original tier; only writes\n * through the scoped handle flip to throwing once expired.\n */\nexport class ElevationExpiredError extends NoydbError {\n  readonly tier: number\n  readonly expiresAt: number\n\n  constructor(opts: { tier: number; expiresAt: number }) {\n    super(\n      'ELEVATION_EXPIRED',\n      `Elevation to tier ${opts.tier} expired at ${new Date(opts.expiresAt).toISOString()}`,\n    )\n    this.name = 'ElevationExpiredError'\n    this.tier = opts.tier\n    this.expiresAt = opts.expiresAt\n  }\n}\n\n/**\n * Thrown by `vault.elevate(...)` when an elevation is already active\n * on the vault. 
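A hedged\n * sketch; the `elevate(id, N)` shape follows the tier docs above and the\n * handle surface is an assumption:\n *\n * ```ts\n * const handle = await vault.elevate(recordId, 3)\n * try {\n *   await handle.put(recordId, updated) // scoped tier-3 write\n * } finally {\n *   handle.release() // always release, or the next elevate() throws\n * }\n * ```\n *\n * 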
Adopters must `release()` the existing handle before\n * starting a new elevation.\n */\nexport class AlreadyElevatedError extends NoydbError {\n readonly activeTier: number\n\n constructor(activeTier: number) {\n super(\n 'ALREADY_ELEVATED',\n `Vault is already elevated to tier ${activeTier}; release the existing handle first`,\n )\n this.name = 'AlreadyElevatedError'\n this.activeTier = activeTier\n }\n}\n\n/**\n * Thrown when `demote()` is called by someone who is not the original\n * elevator and not an owner.\n */\nexport class TierDemoteDeniedError extends NoydbError {\n constructor(id: string, tier: number) {\n super(\n 'TIER_DEMOTE_DENIED',\n `Only the original elevator or an owner can demote record \"${id}\" from tier ${tier}`,\n )\n this.name = 'TierDemoteDeniedError'\n }\n}\n\n/**\n * Thrown when `db.delegate()` is called against a user that has no\n * keyring in the target vault — the delegation token cannot be\n * constructed without the target user's KEK wrap.\n */\nexport class DelegationTargetMissingError extends NoydbError {\n readonly toUser: string\n\n constructor(toUser: string) {\n super(\n 'DELEGATION_TARGET_MISSING',\n `Delegation target user \"${toUser}\" has no keyring in this vault`,\n )\n this.name = 'DelegationTargetMissingError'\n this.toUser = toUser\n }\n}\n\n// ─── Sync Errors ───────────────────────────────────────────────────────\n\n/**\n * Thrown when a `put()` detects an optimistic concurrency conflict.\n *\n * NOYDB uses version numbers (`_v`) for optimistic locking. If a `put()`\n * is called with `expectedVersion: N` but the stored record is at version\n * `M ≠ N`, the write is rejected and the caller must re-read, re-apply their\n * change, and retry. The `version` field carries the actual stored version\n * so callers can decide whether to retry or surface the conflict to the user.\n */\nexport class ConflictError extends NoydbError {\n /** The actual stored version at the time of conflict. */\n readonly version: number\n\n constructor(version: number, message = 'Version conflict') {\n super('CONFLICT', message)\n this.name = 'ConflictError'\n this.version = version\n }\n}\n\n/**\n * Thrown by `LedgerStore.append()` after exhausting its CAS retry\n * budget under multi-writer contention. Two browser tabs, a\n * web app + an offline mobile peer, or a server worker pool all\n * producing ledger entries against the same vault can race on the\n * \"read head, write head+1\" cycle; the optimistic-CAS retry loop\n * resolves the race for `casAtomic: true` stores, but pathological\n * contention (or a buggy peer) can still exhaust the budget. When\n * that happens, the chain is intact — the failed writer simply\n * couldn't claim a slot. Caller's choice whether to retry, queue,\n * or surface the failure to the user.\n */\nexport class LedgerContentionError extends NoydbError {\n readonly attempts: number\n\n constructor(attempts: number) {\n super(\n 'LEDGER_CONTENTION',\n `LedgerStore.append: failed to claim a chain slot after ${attempts} optimistic-CAS retries`,\n )\n this.name = 'LedgerContentionError'\n this.attempts = attempts\n }\n}\n\n/**\n * Thrown when a bundle push is rejected because the remote has been updated\n * since the local bundle was last pulled.\n *\n * Unlike `ConflictError` (per-record), this is a whole-bundle conflict —\n * the remote's bundle handle has changed. The caller must pull the new\n * bundle, merge, and re-push. 
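A minimal\n * retry sketch (one retry only; production code would loop with backoff):\n *\n * ```ts\n * try {\n *   await db.push()\n * } catch (e) {\n *   if (e instanceof BundleVersionConflictError) {\n *     await db.pull() // fetch and merge the newer remote bundle\n *     await db.push() // re-push the merged state\n *   } else {\n *     throw e\n *   }\n * }\n * ```\n *\n * 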
`remoteVersion` is the handle of the newer\n * remote bundle for use in diagnostics.\n */\nexport class BundleVersionConflictError extends NoydbError {\n /** The bundle handle of the newer remote version that rejected the push. */\n readonly remoteVersion: string\n\n constructor(remoteVersion: string, message = 'Bundle version conflict — remote has been updated') {\n super('BUNDLE_VERSION_CONFLICT', message)\n this.name = 'BundleVersionConflictError'\n this.remoteVersion = remoteVersion\n }\n}\n\n/**\n * Thrown when a sync operation (push or pull) fails due to a network error.\n *\n * NOYDB's offline-first design means network errors are expected during sync.\n * Callers should catch `NetworkError`, surface connectivity status in the UI,\n * and rely on the `SyncScheduler` to retry when connectivity is restored.\n */\nexport class NetworkError extends NoydbError {\n constructor(message = 'Network error') {\n super('NETWORK_ERROR', message)\n this.name = 'NetworkError'\n }\n}\n\n// ─── Data Errors ───────────────────────────────────────────────────────\n\n/**\n * Thrown when `collection.get(id)` is called with an ID that does not exist.\n *\n * NOYDB collections are memory-first, so this error is synchronous and cheap —\n * it does not make a network round-trip. Callers that expect the record to be\n * absent should use `collection.getOrNull(id)` instead.\n */\nexport class NotFoundError extends NoydbError {\n constructor(message = 'Record not found') {\n super('NOT_FOUND', message)\n this.name = 'NotFoundError'\n }\n}\n\n/**\n * Thrown when application-level validation fails before encryption.\n *\n * Distinct from `SchemaValidationError` (Standard Schema v1 validator)\n * and `MissingTranslationError` (i18nText). `ValidationError` is the\n * general-purpose validation base — use it for custom guards in `put()`\n * hooks or store middleware.\n */\nexport class ValidationError extends NoydbError {\n constructor(message = 'Validation error') {\n super('VALIDATION_ERROR', message)\n this.name = 'ValidationError'\n }\n}\n\n/**\n * Thrown when a Standard Schema v1 validator rejects a record on\n * `put()` (input validation) or on read (output validation). Carries\n * the raw issue list so callers can render field-level errors.\n *\n * `direction` distinguishes the two cases:\n * - `'input'`: the user passed bad data into `put()`. This is a\n * normal error case that application code should handle — typically\n * by showing validation messages in the UI.\n * - `'output'`: stored data does not match the current schema. This\n * indicates a schema drift (the schema was changed without\n * migrating the existing records) and should be treated as a bug\n * — the application should not swallow it silently.\n *\n * The `issues` type is deliberately `readonly unknown[]` on this class\n * so that `errors.ts` doesn't need to import from `schema.ts` (and\n * create a dependency cycle). 
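A handling\n * sketch (`renderFieldErrors` is a hypothetical UI helper and the `put`\n * signature is illustrative):\n *\n * ```ts\n * try {\n *   await users.put(id, input)\n * } catch (e) {\n *   if (e instanceof SchemaValidationError) {\n *     if (e.direction === 'output') throw e // schema drift: treat as a bug\n *     renderFieldErrors(e.issues) // 'input': surface field-level messages\n *   }\n * }\n * ```\n *\n * 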
Callers who know they're holding a\n * `SchemaValidationError` can cast to the more precise\n * `readonly StandardSchemaV1Issue[]` from `schema.ts`.\n */\nexport class SchemaValidationError extends NoydbError {\n  readonly issues: readonly unknown[]\n  readonly direction: 'input' | 'output'\n\n  constructor(\n    message: string,\n    issues: readonly unknown[],\n    direction: 'input' | 'output',\n  ) {\n    super('SCHEMA_VALIDATION_FAILED', message)\n    this.name = 'SchemaValidationError'\n    this.issues = issues\n    this.direction = direction\n  }\n}\n\n// ─── Query DSL Errors ─────────────────────────────────────────────────\n\n/**\n * Thrown when `.groupBy().aggregate()` produces more groups than the hard\n * cardinality cap (default 100_000).\n *\n * The cap exists because `.groupBy()` materializes one bucket per\n * distinct key value in memory, and runaway cardinality — a groupBy\n * on a high-uniqueness field like `id` or `createdAt` — is almost\n * always a query mistake rather than legitimate use. A hard error is\n * better than silent OOM: the consumer sees an actionable message\n * naming the field and the observed cardinality, with guidance to\n * either narrow the query with `.where()` or accept the ceiling\n * override.\n *\n * A separate one-shot warning fires at 10% of the cap (10_000\n * groups) so consumers get a heads-up before the hard error — same\n * pattern as `JoinTooLargeError` and the `.join()` row ceiling.\n *\n * **Not overridable in.** The 100k cap is a fixed constant so\n * the failure mode is consistent across the codebase; a\n * `{ maxGroups }` override can be added later without a break if a\n * real consumer asks.\n */\nexport class GroupCardinalityError extends NoydbError {\n  /** The field being grouped on. */\n  readonly field: string\n  /** Observed number of distinct groups at the moment the cap tripped. */\n  readonly cardinality: number\n  /** The cap that was exceeded. */\n  readonly maxGroups: number\n\n  constructor(field: string, cardinality: number, maxGroups: number) {\n    super(\n      'GROUP_CARDINALITY',\n      `.groupBy(\"${field}\") produced ${cardinality} distinct groups, ` +\n        `exceeding the ${maxGroups}-group ceiling. This is almost always a ` +\n        `query mistake — grouping on a high-uniqueness field like \"id\" or ` +\n        `\"createdAt\" produces one bucket per record. Narrow the query with ` +\n        `.where() before grouping, or group on a lower-cardinality field ` +\n        `(status, category, clientId). 
If you genuinely need high-cardinality ` +\n `grouping, file an issue with your use case.`,\n )\n this.name = 'GroupCardinalityError'\n this.field = field\n this.cardinality = cardinality\n this.maxGroups = maxGroups\n }\n}\n\n/**\n * Thrown in lazy mode when a `.query()` / `.where()` / `.orderBy()` clause\n * references a field that does not have a declared index.\n *\n * Lazy-mode queries only work when every touched field is indexed.\n * This is deliberate — silent scan-fallback would hide the performance\n * cliff that lazy-mode indexes exist to prevent.\n *\n * Payload:\n * - `collection` — name of the collection queried\n * - `touchedFields` — every field referenced by the query (filter + order)\n * - `missingFields` — subset of `touchedFields` that have no declared index\n */\nexport class IndexRequiredError extends NoydbError {\n readonly collection: string\n readonly touchedFields: readonly string[]\n readonly missingFields: readonly string[]\n\n constructor(args: { collection: string; touchedFields: readonly string[]; missingFields: readonly string[] }) {\n super(\n 'INDEX_REQUIRED',\n `Collection \"${args.collection}\": query references unindexed fields in lazy mode ` +\n `(missing: ${args.missingFields.join(', ')}). ` +\n `Declare an index on each field, or use collection.scan() for non-indexed iteration.`,\n )\n this.name = 'IndexRequiredError'\n this.collection = args.collection\n this.touchedFields = [...args.touchedFields]\n this.missingFields = [...args.missingFields]\n }\n}\n\n/**\n * Thrown (or surfaced via the `index:write-partial` event) when one or more\n * per-indexed-field side-car writes fail after the main record write has\n * already succeeded.\n *\n * Not thrown out of `.put()` / `.delete()` directly — those succeed when the\n * main record succeeds. Instead, `IndexWriteFailureError` instances are collected\n * into the session-scoped reconcile queue and emitted on the Collection\n * emitter as `index:write-partial`.\n *\n * Payload:\n * - `recordId` — the id of the main record whose side-car writes failed\n * - `field` — the indexed field whose side-car write failed\n * - `op` — `'put'` or `'delete'`, indicating which mutation was in flight\n * - `cause` — the underlying error from the store\n */\nexport class IndexWriteFailureError extends NoydbError {\n readonly recordId: string\n readonly field: string\n readonly op: 'put' | 'delete'\n override readonly cause: unknown\n\n constructor(args: { recordId: string; field: string; op: 'put' | 'delete'; cause: unknown }) {\n super(\n 'INDEX_WRITE_FAILURE',\n `Index side-car ${args.op} failed for field \"${args.field}\" on record \"${args.recordId}\"`,\n )\n this.name = 'IndexWriteFailureError'\n this.recordId = args.recordId\n this.field = args.field\n this.op = args.op\n this.cause = args.cause\n }\n}\n\n// ─── Bundle Format Errors ─────────────────────────────────\n\n/**\n * Thrown by `readNoydbBundle()` when the body bytes don't match\n * the integrity hash declared in the bundle header — i.e. someone\n * modified the bytes between write and read.\n *\n * Distinct from a generic `Error` (which would be thrown for\n * format violations like a missing magic prefix or malformed\n * header JSON) so consumers can pattern-match the corruption case\n * and handle it differently from a producer bug. 
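A\n * pattern-matching sketch (`quarantine` is a hypothetical handler and the\n * return value of `readNoydbBundle()` is elided):\n *\n * ```ts\n * try {\n *   await readNoydbBundle(bytes)\n * } catch (e) {\n *   if (e instanceof BundleIntegrityError) {\n *     quarantine(bytes) // bytes were corrupted in transit or at rest\n *   } else {\n *     throw e // not a valid bundle in the first place: producer bug\n *   }\n * }\n * ```\n *\n * 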
A\n * `BundleIntegrityError` indicates \"the bytes you got are not\n * what was written\"; a plain `Error` from `parsePrefixAndHeader`\n * indicates \"what was written wasn't a valid bundle in the first\n * place.\"\n *\n * Also thrown when decompression fails after the integrity hash\n * passed — that's a producer bug (the wrong algorithm byte was\n * written) but it surfaces with the same error class because the\n * end result is \"the body cannot be turned back into a dump.\"\n */\nexport class BundleIntegrityError extends NoydbError {\n constructor(message: string) {\n super('BUNDLE_INTEGRITY', `.noydb bundle integrity check failed: ${message}`)\n this.name = 'BundleIntegrityError'\n }\n}\n\n// ─── i18n / Dictionary Errors ──────────────────────────\n\n/**\n * Thrown when `vault.collection()` is called with a name that is\n * reserved for NOYDB internal use (any name starting with `_dict_`).\n *\n * Dictionary collections are accessed exclusively via\n * `vault.dictionary(name)` — attempting to open one as a regular\n * collection would bypass the dictionary invariants (ACL, rename\n * tracking, reserved-name policy).\n */\nexport class ReservedCollectionNameError extends NoydbError {\n /** The rejected collection name. */\n readonly collectionName: string\n\n constructor(collectionName: string) {\n super(\n 'RESERVED_COLLECTION_NAME',\n `\"${collectionName}\" is a reserved collection name. ` +\n `Use vault.dictionary(\"${collectionName.replace(/^_dict_/, '')}\") ` +\n `to access dictionary collections.`,\n )\n this.name = 'ReservedCollectionNameError'\n this.collectionName = collectionName\n }\n}\n\n/**\n * Thrown by `DictionaryHandle.get()` and `DictionaryHandle.delete()` when\n * the requested key does not exist in the dictionary.\n *\n * Distinct from `NotFoundError` (which is for data records) so callers\n * can distinguish \"data record missing\" from \"dictionary key missing\"\n * without inspecting error messages.\n */\nexport class DictKeyMissingError extends NoydbError {\n /** The dictionary name. */\n readonly dictionaryName: string\n /** The key that was not found. */\n readonly key: string\n\n constructor(dictionaryName: string, key: string) {\n super(\n 'DICT_KEY_MISSING',\n `Dictionary \"${dictionaryName}\" has no entry for key \"${key}\".`,\n )\n this.name = 'DictKeyMissingError'\n this.dictionaryName = dictionaryName\n this.key = key\n }\n}\n\n/**\n * Thrown by `DictionaryHandle.delete()` in strict mode when the key to\n * be deleted is still referenced by one or more records.\n *\n * The caller must either rename the key first (the only sanctioned\n * mass-mutation path) or pass `{ mode: 'warn' }` to skip the check\n * (development only).\n */\nexport class DictKeyInUseError extends NoydbError {\n /** The dictionary name. */\n readonly dictionaryName: string\n /** The key that is still referenced. */\n readonly key: string\n /** Name of the first collection found to reference this key. */\n readonly usedBy: string\n /** Number of records in `usedBy` that reference this key. */\n readonly count: number\n\n constructor(\n dictionaryName: string,\n key: string,\n usedBy: string,\n count: number,\n ) {\n super(\n 'DICT_KEY_IN_USE',\n `Cannot delete key \"${key}\" from dictionary \"${dictionaryName}\": ` +\n `${count} record(s) in \"${usedBy}\" still reference it. 
` +\n `Use dictionary.rename(\"${key}\", newKey) to rewrite references first.`,\n )\n this.name = 'DictKeyInUseError'\n this.dictionaryName = dictionaryName\n this.key = key\n this.usedBy = usedBy\n this.count = count\n }\n}\n\n/**\n * Thrown by `Collection.put()` when an `i18nText` field is missing one\n * or more required translations.\n *\n * The `missing` array names each locale code that was absent from the\n * field value. The `field` property names the field so callers can\n * render a field-level error message without parsing the string.\n */\nexport class MissingTranslationError extends NoydbError {\n /** The field name whose translation(s) are missing. */\n readonly field: string\n /** Locale codes that were required but absent. */\n readonly missing: readonly string[]\n\n constructor(field: string, missing: readonly string[], message?: string) {\n super(\n 'MISSING_TRANSLATION',\n message ??\n `Field \"${field}\": missing required translation(s): ${missing.join(', ')}.`,\n )\n this.name = 'MissingTranslationError'\n this.field = field\n this.missing = missing\n }\n}\n\n/**\n * Thrown when reading an `i18nText` field without specifying a locale —\n * either at the call site (`get(id, { locale })`) or on the vault\n * (`openVault(name, { locale })`).\n *\n * Also thrown when `resolveI18nText()` exhausts the fallback chain and\n * no translation is available for the requested locale.\n *\n * The `field` property names the field that triggered the error so the\n * caller can surface it in the UI.\n */\nexport class LocaleNotSpecifiedError extends NoydbError {\n /** The field name that required a locale. */\n readonly field: string\n\n constructor(field: string, message?: string) {\n super(\n 'LOCALE_NOT_SPECIFIED',\n message ??\n `Cannot read i18nText field \"${field}\" without a locale. ` +\n `Pass { locale } to get()/list()/query() or set a default via ` +\n `openVault(name, { locale }).`,\n )\n this.name = 'LocaleNotSpecifiedError'\n this.field = field\n }\n}\n\n// ─── Translator Errors ─────────────────────────────────────\n\n/**\n * Thrown when a collection has an `i18nText` field with\n * `autoTranslate: true` but no `plaintextTranslator` was configured\n * on `createNoydb()`.\n *\n * The error is raised at `put()` time (not at schema construction) so\n * the mis-configuration is surfaced by the first write rather than\n * silently at startup.\n */\nexport class TranslatorNotConfiguredError extends NoydbError {\n /** The field that requested auto-translation. */\n readonly field: string\n /** The collection the put was targeting. */\n readonly collection: string\n\n constructor(field: string, collection: string) {\n super(\n 'TRANSLATOR_NOT_CONFIGURED',\n `Field \"${field}\" in collection \"${collection}\" has autoTranslate: true, ` +\n `but no plaintextTranslator was configured on createNoydb(). 
` +\n `Either configure a plaintextTranslator or remove autoTranslate from the schema.`,\n )\n this.name = 'TranslatorNotConfiguredError'\n this.field = field\n this.collection = collection\n }\n}\n\n// ─── Backup Errors ─────────────────────────────────────────\n\n/**\n * Thrown when `Vault.load()` finds that a backup's hash chain\n * doesn't verify, or that its embedded `ledgerHead.hash` doesn't\n * match the chain head reconstructed from the loaded entries.\n *\n * Distinct from `BackupCorruptedError` so callers can choose to\n * recover from one but not the other (e.g., a corrupted JSON file is\n * unrecoverable; a chain mismatch might mean the backup is from an\n * incompatible noy-db version).\n */\nexport class BackupLedgerError extends NoydbError {\n /** First-broken-entry index, if known. */\n readonly divergedAt?: number\n\n constructor(message: string, divergedAt?: number) {\n super('BACKUP_LEDGER', message)\n this.name = 'BackupLedgerError'\n if (divergedAt !== undefined) this.divergedAt = divergedAt\n }\n}\n\n/**\n * Thrown when `Vault.load()` finds that the backup's data\n * collection content doesn't match the ledger's recorded\n * `payloadHash`es. This is the \"envelope was tampered with after\n * dump\" detection — the chain itself can be intact, but if any\n * encrypted record bytes were swapped, this check catches it.\n */\nexport class BackupCorruptedError extends NoydbError {\n /** The (collection, id) pair whose envelope failed the hash check. */\n readonly collection: string\n readonly id: string\n\n constructor(collection: string, id: string, message: string) {\n super('BACKUP_CORRUPTED', message)\n this.name = 'BackupCorruptedError'\n this.collection = collection\n this.id = id\n }\n}\n\n// ─── Session Errors ───────────────────────────────────────\n\n/**\n * Thrown by `resolveSession()` when the session token's `expiresAt`\n * timestamp is in the past. The session key is also removed from the\n * in-memory store when this is thrown, so retrying with the same sessionId\n * will produce `SessionNotFoundError`.\n *\n * Separate from `SessionNotFoundError` so callers can distinguish between\n * \"session is gone\" (key store cleared, tab reloaded) and \"session is\n * still in the store but has exceeded its lifetime\" (idle timeout, absolute\n * timeout, policy-driven expiry). The remediation differs: expired sessions\n * should prompt a fresh unlock; not-found sessions may indicate a bug or a\n * cross-tab scenario where the session was never established.\n */\nexport class SessionExpiredError extends NoydbError {\n readonly sessionId: string\n\n constructor(sessionId: string) {\n super('SESSION_EXPIRED', `Session \"${sessionId}\" has expired. Re-unlock to continue.`)\n this.name = 'SessionExpiredError'\n this.sessionId = sessionId\n }\n}\n\n/**\n * Thrown by `resolveSession()` when the session key cannot be found in\n * the module-level store. 
This happens when:\n * - The session was explicitly revoked via `revokeSession()`.\n * - The JS context was reloaded (tab navigation, page refresh, worker restart).\n * - `Noydb.close()` was called (which calls `revokeAllSessions()`).\n * - The sessionId is wrong or was generated by a different JS context.\n *\n * The session token (if the caller holds it) is permanently useless after\n * this error — the key is gone and cannot be recovered.\n */\nexport class SessionNotFoundError extends NoydbError {\n readonly sessionId: string\n\n constructor(sessionId: string) {\n super('SESSION_NOT_FOUND', `Session key for \"${sessionId}\" not found. The session may have been revoked or the page reloaded.`)\n this.name = 'SessionNotFoundError'\n this.sessionId = sessionId\n }\n}\n\n/**\n * Thrown when a session policy blocks an operation — for example,\n * `requireReAuthFor: ['export']` is set and the caller attempts to\n * call `exportStream()` without re-authenticating for this session.\n *\n * The `operation` field names the specific operation that was blocked\n * (e.g. `'export'`, `'grant'`, `'rotate'`) so the caller can surface\n * a targeted prompt (\"Please re-enter your passphrase to export data\").\n */\nexport class SessionPolicyError extends NoydbError {\n readonly operation: string\n\n constructor(operation: string, message?: string) {\n super(\n 'SESSION_POLICY',\n message ?? `Operation \"${operation}\" requires re-authentication per the active session policy.`,\n )\n this.name = 'SessionPolicyError'\n this.operation = operation\n }\n}\n\n// ─── Query / Join Errors ────────────────────────────────────\n\n/**\n * Thrown when a `.join()` would exceed its configured row ceiling on\n * either side. The ceiling defaults to 50,000 per side and can be\n * overridden via the `{ maxRows }` option on `.join()`.\n *\n * Carries both row counts so the error message can show which side\n * tripped the limit (e.g. \"left had 60,000 rows, right had 1,200,\n * max was 50,000\"). The `side` field is machine-readable so test\n * code and devtools can match on it without regex-parsing the\n * message.\n *\n * The row ceiling exists because joins are bounded in-memory\n * operations over materialized record sets. Consumers whose\n * collections genuinely exceed the ceiling should watch for the planned\n * follow-up (streaming joins over `scan()`) or filter the left side further\n * with `where()` / `limit()` before joining.\n */\nexport class JoinTooLargeError extends NoydbError {\n readonly leftRows: number\n readonly rightRows: number\n readonly maxRows: number\n readonly side: 'left' | 'right'\n\n constructor(opts: {\n leftRows: number\n rightRows: number\n maxRows: number\n side: 'left' | 'right'\n message: string\n }) {\n super('JOIN_TOO_LARGE', opts.message)\n this.name = 'JoinTooLargeError'\n this.leftRows = opts.leftRows\n this.rightRows = opts.rightRows\n this.maxRows = opts.maxRows\n this.side = opts.side\n }\n}\n\n/**\n * Thrown by `.join()` in strict `ref()` mode when a left-side record\n * points at a right-side id that does not exist in the target\n * collection.\n *\n * Distinct from `RefIntegrityError` so test code can pattern-match\n * on the *read-time* dangling case without catching *write-time*\n * integrity violations. 
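A sketch of the remediation split the docstrings above call for. `resolveSession()` is named in the docs; `promptUnlock` and `establishSession` are hypothetical app-side helpers, and the import path is assumed.

```ts
import { SessionExpiredError, SessionNotFoundError } from './errors.js' // path assumed

async function getSessionOrReauth(sessionId: string) {
  try {
    return await resolveSession(sessionId) // named in the docstrings above
  } catch (err) {
    if (err instanceof SessionExpiredError) {
      return promptUnlock(err.sessionId) // lifetime exceeded: ask for a fresh unlock
    }
    if (err instanceof SessionNotFoundError) {
      return establishSession() // key is gone for good; start a new session
    }
    throw err
  }
}
```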
Both indicate \"ref points at nothing\" but\n * happen at different lifecycle phases and deserve different\n * remediation in documentation: a RefIntegrityError on `put()`\n * means the input is invalid; a DanglingReferenceError on `.join()`\n * means stored data has drifted and `vault.checkIntegrity()`\n * is the right tool to find the full set of orphans.\n */\nexport class DanglingReferenceError extends NoydbError {\n readonly field: string\n readonly target: string\n readonly refId: string\n\n constructor(opts: {\n field: string\n target: string\n refId: string\n message: string\n }) {\n super('DANGLING_REFERENCE', opts.message)\n this.name = 'DanglingReferenceError'\n this.field = opts.field\n this.target = opts.target\n this.refId = opts.refId\n }\n}\n\n/**\n * Thrown by {@link sanitizeFilename} when an input filename cannot be\n * made safe — NUL byte, empty after normalization, missing\n * `opaqueId` for the opaque profile, `..` segment, or a `maxBytes`\n * cap too small to hold a single code point.\n */\nexport class FilenameSanitizationError extends NoydbError {\n constructor(message: string) {\n super('FILENAME_SANITIZATION', message)\n this.name = 'FilenameSanitizationError'\n }\n}\n\n/**\n * Thrown when a write target resolves OUTSIDE the requested\n * directory after sanitization — the canonical Zip-Slip class. The\n * sanitizer's job is to strip path-traversal segments; this error\n * is the defense-in-depth fallback at the FS write site.\n */\nexport class PathEscapeError extends NoydbError {\n readonly attempted: string\n readonly targetDir: string\n\n constructor(opts: { attempted: string; targetDir: string }) {\n super(\n 'PATH_ESCAPE',\n `Sanitized filename \"${opts.attempted}\" resolves outside target dir \"${opts.targetDir}\"`,\n )\n this.name = 'PathEscapeError'\n this.attempted = opts.attempted\n this.targetDir = opts.targetDir\n }\n}\n","/**\n * Cryptographic primitives — thin wrappers around the Web Crypto API.\n *\n * ## Design principle\n *\n * **Zero npm crypto dependencies.** Every operation uses `globalThis.crypto.subtle`,\n * which is available natively in Node.js ≥ 18, all modern browsers, and\n * Deno/Bun. 
This avoids supply-chain risk from third-party crypto packages and\n * ensures the library stays auditable.\n *\n * ## Algorithms\n *\n * | Use case | Algorithm | Parameters |\n * |----------|-----------|------------|\n * | Key derivation | PBKDF2-SHA256 | 600,000 iterations, 32-byte salt |\n * | Record encryption | AES-256-GCM | 12-byte random IV per operation |\n * | DEK wrapping | AES-KW (RFC 3394) | 256-bit KEK |\n * | Binary encrypt | AES-256-GCM | same as record encryption |\n * | Integrity | HMAC-SHA256 | for presence channels |\n * | Content hash | SHA-256 | for ledger and bundle integrity |\n *\n * ## Key lifecycle\n *\n * ```\n * passphrase + salt\n * └─► deriveKey() → KEK (CryptoKey, extractable: false)\n * └─► wrapKey() → wrapped DEK bytes [stored in keyring]\n * └─► unwrapKey() → DEK (CryptoKey) [memory only during session]\n * └─► encrypt() / decrypt() → ciphertext / plaintext\n * ```\n *\n * IVs are generated fresh by {@link generateIV} on every encrypt call.\n * Reusing an IV with the same key would break GCM's authentication guarantee —\n * this function should be the only place IVs are produced.\n *\n * @module\n */\n\nimport { DecryptionError, InvalidKeyError, TamperedError } from './errors.js'\n\nconst PBKDF2_ITERATIONS = 600_000\nconst SALT_BYTES = 32\nconst IV_BYTES = 12\nconst KEY_BITS = 256\n\nconst subtle = globalThis.crypto.subtle\n\n// ─── Key Derivation ────────────────────────────────────────────────────\n\n/** Derive a KEK from a passphrase and salt using PBKDF2-SHA256. */\nexport async function deriveKey(\n passphrase: string,\n salt: Uint8Array,\n): Promise<CryptoKey> {\n const keyMaterial = await subtle.importKey(\n 'raw',\n new TextEncoder().encode(passphrase),\n 'PBKDF2',\n false,\n ['deriveKey'],\n )\n\n return subtle.deriveKey(\n {\n name: 'PBKDF2',\n salt: salt as BufferSource,\n iterations: PBKDF2_ITERATIONS,\n hash: 'SHA-256',\n },\n keyMaterial,\n { name: 'AES-KW', length: KEY_BITS },\n false,\n ['wrapKey', 'unwrapKey'],\n )\n}\n\n// ─── DEK Generation ────────────────────────────────────────────────────\n\n/** Generate a random AES-256-GCM data encryption key. */\nexport async function generateDEK(): Promise<CryptoKey> {\n return subtle.generateKey(\n { name: 'AES-GCM', length: KEY_BITS },\n true, // extractable — needed for AES-KW wrapping\n ['encrypt', 'decrypt'],\n )\n}\n\n// ─── Key Wrapping ──────────────────────────────────────────────────────\n\n/** Wrap (encrypt) a DEK with a KEK using AES-KW. Returns base64 string. */\nexport async function wrapKey(dek: CryptoKey, kek: CryptoKey): Promise<string> {\n const wrapped = await subtle.wrapKey('raw', dek, kek, 'AES-KW')\n return bufferToBase64(wrapped)\n}\n\n/** Unwrap (decrypt) a DEK from base64 string using a KEK. */\nexport async function unwrapKey(\n wrappedBase64: string,\n kek: CryptoKey,\n): Promise<CryptoKey> {\n try {\n return await subtle.unwrapKey(\n 'raw',\n base64ToBuffer(wrappedBase64) as BufferSource,\n kek,\n 'AES-KW',\n { name: 'AES-GCM', length: KEY_BITS },\n true,\n ['encrypt', 'decrypt'],\n )\n } catch {\n throw new InvalidKeyError()\n }\n}\n\n// ─── Encrypt / Decrypt ─────────────────────────────────────────────────\n\nexport interface EncryptResult {\n iv: string // base64\n data: string // base64\n}\n\n/** Encrypt plaintext JSON string with AES-256-GCM. Fresh IV per call. 
*/\nexport async function encrypt(\n plaintext: string,\n dek: CryptoKey,\n): Promise<EncryptResult> {\n const iv = generateIV()\n const encoded = new TextEncoder().encode(plaintext)\n\n const ciphertext = await subtle.encrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n encoded,\n )\n\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/** Decrypt AES-256-GCM ciphertext. Throws on wrong key or tampered data. */\nexport async function decrypt(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n): Promise<string> {\n const iv = base64ToBuffer(ivBase64)\n const ciphertext = base64ToBuffer(dataBase64)\n\n try {\n const plaintext = await subtle.decrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n ciphertext as BufferSource,\n )\n return new TextDecoder().decode(plaintext)\n } catch (err) {\n if (err instanceof Error && err.name === 'OperationError') {\n throw new TamperedError()\n }\n throw new DecryptionError(\n err instanceof Error ? err.message : 'Decryption failed',\n )\n }\n}\n\n// ─── Binary Encrypt / Decrypt ────────\n\n/**\n * Encrypt raw bytes with AES-256-GCM using a fresh random IV.\n * Used by the attachment store so binary blobs avoid double base64 encoding\n * (the existing `encrypt()` function calls `TextEncoder` on a string — here\n * we pass the `Uint8Array` directly to `subtle.encrypt`).\n */\nexport async function encryptBytes(\n data: Uint8Array,\n dek: CryptoKey,\n): Promise<EncryptResult> {\n const iv = generateIV()\n const ciphertext = await subtle.encrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n data as unknown as BufferSource,\n )\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/**\n * Decrypt AES-256-GCM ciphertext back to raw bytes.\n * Counterpart to `encryptBytes`. Throws `TamperedError` on auth-tag failure.\n */\nexport async function decryptBytes(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n): Promise<Uint8Array> {\n const iv = base64ToBuffer(ivBase64)\n const ciphertext = base64ToBuffer(dataBase64)\n try {\n const plaintext = await subtle.decrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n ciphertext as BufferSource,\n )\n return new Uint8Array(plaintext)\n } catch (err) {\n if (err instanceof Error && err.name === 'OperationError') {\n throw new TamperedError()\n }\n throw new DecryptionError(\n err instanceof Error ? err.message : 'Decryption failed',\n )\n }\n}\n\n/**\n * SHA-256 hex digest of raw bytes. Used to derive content-addressed\n * eTags for blob deduplication. 
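A minimal end-to-end sketch of the key lifecycle diagrammed above, using only functions defined in this module; the passphrase and payload are placeholders.

```ts
import {
  deriveKey, generateDEK, generateSalt,
  wrapKey, unwrapKey, encrypt, decrypt,
} from './crypto.js'

const salt = generateSalt()
const kek = await deriveKey('correct horse battery staple', salt)

const dek = await generateDEK()
const wrapped = await wrapKey(dek, kek) // base64; this is what the keyring stores

const { iv, data } = await encrypt(JSON.stringify({ amount: 42 }), dek)

// Later session: re-derive the KEK from the same passphrase + salt, unwrap,
// then decrypt. A wrong passphrase surfaces as InvalidKeyError at unwrap time.
const dek2 = await unwrapKey(wrapped, kek)
const plaintext = await decrypt(iv, data, dek2) // '{"amount":42}'
```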
Computed on plaintext bytes\n * before compression and encryption so the eTag identifies content, not\n * ciphertext, and survives re-encryption (key rotation, re-upload).\n */\nexport async function sha256Hex(data: Uint8Array): Promise<string> {\n const hash = await subtle.digest('SHA-256', data as unknown as BufferSource)\n return Array.from(new Uint8Array(hash))\n .map((b) => b.toString(16).padStart(2, '0'))\n .join('')\n}\n\n// ─── HMAC-SHA-256 ─────────────────────────────\n\n/**\n * Compute HMAC-SHA-256(key, data) and return hex string.\n *\n * Used to derive content-addressed eTags that are opaque to the store:\n * ```\n * eTag = hmacSha256Hex(blobDEK, plaintext)\n * ```\n *\n * Unlike a plain SHA-256, the HMAC is keyed by the vault-shared `_blob` DEK,\n * so an attacker with store access cannot pre-compute eTags for known files.\n * Deduplication still works within a vault (same key + same content = same eTag).\n */\nexport async function hmacSha256Hex(key: CryptoKey, data: Uint8Array): Promise<string> {\n // Export AES-GCM DEK raw bytes → import as HMAC key\n const rawKey = await subtle.exportKey('raw', key)\n const hmacKey = await subtle.importKey(\n 'raw',\n rawKey,\n { name: 'HMAC', hash: 'SHA-256' },\n false,\n ['sign'],\n )\n const sig = await subtle.sign('HMAC', hmacKey, data as unknown as BufferSource)\n return Array.from(new Uint8Array(sig))\n .map((b) => b.toString(16).padStart(2, '0'))\n .join('')\n}\n\n// ─── AAD-aware Binary Encrypt / Decrypt ──\n\n/**\n * Encrypt raw bytes with AES-256-GCM using Additional Authenticated Data.\n *\n * The AAD binds each chunk to its parent blob and position, preventing\n * chunk reorder, substitution, and truncation attacks:\n * ```\n * AAD = UTF-8(\"{eTag}:{chunkIndex}:{chunkCount}\")\n * ```\n *\n * The AAD is NOT stored — the reader reconstructs it from `BlobObject`\n * metadata and passes it to `decryptBytesWithAAD`.\n */\nexport async function encryptBytesWithAAD(\n data: Uint8Array,\n dek: CryptoKey,\n aad: Uint8Array,\n): Promise<EncryptResult> {\n const iv = generateIV()\n const ciphertext = await subtle.encrypt(\n {\n name: 'AES-GCM',\n iv: iv as BufferSource,\n additionalData: aad as BufferSource,\n },\n dek,\n data as unknown as BufferSource,\n )\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/**\n * Decrypt AES-256-GCM ciphertext with AAD verification.\n *\n * If the AAD does not match the one used at encryption time (e.g. because\n * a chunk was reordered or substituted from another blob), the GCM auth\n * tag fails and this throws `TamperedError`.\n */\nexport async function decryptBytesWithAAD(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n aad: Uint8Array,\n): Promise<Uint8Array> {\n const iv = base64ToBuffer(ivBase64)\n const ciphertext = base64ToBuffer(dataBase64)\n try {\n const plaintext = await subtle.decrypt(\n {\n name: 'AES-GCM',\n iv: iv as BufferSource,\n additionalData: aad as BufferSource,\n },\n dek,\n ciphertext as BufferSource,\n )\n return new Uint8Array(plaintext)\n } catch (err) {\n if (err instanceof Error && err.name === 'OperationError') {\n throw new TamperedError()\n }\n throw new DecryptionError(\n err instanceof Error ? 
err.message : 'Decryption failed',\n )\n }\n}\n\n// ─── Presence Key Derivation ──────────────────────────────\n\n/**\n * Derive an AES-256-GCM presence key from a collection DEK using HKDF-SHA256.\n *\n * The presence key is domain-separated from the data DEK by the fixed salt\n * `'noydb-presence'` and the `info` = collection name. This means:\n * - The adapter never sees the presence key.\n * - Presence payloads rotate automatically when the collection DEK is rotated.\n * - Revoked users cannot derive the new presence key after a DEK rotation.\n *\n * @param dek The collection's AES-256-GCM DEK (extractable).\n * @param collectionName Used as the HKDF `info` parameter for domain separation.\n * @returns A non-extractable AES-256-GCM key suitable for presence payload encryption.\n */\nexport async function derivePresenceKey(dek: CryptoKey, collectionName: string): Promise<CryptoKey> {\n // Step 1: export DEK raw bytes\n const rawDek = await subtle.exportKey('raw', dek)\n\n // Step 2: import as HKDF key material\n const hkdfKey = await subtle.importKey(\n 'raw',\n rawDek,\n 'HKDF',\n false,\n ['deriveBits'],\n )\n\n // Step 3: derive 256 bits with salt='noydb-presence' and info=collectionName\n const salt = new TextEncoder().encode('noydb-presence')\n const info = new TextEncoder().encode(collectionName)\n const bits = await subtle.deriveBits(\n { name: 'HKDF', hash: 'SHA-256', salt, info },\n hkdfKey,\n KEY_BITS,\n )\n\n // Step 4: import derived bits as AES-GCM key\n return subtle.importKey(\n 'raw',\n bits,\n { name: 'AES-GCM', length: KEY_BITS },\n false,\n ['encrypt', 'decrypt'],\n )\n}\n\n// ─── Deterministic Encryption ────────────────────────────\n\n/**\n * Derive a deterministic 12-byte IV from `{ DEK, context, plaintext }`\n * via HKDF-SHA256. Given the same three inputs, the IV is identical, so\n * `encryptDeterministic` produces the same ciphertext on every call —\n * which is precisely what enables blind equality search on encrypted\n * fields.\n *\n * **The side channel this opens.** Two records whose field value is the\n * same produce the same ciphertext. An observer with store access can\n * therefore tell which records share a value — not *what* the value is,\n * but the equivalence class. 
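A sketch of the presence flow this enables. `dek` is assumed in scope as the collection's extractable AES-GCM DEK (as produced by `generateDEK()`), and the payload shape is illustrative.

```ts
import { derivePresenceKey, encryptBytes, decryptBytes } from './crypto.js'

const presenceKey = await derivePresenceKey(dek, 'invoices')

const payload = new TextEncoder().encode(JSON.stringify({ cursor: 12 }))
const { iv, data } = await encryptBytes(payload, presenceKey)

// The adapter relays { iv, data } opaquely. Peers holding the same DEK
// re-derive the presence key; rotating the DEK rotates this key with it.
const roundTripped = await decryptBytes(iv, data, presenceKey)
```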
This is the well-known trade-off of\n * deterministic encryption and is why the feature is strictly opt-in\n * per field, guarded by `acknowledgeDeterministicRisk: true` at\n * collection creation.\n *\n * The context string MUST include the collection name and field name,\n * so:\n * - The same plaintext in two different fields encrypts differently\n * (no cross-field equality leak).\n * - The same plaintext in two different collections (different DEKs)\n * encrypts differently by virtue of the key, even before HKDF\n * domain separation kicks in.\n */\nasync function deriveDeterministicIV(\n dek: CryptoKey,\n context: string,\n plaintext: string,\n): Promise<Uint8Array> {\n const rawDek = await subtle.exportKey('raw', dek)\n const hkdfKey = await subtle.importKey('raw', rawDek, 'HKDF', false, ['deriveBits'])\n const salt = new TextEncoder().encode('noydb-deterministic-v1')\n const info = new TextEncoder().encode(`${context}\\x00${plaintext}`)\n const bits = await subtle.deriveBits(\n { name: 'HKDF', hash: 'SHA-256', salt, info },\n hkdfKey,\n IV_BYTES * 8,\n )\n return new Uint8Array(bits)\n}\n\n/**\n * Encrypt a plaintext string with AES-256-GCM and a deterministic,\n * HKDF-derived IV.\n *\n * The same `{ dek, context, plaintext }` triple always produces the\n * same `{ iv, data }` — call this twice and you can string-compare the\n * ciphertexts to check equality of the inputs without decrypting them.\n *\n * @param context Domain-separation string — by convention\n * `'<collection>/<field>'`. Different contexts encrypt\n * the same plaintext to different ciphertexts, so\n * `email` in collection `users` does not collide with\n * `email` in collection `customers`.\n */\nexport async function encryptDeterministic(\n plaintext: string,\n dek: CryptoKey,\n context: string,\n): Promise<EncryptResult> {\n const iv = await deriveDeterministicIV(dek, context, plaintext)\n const encoded = new TextEncoder().encode(plaintext)\n const ciphertext = await subtle.encrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n encoded,\n )\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/**\n * Counterpart to {@link encryptDeterministic}. The IV is stored\n * alongside the ciphertext (exactly like the randomized path), so\n * decrypt uses the stored IV and verifies the GCM auth tag — a tampered\n * ciphertext throws `TamperedError` just like randomized AES-GCM.\n */\nexport async function decryptDeterministic(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n): Promise<string> {\n return decrypt(ivBase64, dataBase64, dek)\n}\n\n// ─── Random Generation ─────────────────────────────────────────────────\n\n/** Generate a random 12-byte IV for AES-GCM. */\nexport function generateIV(): Uint8Array {\n return globalThis.crypto.getRandomValues(new Uint8Array(IV_BYTES))\n}\n\n/** Generate a random 32-byte salt for PBKDF2. */\nexport function generateSalt(): Uint8Array {\n return globalThis.crypto.getRandomValues(new Uint8Array(SALT_BYTES))\n}\n\n// ─── Base64 Helpers ────────────────────────────────────────────────────\n\nexport function bufferToBase64(buffer: ArrayBuffer | Uint8Array): string {\n const bytes = buffer instanceof Uint8Array ? 
buffer : new Uint8Array(buffer)\n let binary = ''\n for (let i = 0; i < bytes.length; i++) {\n binary += String.fromCharCode(bytes[i]!)\n }\n return btoa(binary)\n}\n\nexport function base64ToBuffer(base64: string): Uint8Array<ArrayBuffer> {\n const binary = atob(base64)\n const bytes = new Uint8Array(binary.length)\n for (let i = 0; i < binary.length; i++) {\n bytes[i] = binary.charCodeAt(i)\n }\n return bytes\n}\n","/**\n * Ledger entry shape + canonical JSON + sha256 helpers.\n *\n * This file holds the PURE primitives used by the hash-chained ledger:\n * the entry type, the deterministic (sort-stable) JSON encoder, and\n * the sha256 hasher that produces `prevHash` and `ledger.head()`.\n *\n * Everything here is validator-free and side-effect free — the only\n * runtime dep is Web Crypto's `subtle.digest` for the sha256 call,\n * which we already use for every other hashing operation in the core.\n *\n * The hash chain property works like this:\n *\n * hash(entry[i]) = sha256(canonicalJSON(entry[i]))\n * entry[i+1].prevHash = hash(entry[i])\n *\n * Any modification to `entry[i]` (field values, field order, whitespace)\n * produces a different `hash(entry[i])`, which means `entry[i+1]`'s\n * stored `prevHash` no longer matches the recomputed hash, which means\n * `verify()` returns `{ ok: false, divergedAt: i + 1 }`. The chain is\n * append-only and tamper-evident without external anchoring.\n */\n\n/**\n * A single ledger entry in its plaintext form — what gets serialized,\n * hashed, and then encrypted with the ledger DEK before being written\n * to the `_ledger/` adapter collection.\n *\n * ## Why hash the ciphertext, not the plaintext?\n *\n * `payloadHash` is the sha256 of the record's ENCRYPTED envelope bytes,\n * not its plaintext. This matters:\n *\n * 1. **Zero-knowledge preserved.** A user (or a third party) can\n * verify the ledger against the stored envelopes without any\n * decryption keys. The adapter layer already holds only\n * ciphertext, so hashing the ciphertext keeps the ledger at the\n * same privacy level as the adapter.\n *\n * 2. **Determinism.** Plaintext → ciphertext is randomized by the\n * fresh per-write IV, so `hash(plaintext)` would need extra\n * normalization. `hash(ciphertext)` is already deterministic and\n * unique per write.\n *\n * 3. **Detection property.** If an attacker modifies even one byte of\n * the stored ciphertext (trying to flip a record), the hash\n * changes, the ledger's recorded `payloadHash` no longer matches,\n * and a data-integrity check fails. We don't do that check in\n * `verify()` today, but the\n * hook is there for a future `verifyIntegrity()` follow-up.\n *\n * Fields marked `op`, `collection`, `id`, `version`, `ts`, `actor` are\n * plaintext METADATA about the operation — NOT the record itself. The\n * entry is still encrypted at rest via the ledger DEK, but adapters\n * could theoretically infer operation patterns from the sizes and\n * timestamps. This is an accepted trade-off for the tamper-evidence\n * property; full ORAM-level privacy is out of scope for noy-db.\n */\nexport interface LedgerEntry {\n /**\n * Zero-based sequential position of this entry in the chain. 
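A minimal sketch of the verification loop the chain property above implies. `firstDivergence` is a hypothetical name; `hashEntry` is the helper defined later in this file.

```ts
import { hashEntry, type LedgerEntry } from './entry.js'

// Returns the index of the first broken link, or null when the chain is
// intact. The genesis entry must carry prevHash === '' per the contract.
async function firstDivergence(entries: readonly LedgerEntry[]): Promise<number | null> {
  let expected = ''
  for (const entry of entries) {
    if (entry.prevHash !== expected) return entry.index
    expected = await hashEntry(entry)
  }
  return null
}
```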
The\n * canonical adapter key is this number zero-padded to 10 digits\n * (`\"0000000001\"`) so lexicographic ordering matches numeric order.\n */\n readonly index: number\n\n /**\n * Hex-encoded sha256 of the canonical JSON of the PREVIOUS entry.\n * The genesis entry (index 0) has `prevHash === ''` — the first\n * entry in a fresh vault has nothing to point back to.\n */\n readonly prevHash: string\n\n /**\n * Which kind of mutation this entry records. Currently only supports\n * data operations (`put`, `delete`). Access-control operations\n * (`grant`, `revoke`, `rotate`) will be added in a follow-up once\n * the keyring write path is instrumented — that's tracked in the\n * epic issue.\n */\n readonly op: 'put' | 'delete'\n\n /** The collection the mutation targeted. */\n readonly collection: string\n\n /** The record id the mutation targeted. */\n readonly id: string\n\n /**\n * The record version AFTER this mutation. For `put` this is the\n * newly assigned version; for `delete` this is the version that\n * was deleted (the last version visible to reads).\n */\n readonly version: number\n\n /** ISO timestamp of the mutation. */\n readonly ts: string\n\n /** User id of the actor who performed the mutation. */\n readonly actor: string\n\n /**\n * Hex-encoded sha256 of the encrypted envelope's `_data` field.\n * For `put`, this is the hash of the new ciphertext. For `delete`,\n * it's the hash of the last visible ciphertext at deletion time,\n * or the empty string if nothing was there to delete. Hashing the\n * ciphertext (not the plaintext) preserves zero-knowledge — see\n * the file docstring.\n */\n readonly payloadHash: string\n\n /**\n * Optional hex-encoded sha256 of the encrypted JSON Patch delta\n * blob stored alongside this entry in `_ledger_deltas/`. Present\n * only for `put` operations that had a previous version — the\n * genesis put of a new record, and every `delete`, leave this\n * field undefined.\n *\n * The delta payload itself lives in a sibling internal collection\n * (`_ledger_deltas/<paddedIndex>`) and is encrypted with the\n * ledger DEK. Callers use `ledger.loadDelta(index)` to decrypt and\n * deserialize it when reconstructing a historical version.\n *\n * Why optional instead of always-present: the first put of a\n * record has no previous version to diff against, so storing an\n * empty patch would be noise. For deletes there's no \"next\" state\n * to describe with a delta. Both cases set this field to undefined.\n *\n * Note: the canonical-JSON hasher treats `undefined` as invalid\n * (it's one of the guard rails), so on the wire this field is\n * either `{ deltaHash: '<hex>' }` or absent from the JSON\n * entirely — never `{ deltaHash: undefined }`.\n */\n readonly deltaHash?: string\n}\n\n/**\n * Canonical (sort-stable) JSON encoder.\n *\n * This function is the load-bearing primitive of the hash chain:\n * `sha256(canonicalJSON(entry))` must produce the same hex string\n * every time, on every machine, for the same logical entry — otherwise\n * `verify()` would return `{ ok: false }` on cross-platform reads.\n *\n * JavaScript's `JSON.stringify` is almost canonical, but NOT quite:\n * it preserves the insertion order of object keys, which means\n * `{a:1,b:2}` and `{b:2,a:1}` serialize differently. We fix this by\n * recursively walking objects and sorting their keys before\n * concatenation.\n *\n * Arrays keep their original order (reordering them would change\n * semantics). Numbers, strings, booleans, and `null` use the default\n * JSON encoding. 
`undefined` and functions are rejected — ledger\n * entries are plain data, and silently dropping `undefined` would\n * break the \"same input → same hash\" property if a caller forgot to\n * omit a field.\n *\n * Performance: one pass per nesting level; O(n log n) for key sorting\n * at each object. Entries are small (< 1 KB) so this is negligible\n * compared to the sha256 call.\n */\nexport function canonicalJson(value: unknown): string {\n if (value === null) return 'null'\n if (typeof value === 'boolean') return value ? 'true' : 'false'\n if (typeof value === 'number') {\n if (!Number.isFinite(value)) {\n throw new Error(\n `canonicalJson: refusing to encode non-finite number ${String(value)}`,\n )\n }\n return JSON.stringify(value)\n }\n if (typeof value === 'string') return JSON.stringify(value)\n if (typeof value === 'bigint') {\n throw new Error('canonicalJson: BigInt is not JSON-serializable')\n }\n if (typeof value === 'undefined' || typeof value === 'function') {\n throw new Error(\n `canonicalJson: refusing to encode ${typeof value} — include all fields explicitly`,\n )\n }\n if (Array.isArray(value)) {\n return '[' + value.map((v) => canonicalJson(v)).join(',') + ']'\n }\n if (typeof value === 'object') {\n const obj = value as Record<string, unknown>\n const keys = Object.keys(obj).sort()\n const parts: string[] = []\n for (const key of keys) {\n parts.push(JSON.stringify(key) + ':' + canonicalJson(obj[key]))\n }\n return '{' + parts.join(',') + '}'\n }\n throw new Error(`canonicalJson: unexpected value type: ${typeof value}`)\n}\n\n/**\n * Compute a hex-encoded sha256 of a string via Web Crypto's subtle API.\n *\n * We use hex (not base64) for hashes because hex is case-insensitive,\n * fixed-length (64 chars), and easier to compare visually in debug\n * output. Base64 would save a few bytes in storage but every encrypted\n * ledger entry is already much larger than the hash itself.\n */\nexport async function sha256Hex(input: string): Promise<string> {\n const bytes = new TextEncoder().encode(input)\n const digest = await globalThis.crypto.subtle.digest('SHA-256', bytes)\n return bytesToHex(new Uint8Array(digest))\n}\n\n/**\n * Compute the canonical hash of a ledger entry. Short wrapper around\n * `canonicalJson` + `sha256Hex`; callers use this instead of composing\n * the two functions every time, so any future change to the hashing\n * pipeline (e.g., adding a domain-separation prefix) lives in one place.\n */\nexport async function hashEntry(entry: LedgerEntry): Promise<string> {\n return sha256Hex(canonicalJson(entry))\n}\n\n/** Convert a Uint8Array to a lowercase hex string. */\nfunction bytesToHex(bytes: Uint8Array): string {\n const hex = new Array<string>(bytes.length)\n for (let i = 0; i < bytes.length; i++) {\n // Non-null assertion: indexing a Uint8Array within bounds always\n // returns a number, but the compiler's noUncheckedIndexedAccess\n // flag widens it to `number | undefined`. Safe here by construction.\n hex[i] = (bytes[i] ?? 0).toString(16).padStart(2, '0')\n }\n return hex.join('')\n}\n\n/**\n * Pad an index to the canonical 10-digit form used as the adapter key.\n * Ten digits is enough for ~10 billion ledger entries per vault\n * — far beyond any realistic use case, but cheap enough that the extra\n * digits don't hurt storage.\n */\nexport function paddedIndex(index: number): string {\n return String(index).padStart(10, '0')\n}\n\n/** Parse a padded adapter key back into a number. Returns NaN on malformed input. 
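A quick demonstration of the two properties that matter, key-order invariance and the loud `undefined` guard; both functions are defined in this file.

```ts
import { canonicalJson, sha256Hex } from './entry.js'

const a = canonicalJson({ a: 1, b: 2 }) // '{"a":1,"b":2}'
const b = canonicalJson({ b: 2, a: 1 }) // identical: key order is irrelevant
console.log(a === b) // true
console.log((await sha256Hex(a)) === (await sha256Hex(b))) // true: stable hash

canonicalJson({ a: undefined }) // throws; omit the field instead
```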
*/\nexport function parseIndex(key: string): number {\n return Number.parseInt(key, 10)\n}\n","/**\n * RFC 6902 JSON Patch — compute + apply.\n *\n * This module is the \"delta history\" primitive: instead of\n * snapshotting the full record on every put (the previous behavior),\n * `Collection.put` computes a JSON Patch from the previous version to\n * the new version and stores only the patch in the ledger. To\n * reconstruct version N, we walk from the genesis snapshot forward\n * applying patches. Storage scales with **edit size**, not record\n * size — a 10 KB record edited 1000 times costs ~10 KB of deltas\n * instead of ~10 MB of snapshots.\n *\n * ## Why hand-roll instead of using a library?\n *\n * RFC 6902 has good libraries (`fast-json-patch`, `rfc6902`) but every\n * single one of them adds a runtime dependency to `@noy-db/core`. The\n * \"zero runtime dependencies\" promise is one of the core's load-bearing\n * features, and the patch surface we actually need is small enough\n * (~150 LoC) that vendoring is the right call.\n *\n * What we implement:\n * - `add` — insert a value at a path\n * - `remove` — delete the value at a path\n * - `replace` — overwrite the value at a path\n *\n * What we deliberately skip (out of scope for the ledger use):\n * - `move` and `copy` — optimizations; the diff algorithm doesn't\n * emit them, so the apply path doesn't need them\n * - `test` — used for transactional patches; we already have\n * optimistic concurrency via `_v` at the envelope layer\n * - Sophisticated array diffing (LCS, edit distance) — we treat\n * arrays as atomic values and emit a single `replace` op when\n * they differ. The accounting domain has small arrays where this\n * is fine; if we ever need patch-level array diffing we can add\n * it without changing the storage format.\n *\n * ## Path encoding (RFC 6902 §3)\n *\n * Paths look like `/foo/bar/0`. Each path segment is either an object\n * key or a numeric array index. Two characters need escaping inside\n * keys: `~` becomes `~0` and `/` becomes `~1`. We implement both.\n *\n * Empty path (`\"\"`) refers to the root document. Only `replace` makes\n * sense at the root, and our diff function emits it as a top-level\n * `replace` when `prev` and `next` differ in shape (object vs array,\n * primitive vs object, etc.).\n */\n\n/** A single JSON Patch operation. Subset of RFC 6902 — see file docstring. */\nexport type JsonPatchOp =\n | { readonly op: 'add'; readonly path: string; readonly value: unknown }\n | { readonly op: 'remove'; readonly path: string }\n | { readonly op: 'replace'; readonly path: string; readonly value: unknown }\n\n/** A complete JSON Patch document — an array of operations. */\nexport type JsonPatch = readonly JsonPatchOp[]\n\n// ─── Compute (diff) ──────────────────────────────────────────────────\n\n/**\n * Compute a JSON Patch that, when applied to `prev`, produces `next`.\n *\n * The algorithm is a straightforward recursive object walk:\n *\n * 1. If both inputs are plain objects (and not arrays/null):\n * - For each key in `prev`, recurse if `next` has it, else emit `remove`\n * - For each key in `next` not in `prev`, emit `add`\n * 2. If both inputs are arrays AND structurally equal, no-op.\n * Otherwise emit a single `replace` for the whole array.\n * 3. If both inputs are deeply equal primitives, no-op.\n * 4. 
Otherwise emit a `replace` at the current path.\n *\n * We do not minimize patches across move-like rearrangements — every\n * generated patch is straightforward enough to apply by hand if you\n * had to debug it.\n */\nexport function computePatch(prev: unknown, next: unknown): JsonPatch {\n const ops: JsonPatchOp[] = []\n diff(prev, next, '', ops)\n return ops\n}\n\nfunction diff(\n prev: unknown,\n next: unknown,\n path: string,\n out: JsonPatchOp[],\n): void {\n // Both null / both undefined → no-op (we don't differentiate them\n // in JSON terms; canonicalJson would reject undefined anyway).\n if (prev === next) return\n\n // One side null, the other not → straight replace.\n if (prev === null || next === null) {\n out.push({ op: 'replace', path, value: next })\n return\n }\n\n const prevIsArray = Array.isArray(prev)\n const nextIsArray = Array.isArray(next)\n const prevIsObject = typeof prev === 'object' && !prevIsArray\n const nextIsObject = typeof next === 'object' && !nextIsArray\n\n // Type changed (e.g., object → primitive, array → object). Replace.\n if (prevIsArray !== nextIsArray || prevIsObject !== nextIsObject) {\n out.push({ op: 'replace', path, value: next })\n return\n }\n\n // Both arrays. We don't do clever LCS-based diffing — emit a single\n // replace for the whole array if they differ. See file docstring for\n // the rationale.\n if (prevIsArray && nextIsArray) {\n if (!arrayDeepEqual(prev as unknown[], next as unknown[])) {\n out.push({ op: 'replace', path, value: next })\n }\n return\n }\n\n // Both plain objects. Recurse key by key.\n if (prevIsObject && nextIsObject) {\n const prevObj = prev as Record<string, unknown>\n const nextObj = next as Record<string, unknown>\n const prevKeys = Object.keys(prevObj)\n const nextKeys = Object.keys(nextObj)\n\n // Handle removes and overlapping recursions in one pass over prev.\n for (const key of prevKeys) {\n const childPath = path + '/' + escapePathSegment(key)\n if (!(key in nextObj)) {\n out.push({ op: 'remove', path: childPath })\n } else {\n diff(prevObj[key], nextObj[key], childPath, out)\n }\n }\n // Handle adds.\n for (const key of nextKeys) {\n if (!(key in prevObj)) {\n out.push({\n op: 'add',\n path: path + '/' + escapePathSegment(key),\n value: nextObj[key],\n })\n }\n }\n return\n }\n\n // Two primitives that aren't strictly equal — replace.\n out.push({ op: 'replace', path, value: next })\n}\n\nfunction arrayDeepEqual(a: unknown[], b: unknown[]): boolean {\n if (a.length !== b.length) return false\n for (let i = 0; i < a.length; i++) {\n if (!deepEqual(a[i], b[i])) return false\n }\n return true\n}\n\nfunction deepEqual(a: unknown, b: unknown): boolean {\n if (a === b) return true\n if (a === null || b === null) return false\n if (typeof a !== typeof b) return false\n if (typeof a !== 'object') return false\n const aArray = Array.isArray(a)\n const bArray = Array.isArray(b)\n if (aArray !== bArray) return false\n if (aArray && bArray) return arrayDeepEqual(a, b as unknown[])\n const aObj = a as Record<string, unknown>\n const bObj = b as Record<string, unknown>\n const aKeys = Object.keys(aObj)\n const bKeys = Object.keys(bObj)\n if (aKeys.length !== bKeys.length) return false\n for (const key of aKeys) {\n if (!(key in bObj)) return false\n if (!deepEqual(aObj[key], bObj[key])) return false\n }\n return true\n}\n\n// ─── Apply ──────────────────────────────────────────────────────────\n\n/**\n * Apply a JSON Patch to a base document and return the result.\n *\n * The base document is **not mutated** — every 
op clones the parent\n * container before writing to it, so the caller's reference to `base`\n * stays untouched. This costs an extra allocation per op but makes\n * the apply pipeline reorderable and safe to interrupt.\n *\n * Throws on:\n * - Removing a path that doesn't exist\n * - Adding to a path whose parent doesn't exist\n * - A path component that doesn't match the document shape (e.g.,\n * trying to step into a primitive)\n *\n * Throwing is the right behavior for the ledger use case: a failed\n * apply means the chain is corrupted, which should be loud rather\n * than silently producing a wrong reconstruction.\n */\nexport function applyPatch<T = unknown>(base: T, patch: JsonPatch): T {\n let result: unknown = clone(base)\n for (const op of patch) {\n result = applyOp(result, op)\n }\n return result as T\n}\n\nfunction applyOp(doc: unknown, op: JsonPatchOp): unknown {\n // Empty path → operation targets the root. Only `replace` and `add`\n // make sense at the root, but we handle `remove` for completeness\n // (root removal returns null).\n if (op.path === '') {\n if (op.op === 'remove') return null\n return clone(op.value)\n }\n\n const segments = parsePath(op.path)\n return walkAndApply(doc, segments, op)\n}\n\nfunction walkAndApply(\n doc: unknown,\n segments: string[],\n op: JsonPatchOp,\n): unknown {\n if (segments.length === 0) {\n // Should never happen — empty path is handled in applyOp().\n throw new Error('walkAndApply: empty segments (internal error)')\n }\n\n const [head, ...rest] = segments\n if (head === undefined) throw new Error('walkAndApply: undefined segment')\n\n if (rest.length === 0) {\n return applyAtTerminal(doc, head, op)\n }\n\n // Recurse into the child container, then rebuild the parent with\n // the modified child.\n if (Array.isArray(doc)) {\n const idx = parseArrayIndex(head, doc.length)\n const child = doc[idx]\n const newChild = walkAndApply(child, rest, op)\n const next = doc.slice()\n next[idx] = newChild\n return next\n }\n if (doc !== null && typeof doc === 'object') {\n const obj = doc as Record<string, unknown>\n if (!(head in obj)) {\n throw new Error(`applyPatch: path segment \"${head}\" not found in object`)\n }\n const newChild = walkAndApply(obj[head], rest, op)\n return { ...obj, [head]: newChild }\n }\n throw new Error(\n `applyPatch: cannot step into ${typeof doc} at segment \"${head}\"`,\n )\n}\n\nfunction applyAtTerminal(\n doc: unknown,\n segment: string,\n op: JsonPatchOp,\n): unknown {\n if (Array.isArray(doc)) {\n const idx =\n segment === '-' ? 
doc.length : parseArrayIndex(segment, doc.length + 1)\n const next = doc.slice()\n if (op.op === 'remove') {\n next.splice(idx, 1)\n return next\n }\n if (op.op === 'add') {\n next.splice(idx, 0, clone(op.value))\n return next\n }\n if (op.op === 'replace') {\n if (idx >= doc.length) {\n throw new Error(\n `applyPatch: replace at out-of-bounds array index ${idx}`,\n )\n }\n next[idx] = clone(op.value)\n return next\n }\n }\n if (doc !== null && typeof doc === 'object') {\n const obj = doc as Record<string, unknown>\n if (op.op === 'remove') {\n if (!(segment in obj)) {\n throw new Error(\n `applyPatch: remove on missing key \"${segment}\"`,\n )\n }\n const next = { ...obj }\n delete next[segment]\n return next\n }\n if (op.op === 'add') {\n // RFC 6902: `add` on an existing key replaces it.\n return { ...obj, [segment]: clone(op.value) }\n }\n if (op.op === 'replace') {\n if (!(segment in obj)) {\n throw new Error(\n `applyPatch: replace on missing key \"${segment}\"`,\n )\n }\n return { ...obj, [segment]: clone(op.value) }\n }\n }\n throw new Error(\n `applyPatch: cannot apply ${op.op} at terminal segment \"${segment}\"`,\n )\n}\n\n// ─── Path encoding (RFC 6902 §3) ─────────────────────────────────────\n\n/**\n * Escape a single path segment per RFC 6902 §3:\n * `~` → `~0`\n * `/` → `~1`\n *\n * Order matters: `~` must be escaped first, otherwise the `~1` we\n * just emitted would be re-escaped to `~01`.\n */\nfunction escapePathSegment(segment: string): string {\n return segment.replace(/~/g, '~0').replace(/\\//g, '~1')\n}\n\nfunction unescapePathSegment(segment: string): string {\n return segment.replace(/~1/g, '/').replace(/~0/g, '~')\n}\n\nfunction parsePath(path: string): string[] {\n if (!path.startsWith('/')) {\n throw new Error(`applyPatch: path must start with '/', got \"${path}\"`)\n }\n return path\n .slice(1)\n .split('/')\n .map(unescapePathSegment)\n}\n\nfunction parseArrayIndex(segment: string, max: number): number {\n if (!/^\\d+$/.test(segment)) {\n throw new Error(\n `applyPatch: array index must be a non-negative integer, got \"${segment}\"`,\n )\n }\n const idx = Number.parseInt(segment, 10)\n if (idx < 0 || idx > max) {\n throw new Error(\n `applyPatch: array index ${idx} out of range [0, ${max}]`,\n )\n }\n return idx\n}\n\n// ─── Cheap structural clone ─────────────────────────────────────────\n\n/**\n * Plain-JSON clone via JSON.parse(JSON.stringify(value)).\n *\n * Faster than `structuredClone` for our use because (a) we know our\n * inputs are JSON-compatible (no Dates, Maps, or BigInts — anything\n * else gets rejected by canonicalJson upstream), and (b) `structuredClone`\n * has overhead for handling arbitrary structured data we don't need.\n *\n * For tiny ledger entries (< 1 KB), the JSON round-trip is in the\n * single-digit microsecond range.\n */\nfunction clone<T>(value: T): T {\n if (value === null || value === undefined) return value\n if (typeof value !== 'object') return value\n return JSON.parse(JSON.stringify(value)) as T\n}\n","/**\n * Ledger storage constants — pinned in their own leaf module so\n * always-on core code (vault.ts, dictionary.ts) can import them\n * without dragging the `LedgerStore` class into the bundle.\n *\n * `splitting: true` in tsup is not enough on its own: when a\n * source file exports both pure constants and a heavyweight class,\n * the bundler keeps the entire chunk reachable from any importer.\n * Extracting the constants lets the floor scenario import them\n * without paying for the class.\n *\n * @internal\n 
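A round-trip sketch of the two exported functions, with the atomic-array behavior visible in the emitted ops; the record shape is illustrative.

```ts
import { computePatch, applyPatch } from './patch.js'

const prev = { total: 100, lines: [{ sku: 'A', qty: 1 }], note: 'draft' }
const next = { total: 120, lines: [{ sku: 'A', qty: 2 }] }

const patch = computePatch(prev, next)
// [
//   { op: 'replace', path: '/total', value: 120 },
//   { op: 'replace', path: '/lines', value: [{ sku: 'A', qty: 2 }] }, // array replaced atomically
//   { op: 'remove', path: '/note' },
// ]

const rebuilt = applyPatch(prev, patch) // deep-equals `next`; `prev` is untouched
```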
*/\n\n/** The internal collection name used for ledger entry storage. */\nexport const LEDGER_COLLECTION = '_ledger'\n\n/**\n * The internal collection name used for delta payload storage.\n *\n * Deltas live in a sibling collection (not inside `_ledger`) for two\n * reasons:\n *\n * 1. **Listing efficiency.** `ledger.loadAllEntries()` calls\n * `adapter.list(_ledger)` which would otherwise return every\n * delta key alongside every entry key. Splitting them keeps the\n * list small (one key per ledger entry) and the delta reads\n * keyed by the entry's index.\n *\n * 2. **Prune-friendliness.** A future `pruneHistory()` will delete\n * old deltas while keeping the ledger chain intact (folding old\n * deltas into a base snapshot). Separating the storage makes\n * that deletion a targeted operation on one collection instead\n * of a filter across a mixed list.\n *\n * Both collections share the same ledger DEK — one DEK, two\n * internal collections, same zero-knowledge guarantees.\n */\nexport const LEDGER_DELTAS_COLLECTION = '_ledger_deltas'\n","/**\n * Envelope payload hash — pinned in its own leaf module so consumers\n * (DictionaryHandle, the active history strategy) can import it\n * without dragging in the `LedgerStore` class.\n *\n * See `constants.ts` for the broader rationale.\n *\n * @internal\n */\n\nimport type { EncryptedEnvelope } from '../../types.js'\nimport { sha256Hex } from './entry.js'\n\n/**\n * Compute the `payloadHash` value for an encrypted envelope. Used by\n * `LedgerStore.append` for both put (hash the new envelope) and\n * delete (hash the previous envelope) paths, and by\n * `DictionaryHandle` so its ledger entries match the same contract.\n *\n * Returns the empty string when there is no envelope (delete of a\n * never-existed record). The empty string is tolerated by the ledger\n * entry's `payloadHash` field as the canonical \"nothing here\" value.\n */\nexport async function envelopePayloadHash(\n envelope: EncryptedEnvelope | null,\n): Promise<string> {\n if (!envelope) return ''\n // `_data` is a base64 string for encrypted envelopes and the raw\n // JSON for plaintext ones. Both are strings, so a single sha256Hex\n // call works for both modes — the hash value differs between\n // encrypted/plaintext compartments because the bytes on disk\n // differ.\n return sha256Hex(envelope._data)\n}\n","/**\n * `LedgerStore` — read/write access to a compartment's hash-chained\n * audit log.\n *\n * The store is a thin wrapper around the adapter's `_ledger/` internal\n * collection. Every append:\n *\n * 1. Loads the current head (or treats an empty ledger as head = -1)\n * 2. Computes `prevHash` = sha256(canonicalJson(head))\n * 3. Builds the new entry with `index = head.index + 1`\n * 4. Encrypts the entry with the compartment's ledger DEK\n * 5. Writes the encrypted envelope to `_ledger/<paddedIndex>`\n *\n * `verify()` walks the chain from genesis forward and returns\n * `{ ok: true, head }` on success or `{ ok: false, divergedAt }` on the\n * first broken link.\n *\n * ## Thread / concurrency model\n *\n * For now we assume a **single writer per vault**. Two\n * concurrent `append()` calls would race on the \"read head, write\n * head+1\" cycle and could produce a broken chain. The sync engine\n * is the primary concurrent-writer scenario, and it uses\n * optimistic-concurrency via `expectedVersion` on the adapter — but\n * the ledger path has no such guard today. 
Multi-writer hardening is a\n * follow-up.\n *\n * Single-writer usage IS safe, including across process restarts:\n * `head()` reads the adapter fresh each call, so a crash between the\n * adapter.put of a data record and the ledger append just means the\n * ledger is missing an entry for that record. `verify()` still\n * succeeds; a future `verifyIntegrity()` helper can cross-check the\n * ledger against the data collections to catch the gap.\n *\n * ## Why hide the ledger from `vault.collection()`?\n *\n * The `_ledger` name starts with `_`, matching the existing prefix\n * convention for internal collections (`_keyring`, `_sync`,\n * `_history`). The Vault's public `collection()` method already\n * returns entries for any name, but `loadAll()` filters out\n * underscore-prefixed collections so backups and exports don't leak\n * ledger metadata. We keep the ledger accessible ONLY via\n * `vault.ledger()` to enforce the hash-chain invariants — direct\n * puts via `collection('_ledger')` would bypass the `append()` logic.\n */\n\nimport type { NoydbStore, EncryptedEnvelope } from '../../types.js'\nimport { NOYDB_FORMAT_VERSION } from '../../types.js'\nimport { encrypt, decrypt } from '../../crypto.js'\nimport { ConflictError, LedgerContentionError } from '../../errors.js'\nimport {\n canonicalJson,\n hashEntry,\n paddedIndex,\n sha256Hex,\n type LedgerEntry,\n} from './entry.js'\nimport type { JsonPatch } from './patch.js'\nimport { applyPatch } from './patch.js'\nimport { LEDGER_COLLECTION, LEDGER_DELTAS_COLLECTION } from './constants.js'\nimport { envelopePayloadHash } from './hash.js'\n\n/**\n * Maximum optimistic-CAS retries on the ledger head. Each failed\n * attempt invalidates the head cache, re-reads, and retries with a\n * fresh next-index. After N failures we surface\n * `LedgerContentionError` so the caller can decide whether to retry,\n * queue, or alert.\n */\nconst MAX_APPEND_ATTEMPTS = 8\n\n// — re-export the constants + helper so any existing\n// `import { LEDGER_COLLECTION } from '...store.js'` paths keep\n// working. Internal core paths (vault.ts) import from the leaf\n// modules directly to avoid pulling this file's class into the\n// floor bundle.\nexport { LEDGER_COLLECTION, LEDGER_DELTAS_COLLECTION, envelopePayloadHash }\n\n/**\n * Input shape for `LedgerStore.append()`. The caller supplies the\n * operation metadata; the store fills in `index` and `prevHash`.\n */\nexport interface AppendInput {\n op: LedgerEntry['op']\n collection: string\n id: string\n version: number\n actor: string\n payloadHash: string\n /**\n * Optional JSON Patch representing the delta from the previous\n * version to the new version. Present only for `put` operations\n * that had a previous version; omitted for genesis puts and for\n * deletes. When present, `LedgerStore.append` persists the patch\n * in `_ledger_deltas/<paddedIndex>` and records its sha256 hash\n * as the entry's `deltaHash` field.\n */\n delta?: JsonPatch\n}\n\n/**\n * Result of `LedgerStore.verify()`. On success, `head` is the hash of\n * the last entry — the same value that should be published to any\n * external anchoring service (blockchain, OpenTimestamps, etc.). On\n * failure, `divergedAt` is the 0-based index of the first entry whose\n * recorded `prevHash` does not match the recomputed hash of its\n * predecessor. 
Entries at `divergedAt` and later are untrustworthy;\n * entries before that index are still valid.\n */\nexport type VerifyResult =\n | { readonly ok: true; readonly head: string; readonly length: number }\n | {\n readonly ok: false\n readonly divergedAt: number\n readonly expected: string\n readonly actual: string\n }\n\n/**\n * A LedgerStore is bound to a single vault. Callers obtain one\n * via `vault.ledger()` — there is no public constructor to keep\n * the hash-chain invariants in one place.\n *\n * The class holds no mutable state beyond its dependencies (adapter,\n * vault name, DEK resolver, actor id) and the head cache documented\n * below. Chain reads (`verify()`, `entries()`) hit the adapter fresh, so\n * multiple instances against the same vault see each other's writes\n * (at the cost of re-parsing the ledger on every call; acceptable at\n * current scale). Appends rely on the head cache, with the caveat\n * documented on it.\n */\nexport class LedgerStore {\n private readonly adapter: NoydbStore\n private readonly vault: string\n private readonly encrypted: boolean\n private readonly getDEK: (collectionName: string) => Promise<CryptoKey>\n private readonly actor: string\n\n /**\n * In-memory cache of the chain head — the most recently appended\n * entry along with its precomputed hash. Without this, every\n * `append()` would re-load every prior entry to recompute the\n * prevHash, making N puts O(N²) — a 1K-record stress test goes from\n * < 100ms to a multi-second timeout.\n *\n * The cache is populated on first read (`append`, `head`, `verify`)\n * and updated in-place on every successful `append`. Single-writer\n * usage (the assumption) keeps it consistent. A second\n * LedgerStore instance writing to the same vault would not\n * see the first instance's appends in its cached state — that's the\n * concurrency caveat documented at the class level.\n *\n * Sentinel `undefined` means \"not yet loaded\"; an explicit `null`\n * value means \"loaded and confirmed empty\" — distinguishing these\n * matters because an empty ledger is a valid state (genesis prevHash\n * is the empty string), and we don't want to re-scan the adapter\n * just because the chain is freshly initialized.\n */\n private headCache: { entry: LedgerEntry; hash: string } | null | undefined = undefined\n\n constructor(opts: {\n adapter: NoydbStore\n vault: string\n encrypted: boolean\n getDEK: (collectionName: string) => Promise<CryptoKey>\n actor: string\n }) {\n this.adapter = opts.adapter\n this.vault = opts.vault\n this.encrypted = opts.encrypted\n this.getDEK = opts.getDEK\n this.actor = opts.actor\n }\n\n /**\n * Lazily load (or return cached) the current chain head. The cache\n * sentinel is `undefined` until first access; after the first call,\n * the cache holds either a `{ entry, hash }` for non-empty ledgers\n * or `null` for empty ones.\n */\n private async getCachedHead(): Promise<{ entry: LedgerEntry; hash: string } | null> {\n if (this.headCache !== undefined) return this.headCache\n const entries = await this.loadAllEntries()\n const last = entries[entries.length - 1]\n if (!last) {\n this.headCache = null\n return null\n }\n this.headCache = { entry: last, hash: await hashEntry(last) }\n return this.headCache\n }\n\n /**\n * Append a new entry to the ledger. Returns the full entry that was\n * written (with its assigned index and computed prevHash) so the\n * caller can use the hash for downstream purposes (e.g., embedding\n * in a verifiable backup).\n *\n * This is the **only** way to add entries. 
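A sketch of a guarded append. `vault`, `envelope`, and `delta` are assumed in scope; `vault.ledger()` is the accessor documented above, and the other names come from this module.

```ts
import { LedgerContentionError } from '../../errors.js'
import { envelopePayloadHash } from './hash.js'

const ledger = vault.ledger()
try {
  const entry = await ledger.append({
    op: 'put',
    collection: 'invoices',
    id: 'inv-1',
    version: 3,
    actor: 'user-7',
    payloadHash: await envelopePayloadHash(envelope),
    delta, // reverse JSON Patch; present only when a previous version existed
  })
  console.log(`claimed slot ${entry.index}, prevHash ${entry.prevHash}`)
} catch (err) {
  if (err instanceof LedgerContentionError) {
    // Eight CAS attempts exhausted: queue the write or surface to the caller.
  } else {
    throw err
  }
}
```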
Direct adapter writes to\n * `_ledger/` would bypass the chain math and would be caught by the\n * next `verify()` call as a divergence.\n *\n * ## Multi-writer correctness\n *\n * Append is implemented as an optimistic-CAS retry loop. On every\n * attempt:\n *\n * 1. Read fresh head (cache invalidated on retry).\n * 2. Compute `nextIndex = head.index + 1`, `prevHash = hash(head)`.\n * 3. Encrypt delta payload IN MEMORY (no adapter write yet) so we\n * can compute `deltaHash` before claiming the chain slot.\n * 4. Build + encrypt the entry envelope.\n * 5. `adapter.put(_ledger, paddedIndex, envelope, expectedVersion: 0)`\n * — the `expectedVersion: 0` asserts \"this slot must not exist.\"\n * Stores with `casAtomic: true` honor the CAS check; under\n * contention the second writer's put throws `ConflictError`.\n * 6. On `ConflictError`: invalidate the head cache, sleep with\n * bounded backoff + jitter, retry. After `MAX_APPEND_ATTEMPTS`\n * retries throw {@link LedgerContentionError}.\n * 7. On success: write the delta envelope (if any) at the same\n * index. Update the head cache.\n *\n * Entry-first ordering matters: writing the delta first under\n * contention would orphan delta records at indices the writer never\n * actually claimed. The deltaHash is computed off the encrypted\n * envelope's `_data` field, which doesn't require the envelope to\n * be persisted.\n *\n * Stores with `casAtomic: false` (file, s3, r2 by default) silently\n * accept the `expectedVersion: 0` argument and proceed without a\n * CAS check. Concurrent appends against those stores remain\n * best-effort — pair them with an advisory lock or with sync\n * single-writer discipline.\n */\n async append(input: AppendInput): Promise<LedgerEntry> {\n let lastConflict: ConflictError | undefined\n for (let attempt = 0; attempt < MAX_APPEND_ATTEMPTS; attempt++) {\n // Force a fresh head read on every retry. The first attempt may\n // hit the cache; subsequent attempts must re-scan the adapter\n // because the prior conflict means our cached state is stale.\n if (attempt > 0) {\n this.headCache = undefined\n }\n try {\n return await this.appendOnce(input)\n } catch (err) {\n if (err instanceof ConflictError) {\n lastConflict = err\n if (attempt < MAX_APPEND_ATTEMPTS - 1) {\n await sleepBackoff(attempt)\n }\n continue\n }\n throw err\n }\n }\n void lastConflict\n throw new LedgerContentionError(MAX_APPEND_ATTEMPTS)\n }\n\n /**\n * One attempt at the append cycle. Throws `ConflictError` when the\n * CAS check on the entry put fails — `append()` catches that and\n * retries. Any other error propagates to the caller.\n */\n private async appendOnce(input: AppendInput): Promise<LedgerEntry> {\n const cached = await this.getCachedHead()\n const lastEntry = cached?.entry\n const prevHash = cached?.hash ?? ''\n const nextIndex = lastEntry ? lastEntry.index + 1 : 0\n\n // Encrypt the delta in memory so we can compute deltaHash WITHOUT\n // claiming the deltas slot yet — entry-put is the chain claim.\n let deltaEnvelope: EncryptedEnvelope | undefined\n let deltaHash: string | undefined\n if (input.delta !== undefined) {\n deltaEnvelope = await this.encryptDelta(input.delta)\n deltaHash = await sha256Hex(deltaEnvelope._data)\n }\n\n // Build the entry. 
Conditionally include `deltaHash` so\n // canonicalJson (which rejects undefined) never sees it when\n // there's no delta.\n const entryBase = {\n index: nextIndex,\n prevHash,\n op: input.op,\n collection: input.collection,\n id: input.id,\n version: input.version,\n ts: new Date().toISOString(),\n actor: input.actor === '' ? this.actor : input.actor,\n payloadHash: input.payloadHash,\n } as const\n const entry: LedgerEntry =\n deltaHash !== undefined\n ? { ...entryBase, deltaHash }\n : entryBase\n\n const envelope = await this.encryptEntry(entry)\n // expectedVersion: 0 ≡ \"the slot must not yet exist.\" Honored by\n // casAtomic stores; silently passed through by non-CAS stores.\n await this.adapter.put(\n this.vault,\n LEDGER_COLLECTION,\n paddedIndex(entry.index),\n envelope,\n 0,\n )\n\n // Chain slot claimed. Now write the delta record (if any).\n if (deltaEnvelope) {\n await this.adapter.put(\n this.vault,\n LEDGER_DELTAS_COLLECTION,\n paddedIndex(entry.index),\n deltaEnvelope,\n 0,\n )\n }\n\n // Update the head cache so the next append() doesn't re-scan the\n // adapter.\n this.headCache = { entry, hash: await hashEntry(entry) }\n return entry\n }\n\n /**\n * Load a delta payload by its entry index. Returns `null` if the\n * entry at that index doesn't reference a delta (genesis puts and\n * deletes leave the slot empty) or if the delta row is missing\n * (possible after a `pruneHistory` fold).\n *\n * The caller is responsible for deciding what to do with a missing\n * delta — `ledger.reconstruct()` uses it as a \"stop walking\n * backward\" signal and returns `null` for the whole reconstruction.\n */\n async loadDelta(index: number): Promise<JsonPatch | null> {\n const envelope = await this.adapter.get(\n this.vault,\n LEDGER_DELTAS_COLLECTION,\n paddedIndex(index),\n )\n if (!envelope) return null\n if (!this.encrypted) {\n return JSON.parse(envelope._data) as JsonPatch\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const json = await decrypt(envelope._iv, envelope._data, dek)\n return JSON.parse(json) as JsonPatch\n }\n\n /** Encrypt a JSON Patch into an envelope for storage. Mirrors encryptEntry. */\n private async encryptDelta(patch: JsonPatch): Promise<EncryptedEnvelope> {\n const json = JSON.stringify(patch)\n if (!this.encrypted) {\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: 1,\n _ts: new Date().toISOString(),\n _iv: '',\n _data: json,\n _by: this.actor,\n }\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const { iv, data } = await encrypt(json, dek)\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: 1,\n _ts: new Date().toISOString(),\n _iv: iv,\n _data: data,\n _by: this.actor,\n }\n }\n\n /**\n * Read all entries in ascending-index order. Used internally by\n * `append()`, `head()`, `verify()`, and `entries()`. 
Decryption is\n * serial because the entries are tiny and the overhead of a Promise\n * pool would dominate at realistic chain lengths (< 100K entries).\n */\n async loadAllEntries(): Promise<LedgerEntry[]> {\n const keys = await this.adapter.list(this.vault, LEDGER_COLLECTION)\n // Sort lexicographically, which matches numeric order because\n // keys are zero-padded to 10 digits.\n keys.sort()\n const entries: LedgerEntry[] = []\n for (const key of keys) {\n const envelope = await this.adapter.get(\n this.vault,\n LEDGER_COLLECTION,\n key,\n )\n if (!envelope) continue\n entries.push(await this.decryptEntry(envelope))\n }\n return entries\n }\n\n /**\n * Return the current head of the ledger: the last entry, its hash,\n * and the total chain length. `null` on an empty ledger so callers\n * can tell \"no entries yet\" apart from a real head.\n */\n async head(): Promise<\n | { readonly entry: LedgerEntry; readonly hash: string; readonly length: number }\n | null\n > {\n const cached = await this.getCachedHead()\n if (!cached) return null\n // `length` is `entry.index + 1` because indices are zero-based and\n // contiguous. We don't need to re-scan the adapter to compute it.\n return {\n entry: cached.entry,\n hash: cached.hash,\n length: cached.entry.index + 1,\n }\n }\n\n /**\n * Return entries in the requested half-open range `[from, to)`.\n * Defaults: `from = 0`, `to = length`. The indices are clipped to\n * the valid range; no error is thrown for out-of-range queries.\n */\n async entries(opts: { from?: number; to?: number } = {}): Promise<LedgerEntry[]> {\n const all = await this.loadAllEntries()\n const from = Math.max(0, opts.from ?? 0)\n const to = Math.min(all.length, opts.to ?? all.length)\n return all.slice(from, to)\n }\n\n /**\n * Reconstruct a record's state at a given historical version by\n * walking the ledger's delta chain backward from the current state.\n *\n * ## Algorithm\n *\n * Ledger deltas are stored in **reverse** form — each entry's\n * patch describes how to undo that put, transforming the new\n * record back into the previous one. `reconstruct` exploits this\n * by:\n *\n * 1. Finding every ledger entry for `(collection, id)` in the\n * chain, sorted by index ascending.\n * 2. Starting from `current` (the present value of the record,\n * as held by the caller — typically fetched via\n * `Collection.get()`).\n * 3. Walking entries in **descending** index order and applying\n * each entry's reverse patch, stopping when we reach the\n * entry whose version equals `atVersion`.\n *\n * The result is the record as it existed immediately AFTER the\n * put at `atVersion`. To get the state at the genesis put\n * (version 1), the walk runs all the way back through every put\n * after the first.\n *\n * ## Caveats\n *\n * - **Delete entries** break the walk: once we see a delete, the\n * record didn't exist before that point, so there's nothing to\n * reconstruct. We return `null` in that case.\n * - **Missing deltas** (e.g., after `pruneHistory` folds old\n * entries into a base snapshot) also stop the walk. The\n * delta-folding prune does not ship yet, so today this only\n * happens if an entry was deleted out-of-band.\n * - The caller MUST pass the correct current value. Passing a\n * mutated object would corrupt the reconstruction — the patch\n * chain is only valid against the exact state that was in\n * effect when the most recent put happened.\n *\n * For now, `reconstruct` is the only way to read a historical\n * version via deltas. 
The legacy `_history` collection still\n * holds full snapshots and `Collection.getVersion()` still reads\n * from there — the two paths coexist until pruneHistory lands in\n * a follow-up and delta becomes the default.\n */\n async reconstruct<T>(\n collection: string,\n id: string,\n current: T,\n atVersion: number,\n ): Promise<T | null> {\n const all = await this.loadAllEntries()\n // Filter to entries for this (collection, id), in ascending index.\n const matching = all.filter(\n (e) => e.collection === collection && e.id === id,\n )\n if (matching.length === 0) {\n // No ledger history at all, so there is no delta chain to walk\n // and nothing to validate atVersion against. Be conservative\n // and return null rather than guess that `current` already is\n // the requested version.\n return null\n }\n\n // Walk entries in descending index order, applying each reverse\n // delta until we reach the target version.\n let state: T | null = current\n for (let i = matching.length - 1; i >= 0; i--) {\n const entry = matching[i]\n if (!entry) continue\n\n // Match check FIRST — before applying this entry's reverse\n // patch. `state` at this point is the record state immediately\n // after this entry's put (or before this entry's delete), so\n // if the caller asked for this exact version, we're done.\n if (entry.version === atVersion && entry.op !== 'delete') {\n return state\n }\n\n if (entry.op === 'delete') {\n // A delete erases the live state. If the caller asks for a\n // version older than the delete we should continue walking\n // (state becomes null and the next put resets it). But we\n // can't reconstruct that pre-delete state from the current\n // in-memory `state` — the delete has no reverse patch. So\n // anything past this point is unreachable; return null.\n return null\n }\n\n if (entry.deltaHash === undefined) {\n // Genesis put — the earliest state for this lifecycle. We\n // can't walk further back. If the caller asked for exactly\n // this version, return the current state (we already failed\n // the match check above because a fresh genesis after a\n // delete can have version === atVersion). Otherwise the\n // target is unreachable from here.\n if (entry.version === atVersion) return state\n return null\n }\n\n const patch = await this.loadDelta(entry.index)\n if (!patch) {\n // Delta row is missing (probably pruned). Stop walking.\n return null\n }\n\n if (state === null) {\n // We're trying to walk back across a delete range and there's\n // nothing to apply a reverse patch to. Bail.\n return null\n }\n\n state = applyPatch(state, patch)\n }\n\n // Ran off the end of the walk without matching. The target\n // version doesn't exist in this record's chain.\n return null\n }\n\n /**\n * Walk the chain from genesis forward and verify every link.\n *\n * Returns `{ ok: true, head, length }` if every entry's `prevHash`\n * matches the recomputed hash of its predecessor (and the genesis\n * entry's `prevHash` is the empty string).\n *\n * Returns `{ ok: false, divergedAt, expected, actual }` on the first\n * mismatch. 
`divergedAt` is the 0-based index of the BROKEN entry\n * — entries before that index still verify cleanly; entries at and\n * after `divergedAt` are untrustworthy.\n *\n * This method detects:\n * - Mutated entry content (fields changed)\n * - Reordered entries (if any adjacent pair swaps, the prevHash\n * of the second no longer matches)\n * - Inserted entries (the inserted entry's prevHash likely fails,\n * and the following entry's prevHash definitely fails)\n * - Deleted entries (the entry after the deletion sees a wrong\n * prevHash)\n *\n * It does NOT detect:\n * - Tampering with the DATA collections that bypassed the ledger\n * entirely (e.g., an attacker who modifies records without\n * appending matching ledger entries — this is why we also\n * plan a `verifyIntegrity()` helper in a follow-up)\n * - Truncation of the chain at the tail (dropping the last N\n * entries leaves a shorter but still consistent chain). External\n * anchoring of `head.hash` to a trusted service is the defense\n * against this.\n */\n async verify(): Promise<VerifyResult> {\n const entries = await this.loadAllEntries()\n let expectedPrevHash = ''\n for (let i = 0; i < entries.length; i++) {\n const entry = entries[i]\n if (!entry) continue\n if (entry.prevHash !== expectedPrevHash) {\n return {\n ok: false,\n divergedAt: i,\n expected: expectedPrevHash,\n actual: entry.prevHash,\n }\n }\n if (entry.index !== i) {\n // An entry whose stored index doesn't match its position in\n // the sorted list means someone rewrote the adapter keys.\n // Treat as divergence.\n return {\n ok: false,\n divergedAt: i,\n expected: `index=${i}`,\n actual: `index=${entry.index}`,\n }\n }\n expectedPrevHash = await hashEntry(entry)\n }\n return {\n ok: true,\n head: expectedPrevHash,\n length: entries.length,\n }\n }\n\n // ─── Encryption plumbing ─────────────────────────────────────────\n\n /**\n * Serialize + encrypt a ledger entry into an EncryptedEnvelope. The\n * envelope's `_v` field is set to `entry.index + 1` so the usual\n * optimistic-concurrency machinery has a reasonable version number\n * to compare against (the ledger is append-only, so concurrent\n * writes should always bump the index).\n */\n private async encryptEntry(entry: LedgerEntry): Promise<EncryptedEnvelope> {\n const json = canonicalJson(entry)\n if (!this.encrypted) {\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: entry.index + 1,\n _ts: entry.ts,\n _iv: '',\n _data: json,\n _by: entry.actor,\n }\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const { iv, data } = await encrypt(json, dek)\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: entry.index + 1,\n _ts: entry.ts,\n _iv: iv,\n _data: data,\n _by: entry.actor,\n }\n }\n\n /** Decrypt an envelope into a LedgerEntry. Throws on bad key / tamper. */\n private async decryptEntry(envelope: EncryptedEnvelope): Promise<LedgerEntry> {\n if (!this.encrypted) {\n return JSON.parse(envelope._data) as LedgerEntry\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const json = await decrypt(envelope._iv, envelope._data, dek)\n return JSON.parse(json) as LedgerEntry\n }\n}\n\n// `envelopePayloadHash` was moved to `./hash.ts` so it can be\n// imported by core code without dragging this file's `LedgerStore`\n// class into the floor bundle. The re-export at the top of this\n// file keeps the original `import { envelopePayloadHash } from '.../store.js'`\n// path working.\n\n/**\n * Exponential backoff with jitter for the append CAS retry loop.\n * Attempt 0 → ~5–10 ms, attempt 7 → ~640–1280 ms. 
Jitter avoids the\n * thundering-herd problem when multiple writers collide repeatedly.\n */\nfunction sleepBackoff(attempt: number): Promise<void> {\n const base = 5 * Math.pow(2, attempt)\n const jitter = Math.random() * base\n return new Promise((resolve) => setTimeout(resolve, base + jitter))\n}\n","/**\n * Time-machine queries — point-in-time reads reconstructed from the\n * existing history + ledger infrastructure.\n *\n * ## Usage\n *\n * ```ts\n * const vault = await db.openVault('acme', { passphrase })\n * const q1End = vault.at('2026-03-31T23:59:59Z')\n * const invoice = await q1End.collection<Invoice>('invoices').get('inv-001')\n * // → the record as it stood at the close of Q1 2026\n * ```\n *\n * ## How it works\n *\n * Every write path already fans out into two persistence lanes:\n *\n * 1. `saveHistory(...)` persists a **full encrypted envelope snapshot**\n * per version under the `_history` collection (one envelope per\n * version, keyed by `{collection}:{id}:{paddedVersion}`). Each\n * envelope carries its own `_ts` (the moment the snapshot was\n * captured, not the original write time; see below).\n * 2. `ledger.append(...)` appends a hash-chained audit entry that\n * records the `op` (put / delete), `version`, and `ts`.\n *\n * Reconstruction at a target timestamp T is therefore:\n *\n * - Walk the ledger and find the latest entry for `(collection, id)`\n * whose `ts ≤ T`. The ledger's `ts` is the authoritative write\n * time (a history envelope's `_ts` is its capture time; see\n * `resolveEnvelope` below).\n * - If that entry is an `op: 'delete'`, the record was gone at T;\n * return `null`.\n * - Otherwise it names a `put` at a specific `version`. Load that\n * version's envelope from `_history` (or from the live collection\n * for the newest version) and decrypt it with the current\n * collection DEK (DEKs are per-collection but stable across\n * versions — the same key encrypts v1 and v15 of a record).\n *\n * No delta replay. The existing `history.ts` module already stores\n * complete snapshots; we just pick the right one.\n *\n * ## Read-only contract\n *\n * Every write method on `CollectionInstant` throws\n * {@link ReadOnlyAtInstantError}. A historical view is a *read*\n * surface — mutating the past would require either a branch/shadow\n * mechanism (tracked under shadow vaults) or a rewrite of\n * history, which breaks the ledger's tamper-evidence guarantee.\n *\n * @module\n */\nimport type { EncryptedEnvelope, NoydbStore } from '../types.js'\nimport type { LedgerStore } from './ledger/store.js'\nimport { getHistory } from './history.js'\nimport { decrypt } from '../crypto.js'\nimport { ReadOnlyAtInstantError } from '../errors.js'\n\n/**\n * Narrow view of a {@link Vault}'s internals that\n * {@link VaultInstant} needs. Passed in by `Vault.at()` rather than\n * constructed here so all crypto + adapter access stays inside the\n * Vault class.\n *\n * Not exported from the public barrel — consumers should get a\n * `VaultInstant` via `vault.at(ts)`, never by constructing one\n * directly.\n */\nexport interface VaultEngine {\n readonly adapter: NoydbStore\n /** Vault name (the compartment). */\n readonly name: string\n /**\n * `true` when the vault was opened with a passphrase (the normal\n * case). `false` in plaintext-mode vaults (`encrypt: false`) — in\n * that case `envelope._data` is raw JSON and we skip the DEK lookup.\n */\n readonly encrypted: boolean\n /**\n * Resolves the DEK used to decrypt a given collection's envelopes.\n * Not called when `encrypted` is false.\n */\n getDEK(collection: string): Promise<CryptoKey>\n /**\n * Lazily-initialised ledger. 
We consult it to detect deletes that\n * happened between the latest history snapshot and the target\n * timestamp. `null` when history is disabled for this vault — in\n * that case time-machine reads fall back to history-only\n * reconstruction (which may miss deletes).\n */\n getLedger(): LedgerStore | null\n}\n\n/**\n * A vault at a fixed instant. Produced by `vault.at(timestamp)`.\n * Carries no session state of its own — every read is a fresh\n * lookup through the vault's adapter.\n *\n * Cheap to construct; safe to throw away. Create one per query.\n */\nexport class VaultInstant {\n constructor(\n private readonly engine: VaultEngine,\n /** Fully-resolved target timestamp (ISO-8601 UTC). */\n public readonly timestamp: string,\n ) {}\n\n /** Get a point-in-time view of a collection. */\n collection<T = unknown>(name: string): CollectionInstant<T> {\n return new CollectionInstant<T>(this.engine, this.timestamp, name)\n }\n}\n\n/**\n * A read-only collection view anchored to a past instant.\n *\n * Every write method throws {@link ReadOnlyAtInstantError} — see the\n * module docstring for why. The read surface is intentionally smaller\n * than the live {@link Collection}: `get` and `list` cover the\n * \"what did the books look like on date X\" use case without pulling\n * in the full query DSL / joins / aggregates at this stage. The\n * richer query surface is tracked as follow-up work.\n */\nexport class CollectionInstant<T = unknown> {\n constructor(\n private readonly engine: VaultEngine,\n private readonly targetTs: string,\n public readonly name: string,\n ) {}\n\n /**\n * Return the record as it existed at the target timestamp, or\n * `null` if the record had not been created yet or had already been\n * deleted by then.\n */\n async get(id: string): Promise<T | null> {\n const envelope = await this.resolveEnvelope(id)\n if (!envelope) return null\n const plaintext = this.engine.encrypted\n ? await decrypt(envelope._iv, envelope._data, await this.engine.getDEK(this.name))\n : envelope._data\n return JSON.parse(plaintext) as T\n }\n\n /**\n * IDs of records that existed (had at least one `put` and were not\n * subsequently deleted) at the target timestamp.\n *\n * Implemented as a linear scan over history + ledger. Performance\n * is bounded by total history size (not live-vault size), so the\n * memory-first vault-scale cap (1K–50K records × average history\n * depth) still applies.\n */\n async list(): Promise<string[]> {\n const historyIds = await collectHistoryIds(this.engine.adapter, this.engine.name, this.name)\n const liveIds = await this.engine.adapter.list(this.engine.name, this.name)\n const candidateIds = new Set<string>([...historyIds, ...liveIds])\n const alive: string[] = []\n for (const id of candidateIds) {\n const env = await this.resolveEnvelope(id)\n if (env) alive.push(id)\n }\n return alive.sort()\n }\n\n // ── write guards ───────────────────────────────────────────────────\n\n async put(_id: string, _record: T): Promise<never> {\n throw new ReadOnlyAtInstantError('put', this.targetTs)\n }\n async delete(_id: string): Promise<never> {\n throw new ReadOnlyAtInstantError('delete', this.targetTs)\n }\n async update(_id: string, _patch: Partial<T>): Promise<never> {\n throw new ReadOnlyAtInstantError('update', this.targetTs)\n }\n\n // ── internals ─────────────────────────────────────────────────────\n\n /**\n * Return the envelope that represents the record's state at\n * `targetTs`, accounting for deletes. 
`null` if the record didn't\n * exist at that instant.\n *\n * ## Why we use the ledger as the authoritative timeline\n *\n * The per-version history snapshots saved by `saveHistory()` do\n * carry a `_ts` field, but that timestamp is the moment the\n * snapshot was *captured* (i.e. the instant right before the\n * subsequent overwrite), not the original write time. The ledger,\n * by contrast, records `ts` at the moment of each `put` / `delete`\n * — it's the only source that tracks the real timeline. So:\n *\n * 1. Walk the ledger; find the latest entry for `(collection, id)`\n * with `ts ≤ targetTs`.\n * 2. If that entry is a `delete`, the record was gone at the\n * target instant — return null.\n * 3. Otherwise it's a `put` with a specific `version`. Load the\n * envelope for that version from history, falling back to the\n * live collection for the most recent version.\n *\n * ## Fallback when the ledger is disabled\n *\n * If the vault has history disabled, `getLedger()` returns null and\n * we fall back to comparing envelope `_ts` fields. This is\n * approximate and gets the *last write* right but may confuse the\n * intermediate versions; adopters needing accurate time-machine\n * reads should leave history enabled.\n */\n private async resolveEnvelope(id: string): Promise<EncryptedEnvelope | null> {\n const ledger = this.engine.getLedger()\n if (ledger) {\n return this.resolveViaLedger(id, ledger)\n }\n return this.resolveViaEnvelopeTs(id)\n }\n\n private async resolveViaLedger(id: string, ledger: LedgerStore): Promise<EncryptedEnvelope | null> {\n const entries = await ledger.entries()\n // Entries are already ordered by index which is the mutation order.\n let latest: { op: 'put' | 'delete'; version: number } | null = null\n for (const e of entries) {\n if (e.collection !== this.name || e.id !== id) continue\n if (e.ts > this.targetTs) break // entries are time-ordered by index\n latest = { op: e.op, version: e.version }\n }\n if (!latest) return null\n if (latest.op === 'delete') return null\n return this.loadVersion(id, latest.version)\n }\n\n private async resolveViaEnvelopeTs(id: string): Promise<EncryptedEnvelope | null> {\n const history = await getHistory(\n this.engine.adapter, this.engine.name, this.name, id,\n )\n const live = await this.engine.adapter.get(this.engine.name, this.name, id)\n const byVersion = new Map<number, EncryptedEnvelope>()\n for (const e of history) byVersion.set(e._v, e)\n if (live) byVersion.set(live._v, live)\n const sorted = [...byVersion.values()].sort((a, b) =>\n a._ts < b._ts ? 1 : a._ts > b._ts ? -1 : 0,\n )\n return sorted.find((e) => e._ts <= this.targetTs) ?? null\n }\n\n /**\n * Fetch the envelope for a specific version. The live record (most\n * recent put) lives in the main collection; prior versions live in\n * `_history`. 
We check live first: the common current-version case is then a\n * single live hit with no history lookup at all, while the\n * after-a-delete case (the last-live version sits in `_history`)\n * costs only one cheap live miss before the direct history fetch.\n */\n private async loadVersion(id: string, version: number): Promise<EncryptedEnvelope | null> {\n const live = await this.engine.adapter.get(this.engine.name, this.name, id)\n if (live && live._v === version) return live\n\n // Direct lookup by (collection, id, version) — avoids scanning all history.\n const historyId = `${this.name}:${id}:${String(version).padStart(10, '0')}`\n return await this.engine.adapter.get(this.engine.name, '_history', historyId)\n }\n}\n\n/**\n * Scan the `_history` collection once and collect every distinct\n * `recordId` for the given collection. History keys follow the\n * shape `<collection>:<recordId>:<paddedVersion>`; we split on the\n * last two colons (delimiter-safe because `paddedVersion` is\n * exactly 10 digits).\n */\nasync function collectHistoryIds(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n): Promise<string[]> {\n const all = await adapter.list(vault, '_history')\n const prefix = `${collection}:`\n const seen = new Set<string>()\n for (const key of all) {\n if (!key.startsWith(prefix)) continue\n const lastColon = key.lastIndexOf(':')\n if (lastColon <= prefix.length) continue\n const middle = key.slice(prefix.length, lastColon)\n seen.add(middle)\n }\n return [...seen]\n}\n","/**\n * Active history strategy — `withHistory()` returns the real\n * implementation that wires per-record snapshots, the hash-chained\n * audit ledger, JSON-patch deltas, and time-machine reads into the\n * core write/read paths.\n *\n * Consumers opt in by:\n *\n * ```ts\n * import { createNoydb } from '@noy-db/hub'\n * import { withHistory } from '@noy-db/hub/history'\n *\n * const db = await createNoydb({\n * store: ...,\n * user: ...,\n * historyStrategy: withHistory(),\n * })\n * ```\n *\n * The factory is a thin wrapper that delegates to the existing\n * `history.ts`, `diff.ts`, `ledger/store.ts`, `ledger/patch.ts`, and\n * `time-machine.ts` modules. Splitting the import chain through this\n * file is what lets `tsup` tree-shake the heavy modules out of the\n * default `@noy-db/hub` bundle when no `withHistory()` import is\n * present in the consumer.\n *\n * @public\n */\n\nimport type { HistoryStrategy, BuildLedgerOptions } from './strategy.js'\nimport {\n saveHistory,\n getHistory,\n getVersionEnvelope,\n pruneHistory,\n clearHistory,\n} from './history.js'\nimport { diff as computeDiff } from './diff.js'\nimport { LedgerStore, envelopePayloadHash } from './ledger/store.js'\nimport { computePatch } from './ledger/patch.js'\nimport { VaultInstant } from './time-machine.js'\n\n/**\n * Build the active history strategy. 
Today the factory takes no\n * options; per-collection retention tuning still flows through\n * `HistoryConfig` on `Vault.collection()` / `vault.openVault()`.\n *\n * Future option slots (kept off the LTS surface for now):\n * - global maxVersions cap\n * - global beforeDate prune cadence\n * - ledger encryption toggle (today inferred from vault.encrypted)\n */\nexport function withHistory(): HistoryStrategy {\n return {\n saveHistory,\n getHistoryEntries: getHistory,\n getVersionEnvelope,\n pruneHistory,\n clearHistory,\n envelopePayloadHash,\n computePatch,\n diff: computeDiff,\n buildLedger(opts: BuildLedgerOptions): LedgerStore {\n return new LedgerStore({\n adapter: opts.adapter,\n vault: opts.vault,\n encrypted: opts.encrypted,\n getDEK: opts.getDEK,\n actor: opts.actor,\n })\n },\n buildVaultInstant(engine, timestamp): VaultInstant {\n return new VaultInstant(engine, timestamp)\n },\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACSA,IAAM,qBAAqB;AAC3B,IAAM,cAAc;AAEpB,SAAS,UAAU,YAAoB,UAAkB,SAAyB;AAChF,SAAO,GAAG,UAAU,IAAI,QAAQ,IAAI,OAAO,OAAO,EAAE,SAAS,aAAa,GAAG,CAAC;AAChF;AAkBA,SAAS,cAAc,IAAY,YAAoB,UAA4B;AACjF,MAAI,UAAU;AACZ,WAAO,GAAG,WAAW,GAAG,UAAU,IAAI,QAAQ,GAAG;AAAA,EACnD;AACA,SAAO,GAAG,WAAW,GAAG,UAAU,GAAG;AACvC;AAGA,eAAsB,YACpB,SACA,OACA,YACA,UACA,UACe;AACf,QAAM,KAAK,UAAU,YAAY,UAAU,SAAS,EAAE;AACtD,QAAM,QAAQ,IAAI,OAAO,oBAAoB,IAAI,QAAQ;AAC3D;AAGA,eAAsB,WACpB,SACA,OACA,YACA,UACA,SAC8B;AAC9B,QAAM,SAAS,MAAM,QAAQ,KAAK,OAAO,kBAAkB;AAC3D,QAAM,cAAc,OACjB,OAAO,QAAM,cAAc,IAAI,YAAY,QAAQ,CAAC,EACpD,KAAK,EACL,QAAQ;AAEX,QAAM,UAA+B,CAAC;AAEtC,aAAW,MAAM,aAAa;AAC5B,UAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,oBAAoB,EAAE;AAChE,QAAI,CAAC,SAAU;AAGf,QAAI,SAAS,QAAQ,SAAS,MAAM,QAAQ,KAAM;AAClD,QAAI,SAAS,MAAM,SAAS,MAAM,QAAQ,GAAI;AAE9C,YAAQ,KAAK,QAAQ;AAErB,QAAI,SAAS,SAAS,QAAQ,UAAU,QAAQ,MAAO;AAAA,EACzD;AAEA,SAAO;AACT;AAGA,eAAsB,mBACpB,SACA,OACA,YACA,UACA,SACmC;AACnC,QAAM,KAAK,UAAU,YAAY,UAAU,OAAO;AAClD,SAAO,QAAQ,IAAI,OAAO,oBAAoB,EAAE;AAClD;AAGA,eAAsB,aACpB,SACA,OACA,YACA,UACA,SACiB;AACjB,QAAM,SAAS,MAAM,QAAQ,KAAK,OAAO,kBAAkB;AAC3D,QAAM,cAAc,OACjB,OAAO,QAAM,WAAW,cAAc,IAAI,YAAY,QAAQ,IAAI,cAAc,IAAI,UAAU,CAAC,EAC/F,KAAK;AAER,MAAI,WAAqB,CAAC;AAE1B,MAAI,QAAQ,iBAAiB,QAAW;AAEtC,UAAM,OAAO,QAAQ;AACrB,QAAI,YAAY,SAAS,MAAM;AAC7B,iBAAW,YAAY,MAAM,GAAG,YAAY,SAAS,IAAI;AAAA,IAC3D;AAAA,EACF;AAEA,MAAI,QAAQ,YAAY;AAEtB,eAAW,MAAM,aAAa;AAC5B,UAAI,SAAS,SAAS,EAAE,EAAG;AAC3B,YAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,oBAAoB,EAAE;AAChE,UAAI,YAAY,SAAS,MAAM,QAAQ,YAAY;AACjD,iBAAS,KAAK,EAAE;AAAA,MAClB;AAAA,IACF;AAAA,EACF;AAGA,QAAM,gBAAgB,CAAC,GAAG,IAAI,IAAI,QAAQ,CAAC;AAE3C,aAAW,MAAM,eAAe;AAC9B,UAAM,QAAQ,OAAO,OAAO,oBAAoB,EAAE;AAAA,EACpD;AAEA,SAAO,cAAc;AACvB;AAGA,eAAsB,aACpB,SACA,OACA,YACA,UACiB;AACjB,QAAM,SAAS,MAAM,QAAQ,KAAK,OAAO,kBAAkB;AAC3D,MAAI;AAEJ,MAAI,cAAc,UAAU;AAC1B,eAAW,OAAO,OAAO,QAAM,cAAc,IAAI,YAAY,QAAQ,CAAC;AAAA,EACxE,WAAW,YAAY;AACrB,eAAW,OAAO,OAAO,QAAM,cAAc,IAAI,UAAU,CAAC;AAAA,EAC9D,OAAO;AACL,eAAW;AAAA,EACb;AAEA,aAAW,MAAM,UAAU;AACzB,UAAM,QAAQ,OAAO,OAAO,oBAAoB,EAAE;AAAA,EACpD;AAEA,SAAO,SAAS;AAClB;;;AC3IO,SAAS,KAAK,QAAiB,QAAiB,WAAW,IAAiB;AACjF,QAAM,UAAuB,CAAC;AAG9B,MAAI,WAAW,OAAQ,QAAO;AAG9B,MAAI,UAAU,QAAQ,UAAU,MAAM;AACpC,WAAO,CAAC,EAAE,MAAM,YAAY,UAAU,MAAM,SAAS,IAAI,OAAO,CAAC;AAAA,EACnE;AACA,MAAI,UAAU,QAAQ,UAAU,MAAM;AACpC,WAAO,CAAC,EAAE,MAAM,YAAY,UAAU,MAAM,WAAW,MAAM,OAAO,CAAC;AAAA,EACvE;AAGA,MAAI,OAAO,WAAW,OAAO,QAAQ;AACnC,WAAO,CAAC,EAAE,MAAM,YAAY,UAAU,MAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,CAAC;AAAA,EACnF;AAGA,MAAI,OAAO,WAAW,UAAU;AAC9B,WAAO,CAAC,EAAE,
MAAM,YAAY,UAAU,MAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,CAAC;AAAA,EACnF;AAGA,MAAI,MAAM,QAAQ,MAAM,KAAK,MAAM,QAAQ,MAAM,GAAG;AAClD,UAAM,SAAS,KAAK,IAAI,OAAO,QAAQ,OAAO,MAAM;AACpD,aAAS,IAAI,GAAG,IAAI,QAAQ,KAAK;AAC/B,YAAM,IAAI,WAAW,GAAG,QAAQ,IAAI,CAAC,MAAM,IAAI,CAAC;AAChD,UAAI,KAAK,OAAO,QAAQ;AACtB,gBAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,SAAS,IAAI,OAAO,CAAC,EAAE,CAAC;AAAA,MACxD,WAAW,KAAK,OAAO,QAAQ;AAC7B,gBAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,WAAW,MAAM,OAAO,CAAC,EAAE,CAAC;AAAA,MAC5D,OAAO;AACL,gBAAQ,KAAK,GAAG,KAAK,OAAO,CAAC,GAAG,OAAO,CAAC,GAAG,CAAC,CAAC;AAAA,MAC/C;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAGA,QAAM,YAAY;AAClB,QAAM,YAAY;AAClB,QAAM,UAAU,oBAAI,IAAI,CAAC,GAAG,OAAO,KAAK,SAAS,GAAG,GAAG,OAAO,KAAK,SAAS,CAAC,CAAC;AAE9E,aAAW,OAAO,SAAS;AACzB,UAAM,IAAI,WAAW,GAAG,QAAQ,IAAI,GAAG,KAAK;AAC5C,QAAI,EAAE,OAAO,YAAY;AACvB,cAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,SAAS,IAAI,UAAU,GAAG,EAAE,CAAC;AAAA,IAC7D,WAAW,EAAE,OAAO,YAAY;AAC9B,cAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,WAAW,MAAM,UAAU,GAAG,EAAE,CAAC;AAAA,IACjE,OAAO;AACL,cAAQ,KAAK,GAAG,KAAK,UAAU,GAAG,GAAG,UAAU,GAAG,GAAG,CAAC,CAAC;AAAA,IACzD;AAAA,EACF;AAEA,SAAO;AACT;AAGO,SAAS,WAAW,SAA8B;AACvD,MAAI,QAAQ,WAAW,EAAG,QAAO;AACjC,SAAO,QAAQ,IAAI,OAAK;AACtB,YAAQ,EAAE,MAAM;AAAA,MACd,KAAK;AACH,eAAO,KAAK,EAAE,IAAI,KAAK,KAAK,UAAU,EAAE,EAAE,CAAC;AAAA,MAC7C,KAAK;AACH,eAAO,KAAK,EAAE,IAAI,KAAK,KAAK,UAAU,EAAE,IAAI,CAAC;AAAA,MAC/C,KAAK;AACH,eAAO,KAAK,EAAE,IAAI,KAAK,KAAK,UAAU,EAAE,IAAI,CAAC,WAAM,KAAK,UAAU,EAAE,EAAE,CAAC;AAAA,IAC3E;AAAA,EACF,CAAC,EAAE,KAAK,IAAI;AACd;;;ACtDO,IAAM,uBAAuB;;;ACmC7B,IAAM,aAAN,cAAyB,MAAM;AAAA;AAAA,EAE3B;AAAA,EAET,YAAY,MAAc,SAAiB;AACzC,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,OAAO;AAAA,EACd;AACF;AAYO,IAAM,kBAAN,cAA8B,WAAW;AAAA,EAC9C,YAAY,UAAU,qBAAqB;AACzC,UAAM,qBAAqB,OAAO;AAClC,SAAK,OAAO;AAAA,EACd;AACF;AAWO,IAAM,gBAAN,cAA4B,WAAW;AAAA,EAC5C,YAAY,UAAU,yEAAoE;AACxF,UAAM,YAAY,OAAO;AACzB,SAAK,OAAO;AAAA,EACd;AACF;AA6DO,IAAM,yBAAN,cAAqC,WAAW;AAAA,EACrD,YAAY,WAAmB,WAAmB;AAChD;AAAA,MACE;AAAA,MACA,UAAU,SAAS,kCAAkC,SAAS;AAAA,IAChE;AACA,SAAK,OAAO;AAAA,EACd;AACF;AA4WO,IAAM,gBAAN,cAA4B,WAAW;AAAA;AAAA,EAEnC;AAAA,EAET,YAAY,SAAiB,UAAU,oBAAoB;AACzD,UAAM,YAAY,OAAO;AACzB,SAAK,OAAO;AACZ,SAAK,UAAU;AAAA,EACjB;AACF;AAcO,IAAM,wBAAN,cAAoC,WAAW;AAAA,EAC3C;AAAA,EAET,YAAY,UAAkB;AAC5B;AAAA,MACE;AAAA,MACA,0DAA0D,QAAQ;AAAA,IACpE;AACA,SAAK,OAAO;AACZ,SAAK,WAAW;AAAA,EAClB;AACF;;;AC/hBA,IAAM,WAAW;AAGjB,IAAM,SAAS,WAAW,OAAO;AA8EjC,eAAsB,QACpB,WACA,KACwB;AACxB,QAAM,KAAK,WAAW;AACtB,QAAM,UAAU,IAAI,YAAY,EAAE,OAAO,SAAS;AAElD,QAAM,aAAa,MAAM,OAAO;AAAA,IAC9B,EAAE,MAAM,WAAW,GAAuB;AAAA,IAC1C;AAAA,IACA;AAAA,EACF;AAEA,SAAO;AAAA,IACL,IAAI,eAAe,EAAE;AAAA,IACrB,MAAM,eAAe,UAAU;AAAA,EACjC;AACF;AAGA,eAAsB,QACpB,UACA,YACA,KACiB;AACjB,QAAM,KAAK,eAAe,QAAQ;AAClC,QAAM,aAAa,eAAe,UAAU;AAE5C,MAAI;AACF,UAAM,YAAY,MAAM,OAAO;AAAA,MAC7B,EAAE,MAAM,WAAW,GAAuB;AAAA,MAC1C;AAAA,MACA;AAAA,IACF;AACA,WAAO,IAAI,YAAY,EAAE,OAAO,SAAS;AAAA,EAC3C,SAAS,KAAK;AACZ,QAAI,eAAe,SAAS,IAAI,SAAS,kBAAkB;AACzD,YAAM,IAAI,cAAc;AAAA,IAC1B;AACA,UAAM,IAAI;AAAA,MACR,eAAe,QAAQ,IAAI,UAAU;AAAA,IACvC;AAAA,EACF;AACF;AAkTO,SAAS,aAAyB;AACvC,SAAO,WAAW,OAAO,gBAAgB,IAAI,WAAW,QAAQ,CAAC;AACnE;AASO,SAAS,eAAe,QAA0C;AACvE,QAAM,QAAQ,kBAAkB,aAAa,SAAS,IAAI,WAAW,MAAM;AAC3E,MAAI,SAAS;AACb,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AACrC,cAAU,OAAO,aAAa,MAAM,CAAC,CAAE;AAAA,EACzC;AACA,SAAO,KAAK,MAAM;AACpB;AAEO,SAAS,eAAe,QAAyC;AACtE,QAAM,SAAS,KAAK,MAAM;AAC1B,QAAM,QAAQ,IAAI,WAAW,OAAO,MAAM;AAC1C,WAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,UAAM,CAAC,IAAI,OAAO,WAAW,CAAC;AAAA,EAChC;AACA,SAAO;AACT;;;AClVO,SAAS,cAAc,OAAwB;AACpD,MAAI,UAAU,KAAM,QAAO;AAC3B,MAAI,OAAO,UAAU,UAAW,QAAO,QAAQ,SAAS;AACxD,MAAI,OAAO,UAAU,UAAU;AAC7B,QAAI,CAAC,OAAO,SAAS,KAAK,GAAG;AAC3B,YAAM
,IAAI;AAAA,QACR,uDAAuD,OAAO,KAAK,CAAC;AAAA,MACtE;AAAA,IACF;AACA,WAAO,KAAK,UAAU,KAAK;AAAA,EAC7B;AACA,MAAI,OAAO,UAAU,SAAU,QAAO,KAAK,UAAU,KAAK;AAC1D,MAAI,OAAO,UAAU,UAAU;AAC7B,UAAM,IAAI,MAAM,gDAAgD;AAAA,EAClE;AACA,MAAI,OAAO,UAAU,eAAe,OAAO,UAAU,YAAY;AAC/D,UAAM,IAAI;AAAA,MACR,qCAAqC,OAAO,KAAK;AAAA,IACnD;AAAA,EACF;AACA,MAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,WAAO,MAAM,MAAM,IAAI,CAAC,MAAM,cAAc,CAAC,CAAC,EAAE,KAAK,GAAG,IAAI;AAAA,EAC9D;AACA,MAAI,OAAO,UAAU,UAAU;AAC7B,UAAM,MAAM;AACZ,UAAM,OAAO,OAAO,KAAK,GAAG,EAAE,KAAK;AACnC,UAAM,QAAkB,CAAC;AACzB,eAAW,OAAO,MAAM;AACtB,YAAM,KAAK,KAAK,UAAU,GAAG,IAAI,MAAM,cAAc,IAAI,GAAG,CAAC,CAAC;AAAA,IAChE;AACA,WAAO,MAAM,MAAM,KAAK,GAAG,IAAI;AAAA,EACjC;AACA,QAAM,IAAI,MAAM,yCAAyC,OAAO,KAAK,EAAE;AACzE;AAUA,eAAsB,UAAU,OAAgC;AAC9D,QAAM,QAAQ,IAAI,YAAY,EAAE,OAAO,KAAK;AAC5C,QAAM,SAAS,MAAM,WAAW,OAAO,OAAO,OAAO,WAAW,KAAK;AACrE,SAAO,WAAW,IAAI,WAAW,MAAM,CAAC;AAC1C;AAQA,eAAsB,UAAU,OAAqC;AACnE,SAAO,UAAU,cAAc,KAAK,CAAC;AACvC;AAGA,SAAS,WAAW,OAA2B;AAC7C,QAAM,MAAM,IAAI,MAAc,MAAM,MAAM;AAC1C,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AAIrC,QAAI,CAAC,KAAK,MAAM,CAAC,KAAK,GAAG,SAAS,EAAE,EAAE,SAAS,GAAG,GAAG;AAAA,EACvD;AACA,SAAO,IAAI,KAAK,EAAE;AACpB;AAQO,SAAS,YAAY,OAAuB;AACjD,SAAO,OAAO,KAAK,EAAE,SAAS,IAAI,GAAG;AACvC;AAGO,SAAS,WAAW,KAAqB;AAC9C,SAAO,OAAO,SAAS,KAAK,EAAE;AAChC;;;ACzKO,SAAS,aAAa,MAAe,MAA0B;AACpE,QAAM,MAAqB,CAAC;AAC5B,EAAAA,MAAK,MAAM,MAAM,IAAI,GAAG;AACxB,SAAO;AACT;AAEA,SAASA,MACP,MACA,MACA,MACA,KACM;AAGN,MAAI,SAAS,KAAM;AAGnB,MAAI,SAAS,QAAQ,SAAS,MAAM;AAClC,QAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAC7C;AAAA,EACF;AAEA,QAAM,cAAc,MAAM,QAAQ,IAAI;AACtC,QAAM,cAAc,MAAM,QAAQ,IAAI;AACtC,QAAM,eAAe,OAAO,SAAS,YAAY,CAAC;AAClD,QAAM,eAAe,OAAO,SAAS,YAAY,CAAC;AAGlD,MAAI,gBAAgB,eAAe,iBAAiB,cAAc;AAChE,QAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAC7C;AAAA,EACF;AAKA,MAAI,eAAe,aAAa;AAC9B,QAAI,CAAC,eAAe,MAAmB,IAAiB,GAAG;AACzD,UAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAAA,IAC/C;AACA;AAAA,EACF;AAGA,MAAI,gBAAgB,cAAc;AAChC,UAAM,UAAU;AAChB,UAAM,UAAU;AAChB,UAAM,WAAW,OAAO,KAAK,OAAO;AACpC,UAAM,WAAW,OAAO,KAAK,OAAO;AAGpC,eAAW,OAAO,UAAU;AAC1B,YAAM,YAAY,OAAO,MAAM,kBAAkB,GAAG;AACpD,UAAI,EAAE,OAAO,UAAU;AACrB,YAAI,KAAK,EAAE,IAAI,UAAU,MAAM,UAAU,CAAC;AAAA,MAC5C,OAAO;AACL,QAAAA,MAAK,QAAQ,GAAG,GAAG,QAAQ,GAAG,GAAG,WAAW,GAAG;AAAA,MACjD;AAAA,IACF;AAEA,eAAW,OAAO,UAAU;AAC1B,UAAI,EAAE,OAAO,UAAU;AACrB,YAAI,KAAK;AAAA,UACP,IAAI;AAAA,UACJ,MAAM,OAAO,MAAM,kBAAkB,GAAG;AAAA,UACxC,OAAO,QAAQ,GAAG;AAAA,QACpB,CAAC;AAAA,MACH;AAAA,IACF;AACA;AAAA,EACF;AAGA,MAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAC/C;AAEA,SAAS,eAAe,GAAc,GAAuB;AAC3D,MAAI,EAAE,WAAW,EAAE,OAAQ,QAAO;AAClC,WAAS,IAAI,GAAG,IAAI,EAAE,QAAQ,KAAK;AACjC,QAAI,CAAC,UAAU,EAAE,CAAC,GAAG,EAAE,CAAC,CAAC,EAAG,QAAO;AAAA,EACrC;AACA,SAAO;AACT;AAEA,SAAS,UAAU,GAAY,GAAqB;AAClD,MAAI,MAAM,EAAG,QAAO;AACpB,MAAI,MAAM,QAAQ,MAAM,KAAM,QAAO;AACrC,MAAI,OAAO,MAAM,OAAO,EAAG,QAAO;AAClC,MAAI,OAAO,MAAM,SAAU,QAAO;AAClC,QAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,QAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,MAAI,WAAW,OAAQ,QAAO;AAC9B,MAAI,UAAU,OAAQ,QAAO,eAAe,GAAG,CAAc;AAC7D,QAAM,OAAO;AACb,QAAM,OAAO;AACb,QAAM,QAAQ,OAAO,KAAK,IAAI;AAC9B,QAAM,QAAQ,OAAO,KAAK,IAAI;AAC9B,MAAI,MAAM,WAAW,MAAM,OAAQ,QAAO;AAC1C,aAAW,OAAO,OAAO;AACvB,QAAI,EAAE,OAAO,MAAO,QAAO;AAC3B,QAAI,CAAC,UAAU,KAAK,GAAG,GAAG,KAAK,GAAG,CAAC,EAAG,QAAO;AAAA,EAC/C;AACA,SAAO;AACT;AAsBO,SAAS,WAAwB,MAAS,OAAqB;AACpE,MAAI,SAAkB,MAAM,IAAI;AAChC,aAAW,MAAM,OAAO;AACtB,aAAS,QAAQ,QAAQ,EAAE;AAAA,EAC7B;AACA,SAAO;AACT;AAEA,SAAS,QAAQ,KAAc,IAA0B;AAIvD,MAAI,GAAG,SAAS,IAAI;AAClB,QAAI,GAAG,OAAO,SAAU,QAAO;AAC/B,WAAO,MAAM,GAAG,KAAK;AAAA,EACvB;AAEA,QAAM,WAAW,UAAU,GAAG,IAAI;AAClC,SAAO,aAAa,KAAK,UAAU,EAAE;AACvC;AAEA,SAAS,aACP,K
ACA,UACA,IACS;AACT,MAAI,SAAS,WAAW,GAAG;AAEzB,UAAM,IAAI,MAAM,+CAA+C;AAAA,EACjE;AAEA,QAAM,CAAC,MAAM,GAAG,IAAI,IAAI;AACxB,MAAI,SAAS,OAAW,OAAM,IAAI,MAAM,iCAAiC;AAEzE,MAAI,KAAK,WAAW,GAAG;AACrB,WAAO,gBAAgB,KAAK,MAAM,EAAE;AAAA,EACtC;AAIA,MAAI,MAAM,QAAQ,GAAG,GAAG;AACtB,UAAM,MAAM,gBAAgB,MAAM,IAAI,MAAM;AAC5C,UAAM,QAAQ,IAAI,GAAG;AACrB,UAAM,WAAW,aAAa,OAAO,MAAM,EAAE;AAC7C,UAAM,OAAO,IAAI,MAAM;AACvB,SAAK,GAAG,IAAI;AACZ,WAAO;AAAA,EACT;AACA,MAAI,QAAQ,QAAQ,OAAO,QAAQ,UAAU;AAC3C,UAAM,MAAM;AACZ,QAAI,EAAE,QAAQ,MAAM;AAClB,YAAM,IAAI,MAAM,6BAA6B,IAAI,uBAAuB;AAAA,IAC1E;AACA,UAAM,WAAW,aAAa,IAAI,IAAI,GAAG,MAAM,EAAE;AACjD,WAAO,EAAE,GAAG,KAAK,CAAC,IAAI,GAAG,SAAS;AAAA,EACpC;AACA,QAAM,IAAI;AAAA,IACR,gCAAgC,OAAO,GAAG,gBAAgB,IAAI;AAAA,EAChE;AACF;AAEA,SAAS,gBACP,KACA,SACA,IACS;AACT,MAAI,MAAM,QAAQ,GAAG,GAAG;AACtB,UAAM,MACJ,YAAY,MAAM,IAAI,SAAS,gBAAgB,SAAS,IAAI,SAAS,CAAC;AACxE,UAAM,OAAO,IAAI,MAAM;AACvB,QAAI,GAAG,OAAO,UAAU;AACtB,WAAK,OAAO,KAAK,CAAC;AAClB,aAAO;AAAA,IACT;AACA,QAAI,GAAG,OAAO,OAAO;AACnB,WAAK,OAAO,KAAK,GAAG,MAAM,GAAG,KAAK,CAAC;AACnC,aAAO;AAAA,IACT;AACA,QAAI,GAAG,OAAO,WAAW;AACvB,UAAI,OAAO,IAAI,QAAQ;AACrB,cAAM,IAAI;AAAA,UACR,oDAAoD,GAAG;AAAA,QACzD;AAAA,MACF;AACA,WAAK,GAAG,IAAI,MAAM,GAAG,KAAK;AAC1B,aAAO;AAAA,IACT;AAAA,EACF;AACA,MAAI,QAAQ,QAAQ,OAAO,QAAQ,UAAU;AAC3C,UAAM,MAAM;AACZ,QAAI,GAAG,OAAO,UAAU;AACtB,UAAI,EAAE,WAAW,MAAM;AACrB,cAAM,IAAI;AAAA,UACR,sCAAsC,OAAO;AAAA,QAC/C;AAAA,MACF;AACA,YAAM,OAAO,EAAE,GAAG,IAAI;AACtB,aAAO,KAAK,OAAO;AACnB,aAAO;AAAA,IACT;AACA,QAAI,GAAG,OAAO,OAAO;AAEnB,aAAO,EAAE,GAAG,KAAK,CAAC,OAAO,GAAG,MAAM,GAAG,KAAK,EAAE;AAAA,IAC9C;AACA,QAAI,GAAG,OAAO,WAAW;AACvB,UAAI,EAAE,WAAW,MAAM;AACrB,cAAM,IAAI;AAAA,UACR,uCAAuC,OAAO;AAAA,QAChD;AAAA,MACF;AACA,aAAO,EAAE,GAAG,KAAK,CAAC,OAAO,GAAG,MAAM,GAAG,KAAK,EAAE;AAAA,IAC9C;AAAA,EACF;AACA,QAAM,IAAI;AAAA,IACR,4BAA4B,GAAG,EAAE,yBAAyB,OAAO;AAAA,EACnE;AACF;AAYA,SAAS,kBAAkB,SAAyB;AAClD,SAAO,QAAQ,QAAQ,MAAM,IAAI,EAAE,QAAQ,OAAO,IAAI;AACxD;AAEA,SAAS,oBAAoB,SAAyB;AACpD,SAAO,QAAQ,QAAQ,OAAO,GAAG,EAAE,QAAQ,OAAO,GAAG;AACvD;AAEA,SAAS,UAAU,MAAwB;AACzC,MAAI,CAAC,KAAK,WAAW,GAAG,GAAG;AACzB,UAAM,IAAI,MAAM,8CAA8C,IAAI,GAAG;AAAA,EACvE;AACA,SAAO,KACJ,MAAM,CAAC,EACP,MAAM,GAAG,EACT,IAAI,mBAAmB;AAC5B;AAEA,SAAS,gBAAgB,SAAiB,KAAqB;AAC7D,MAAI,CAAC,QAAQ,KAAK,OAAO,GAAG;AAC1B,UAAM,IAAI;AAAA,MACR,gEAAgE,OAAO;AAAA,IACzE;AAAA,EACF;AACA,QAAM,MAAM,OAAO,SAAS,SAAS,EAAE;AACvC,MAAI,MAAM,KAAK,MAAM,KAAK;AACxB,UAAM,IAAI;AAAA,MACR,2BAA2B,GAAG,qBAAqB,GAAG;AAAA,IACxD;AAAA,EACF;AACA,SAAO;AACT;AAeA,SAAS,MAAS,OAAa;AAC7B,MAAI,UAAU,QAAQ,UAAU,OAAW,QAAO;AAClD,MAAI,OAAO,UAAU,SAAU,QAAO;AACtC,SAAO,KAAK,MAAM,KAAK,UAAU,KAAK,CAAC;AACzC;;;AC5WO,IAAM,oBAAoB;AAuB1B,IAAM,2BAA2B;;;ACfxC,eAAsB,oBACpB,UACiB;AACjB,MAAI,CAAC,SAAU,QAAO;AAMtB,SAAO,UAAU,SAAS,KAAK;AACjC;;;ACoCA,IAAM,sBAAsB;AA4DrB,IAAM,cAAN,MAAkB;AAAA,EACN;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBT,YAAqE;AAAA,EAE7E,YAAY,MAMT;AACD,SAAK,UAAU,KAAK;AACpB,SAAK,QAAQ,KAAK;AAClB,SAAK,YAAY,KAAK;AACtB,SAAK,SAAS,KAAK;AACnB,SAAK,QAAQ,KAAK;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAc,gBAAsE;AAClF,QAAI,KAAK,cAAc,OAAW,QAAO,KAAK;AAC9C,UAAM,UAAU,MAAM,KAAK,eAAe;AAC1C,UAAM,OAAO,QAAQ,QAAQ,SAAS,CAAC;AACvC,QAAI,CAAC,MAAM;AACT,WAAK,YAAY;AACjB,aAAO;AAAA,IACT;AACA,SAAK,YAAY,EAAE,OAAO,MAAM,MAAM,MAAM,UAAU,IAAI,EAAE;AAC5D,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CA,MAAM,OAAO,OAA0C;AACrD,QAAI;A
ACJ,aAAS,UAAU,GAAG,UAAU,qBAAqB,WAAW;AAI9D,UAAI,UAAU,GAAG;AACf,aAAK,YAAY;AAAA,MACnB;AACA,UAAI;AACF,eAAO,MAAM,KAAK,WAAW,KAAK;AAAA,MACpC,SAAS,KAAK;AACZ,YAAI,eAAe,eAAe;AAChC,yBAAe;AACf,cAAI,UAAU,sBAAsB,GAAG;AACrC,kBAAM,aAAa,OAAO;AAAA,UAC5B;AACA;AAAA,QACF;AACA,cAAM;AAAA,MACR;AAAA,IACF;AACA,SAAK;AACL,UAAM,IAAI,sBAAsB,mBAAmB;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAc,WAAW,OAA0C;AACjE,UAAM,SAAS,MAAM,KAAK,cAAc;AACxC,UAAM,YAAY,QAAQ;AAC1B,UAAM,WAAW,QAAQ,QAAQ;AACjC,UAAM,YAAY,YAAY,UAAU,QAAQ,IAAI;AAIpD,QAAI;AACJ,QAAI;AACJ,QAAI,MAAM,UAAU,QAAW;AAC7B,sBAAgB,MAAM,KAAK,aAAa,MAAM,KAAK;AACnD,kBAAY,MAAM,UAAU,cAAc,KAAK;AAAA,IACjD;AAKA,UAAM,YAAY;AAAA,MAChB,OAAO;AAAA,MACP;AAAA,MACA,IAAI,MAAM;AAAA,MACV,YAAY,MAAM;AAAA,MAClB,IAAI,MAAM;AAAA,MACV,SAAS,MAAM;AAAA,MACf,KAAI,oBAAI,KAAK,GAAE,YAAY;AAAA,MAC3B,OAAO,MAAM,UAAU,KAAK,KAAK,QAAQ,MAAM;AAAA,MAC/C,aAAa,MAAM;AAAA,IACrB;AACA,UAAM,QACJ,cAAc,SACV,EAAE,GAAG,WAAW,UAAU,IAC1B;AAEN,UAAM,WAAW,MAAM,KAAK,aAAa,KAAK;AAG9C,UAAM,KAAK,QAAQ;AAAA,MACjB,KAAK;AAAA,MACL;AAAA,MACA,YAAY,MAAM,KAAK;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAGA,QAAI,eAAe;AACjB,YAAM,KAAK,QAAQ;AAAA,QACjB,KAAK;AAAA,QACL;AAAA,QACA,YAAY,MAAM,KAAK;AAAA,QACvB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAIA,SAAK,YAAY,EAAE,OAAO,MAAM,MAAM,UAAU,KAAK,EAAE;AACvD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,UAAU,OAA0C;AACxD,UAAM,WAAW,MAAM,KAAK,QAAQ;AAAA,MAClC,KAAK;AAAA,MACL;AAAA,MACA,YAAY,KAAK;AAAA,IACnB;AACA,QAAI,CAAC,SAAU,QAAO;AACtB,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO,KAAK,MAAM,SAAS,KAAK;AAAA,IAClC;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,OAAO,MAAM,QAAQ,SAAS,KAAK,SAAS,OAAO,GAAG;AAC5D,WAAO,KAAK,MAAM,IAAI;AAAA,EACxB;AAAA;AAAA,EAGA,MAAc,aAAa,OAA8C;AACvE,UAAM,OAAO,KAAK,UAAU,KAAK;AACjC,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,IAAI;AAAA,QACJ,MAAK,oBAAI,KAAK,GAAE,YAAY;AAAA,QAC5B,KAAK;AAAA,QACL,OAAO;AAAA,QACP,KAAK,KAAK;AAAA,MACZ;AAAA,IACF;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,EAAE,IAAI,KAAK,IAAI,MAAM,QAAQ,MAAM,GAAG;AAC5C,WAAO;AAAA,MACL,QAAQ;AAAA,MACR,IAAI;AAAA,MACJ,MAAK,oBAAI,KAAK,GAAE,YAAY;AAAA,MAC5B,KAAK;AAAA,MACL,OAAO;AAAA,MACP,KAAK,KAAK;AAAA,IACZ;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,iBAAyC;AAC7C,UAAM,OAAO,MAAM,KAAK,QAAQ,KAAK,KAAK,OAAO,iBAAiB;AAGlE,SAAK,KAAK;AACV,UAAM,UAAyB,CAAC;AAChC,eAAW,OAAO,MAAM;AACtB,YAAM,WAAW,MAAM,KAAK,QAAQ;AAAA,QAClC,KAAK;AAAA,QACL;AAAA,QACA;AAAA,MACF;AACA,UAAI,CAAC,SAAU;AACf,cAAQ,KAAK,MAAM,KAAK,aAAa,QAAQ,CAAC;AAAA,IAChD;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,OAGJ;AACA,UAAM,SAAS,MAAM,KAAK,cAAc;AACxC,QAAI,CAAC,OAAQ,QAAO;AAGpB,WAAO;AAAA,MACL,OAAO,OAAO;AAAA,MACd,MAAM,OAAO;AAAA,MACb,QAAQ,OAAO,MAAM,QAAQ;AAAA,IAC/B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,QAAQ,OAAuC,CAAC,GAA2B;AAC/E,UAAM,MAAM,MAAM,KAAK,eAAe;AACtC,UAAM,OAAO,KAAK,IAAI,GAAG,KAAK,QAAQ,CAAC;AACvC,UAAM,KAAK,KAAK,IAAI,IAAI,QAAQ,KAAK,MAAM,IAAI,MAAM;AACrD,WAAO,IAAI,MAAM,MAAM,EAAE;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA+CA,MAAM,YACJ,YACA,IACA,SACA,WACmB;AACnB,UAAM,MAAM,MAAM,KAAK,eAAe;AAEtC,UAAM,WAAW,IAAI;AAAA,MACnB,CAAC,MAAM,EAAE,eAAe,cAAc,EAAE,OAAO;AAAA,IACjD;AACA,QAAI,SAAS,WAAW,GAAG;AAKzB,aAAO;AAAA,IACT;AAIA,QAAI,QAAkB;AACtB,aAAS,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,KAAK;AAC7C,YAAM,QAAQ,SAAS,CAAC;AACxB,UAAI,CAAC,MAAO;AAMZ,UAAI,MAAM,YAAY,aAAa,MAAM,OAAO,UAAU;AACxD,eAAO;AAAA,MACT;AAEA,UAAI,MAAM,OAAO,UAAU;AAOzB,eAAO;AAAA,MACT;AAEA,UAAI,MAAM,cAAc,QAAW;AAOjC,YAAI,MAAM,YAAY
,UAAW,QAAO;AACxC,eAAO;AAAA,MACT;AAEA,YAAM,QAAQ,MAAM,KAAK,UAAU,MAAM,KAAK;AAC9C,UAAI,CAAC,OAAO;AAEV,eAAO;AAAA,MACT;AAEA,UAAI,UAAU,MAAM;AAGlB,eAAO;AAAA,MACT;AAEA,cAAQ,WAAW,OAAO,KAAK;AAAA,IACjC;AAIA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiCA,MAAM,SAAgC;AACpC,UAAM,UAAU,MAAM,KAAK,eAAe;AAC1C,QAAI,mBAAmB;AACvB,aAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,YAAM,QAAQ,QAAQ,CAAC;AACvB,UAAI,CAAC,MAAO;AACZ,UAAI,MAAM,aAAa,kBAAkB;AACvC,eAAO;AAAA,UACL,IAAI;AAAA,UACJ,YAAY;AAAA,UACZ,UAAU;AAAA,UACV,QAAQ,MAAM;AAAA,QAChB;AAAA,MACF;AACA,UAAI,MAAM,UAAU,GAAG;AAIrB,eAAO;AAAA,UACL,IAAI;AAAA,UACJ,YAAY;AAAA,UACZ,UAAU,SAAS,CAAC;AAAA,UACpB,QAAQ,SAAS,MAAM,KAAK;AAAA,QAC9B;AAAA,MACF;AACA,yBAAmB,MAAM,UAAU,KAAK;AAAA,IAC1C;AACA,WAAO;AAAA,MACL,IAAI;AAAA,MACJ,MAAM;AAAA,MACN,QAAQ,QAAQ;AAAA,IAClB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAc,aAAa,OAAgD;AACzE,UAAM,OAAO,cAAc,KAAK;AAChC,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,IAAI,MAAM,QAAQ;AAAA,QAClB,KAAK,MAAM;AAAA,QACX,KAAK;AAAA,QACL,OAAO;AAAA,QACP,KAAK,MAAM;AAAA,MACb;AAAA,IACF;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,EAAE,IAAI,KAAK,IAAI,MAAM,QAAQ,MAAM,GAAG;AAC5C,WAAO;AAAA,MACL,QAAQ;AAAA,MACR,IAAI,MAAM,QAAQ;AAAA,MAClB,KAAK,MAAM;AAAA,MACX,KAAK;AAAA,MACL,OAAO;AAAA,MACP,KAAK,MAAM;AAAA,IACb;AAAA,EACF;AAAA;AAAA,EAGA,MAAc,aAAa,UAAmD;AAC5E,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO,KAAK,MAAM,SAAS,KAAK;AAAA,IAClC;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,OAAO,MAAM,QAAQ,SAAS,KAAK,SAAS,OAAO,GAAG;AAC5D,WAAO,KAAK,MAAM,IAAI;AAAA,EACxB;AACF;AAaA,SAAS,aAAa,SAAgC;AACpD,QAAM,OAAO,IAAI,KAAK,IAAI,GAAG,OAAO;AACpC,QAAM,SAAS,KAAK,OAAO,IAAI;AAC/B,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,OAAO,MAAM,CAAC;AACpE;;;ACnkBO,IAAM,eAAN,MAAmB;AAAA,EACxB,YACmB,QAED,WAChB;AAHiB;AAED;AAAA,EACf;AAAA,EAHgB;AAAA,EAED;AAAA;AAAA,EAIlB,WAAwB,MAAoC;AAC1D,WAAO,IAAI,kBAAqB,KAAK,QAAQ,KAAK,WAAW,IAAI;AAAA,EACnE;AACF;AAYO,IAAM,oBAAN,MAAqC;AAAA,EAC1C,YACmB,QACA,UACD,MAChB;AAHiB;AACA;AACD;AAAA,EACf;AAAA,EAHgB;AAAA,EACA;AAAA,EACD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQlB,MAAM,IAAI,IAA+B;AACvC,UAAM,WAAW,MAAM,KAAK,gBAAgB,EAAE;AAC9C,QAAI,CAAC,SAAU,QAAO;AACtB,UAAM,YAAY,KAAK,OAAO,YAC1B,MAAM,QAAQ,SAAS,KAAK,SAAS,OAAO,MAAM,KAAK,OAAO,OAAO,KAAK,IAAI,CAAC,IAC/E,SAAS;AACb,WAAO,KAAK,MAAM,SAAS;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,OAA0B;AAC9B,UAAM,aAAa,MAAM,kBAAkB,KAAK,OAAO,SAAS,KAAK,OAAO,MAAM,KAAK,IAAI;AAC3F,UAAM,UAAU,MAAM,KAAK,OAAO,QAAQ,KAAK,KAAK,OAAO,MAAM,KAAK,IAAI;AAC1E,UAAM,eAAe,oBAAI,IAAY,CAAC,GAAG,YAAY,GAAG,OAAO,CAAC;AAChE,UAAM,QAAkB,CAAC;AACzB,eAAW,MAAM,cAAc;AAC7B,YAAM,MAAM,MAAM,KAAK,gBAAgB,EAAE;AACzC,UAAI,IAAK,OAAM,KAAK,EAAE;AAAA,IACxB;AACA,WAAO,MAAM,KAAK;AAAA,EACpB;AAAA;AAAA,EAIA,MAAM,IAAI,KAAa,SAA4B;AACjD,UAAM,IAAI,uBAAuB,OAAO,KAAK,QAAQ;AAAA,EACvD;AAAA,EACA,MAAM,OAAO,KAA6B;AACxC,UAAM,IAAI,uBAAuB,UAAU,KAAK,QAAQ;AAAA,EAC1D;AAAA,EACA,MAAM,OAAO,KAAa,QAAoC;AAC5D,UAAM,IAAI,uBAAuB,UAAU,KAAK,QAAQ;AAAA,EAC1D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkCA,MAAc,gBAAgB,IAA+C;AAC3E,UAAM,SAAS,KAAK,OAAO,UAAU;AACrC,QAAI,QAAQ;AACV,aAAO,KAAK,iBAAiB,IAAI,MAAM;AAAA,IACzC;AACA,WAAO,KAAK,qBAAqB,EAAE;AAAA,EACrC;AAAA,EAEA,MAAc,iBAAiB,IAAY,QAAwD;AACjG,UAAM,UAAU,MAAM,OAAO,QAAQ;AAErC,QAAI,SAA2D;AAC/D,eAAW,KAAK,SAAS;AACvB,UAAI,EAAE,eAAe,KAAK,QAAQ,EAAE,OAAO,GAAI;AAC/C,UAAI,EAAE,KAAK,KAAK,SAAU;AAC1B,eAAS,EAAE,IAAI,EAAE,IAAI,SAAS,EAAE,QAAQ;AAAA,IAC1C;AACA,QAAI,CAA
C,OAAQ,QAAO;AACpB,QAAI,OAAO,OAAO,SAAU,QAAO;AACnC,WAAO,KAAK,YAAY,IAAI,OAAO,OAAO;AAAA,EAC5C;AAAA,EAEA,MAAc,qBAAqB,IAA+C;AAChF,UAAM,UAAU,MAAM;AAAA,MACpB,KAAK,OAAO;AAAA,MAAS,KAAK,OAAO;AAAA,MAAM,KAAK;AAAA,MAAM;AAAA,IACpD;AACA,UAAM,OAAO,MAAM,KAAK,OAAO,QAAQ,IAAI,KAAK,OAAO,MAAM,KAAK,MAAM,EAAE;AAC1E,UAAM,YAAY,oBAAI,IAA+B;AACrD,eAAW,KAAK,QAAS,WAAU,IAAI,EAAE,IAAI,CAAC;AAC9C,QAAI,KAAM,WAAU,IAAI,KAAK,IAAI,IAAI;AACrC,UAAM,SAAS,CAAC,GAAG,UAAU,OAAO,CAAC,EAAE;AAAA,MAAK,CAAC,GAAG,MAC9C,EAAE,MAAM,EAAE,MAAM,IAAI,EAAE,MAAM,EAAE,MAAM,KAAK;AAAA,IAC3C;AACA,WAAO,OAAO,KAAK,CAAC,MAAM,EAAE,OAAO,KAAK,QAAQ,KAAK;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAc,YAAY,IAAY,SAAoD;AACxF,UAAM,OAAO,MAAM,KAAK,OAAO,QAAQ,IAAI,KAAK,OAAO,MAAM,KAAK,MAAM,EAAE;AAC1E,QAAI,QAAQ,KAAK,OAAO,QAAS,QAAO;AAGxC,UAAMC,aAAY,GAAG,KAAK,IAAI,IAAI,EAAE,IAAI,OAAO,OAAO,EAAE,SAAS,IAAI,GAAG,CAAC;AACzE,WAAO,MAAM,KAAK,OAAO,QAAQ,IAAI,KAAK,OAAO,MAAM,YAAYA,UAAS;AAAA,EAC9E;AACF;AASA,eAAe,kBACb,SACA,OACA,YACmB;AACnB,QAAM,MAAM,MAAM,QAAQ,KAAK,OAAO,UAAU;AAChD,QAAM,SAAS,GAAG,UAAU;AAC5B,QAAM,OAAO,oBAAI,IAAY;AAC7B,aAAW,OAAO,KAAK;AACrB,QAAI,CAAC,IAAI,WAAW,MAAM,EAAG;AAC7B,UAAM,YAAY,IAAI,YAAY,GAAG;AACrC,QAAI,aAAa,OAAO,OAAQ;AAChC,UAAM,SAAS,IAAI,MAAM,OAAO,QAAQ,SAAS;AACjD,SAAK,IAAI,MAAM;AAAA,EACjB;AACA,SAAO,CAAC,GAAG,IAAI;AACjB;;;ACtOO,SAAS,cAA+B;AAC7C,SAAO;AAAA,IACL;AAAA,IACA,mBAAmB;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,YAAY,MAAuC;AACjD,aAAO,IAAI,YAAY;AAAA,QACrB,SAAS,KAAK;AAAA,QACd,OAAO,KAAK;AAAA,QACZ,WAAW,KAAK;AAAA,QAChB,QAAQ,KAAK;AAAA,QACb,OAAO,KAAK;AAAA,MACd,CAAC;AAAA,IACH;AAAA,IACA,kBAAkB,QAAQ,WAAyB;AACjD,aAAO,IAAI,aAAa,QAAQ,SAAS;AAAA,IAC3C;AAAA,EACF;AACF;","names":["diff","historyId"]}
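
The `LedgerStore`, time-machine, and `withHistory()` sources embedded in the map above document each call in isolation but never show the flow end to end. A minimal sketch, assuming the wiring shown in the `withHistory()` docstring; the `Invoice` shape, the vault/collection names, and the live `collection.put` / `collection.get` calls are illustrative assumptions, not confirmed API:

```ts
import { createNoydb } from '@noy-db/hub'
import { withHistory } from '@noy-db/hub/history'
import type { NoydbStore } from '@noy-db/hub' // assumed to be re-exported from the barrel

// Provided by the host app — any 6-method adapter works. Shapes of
// `user` and `passphrase` are assumptions for this sketch.
declare const store: NoydbStore
declare const user: string
declare const passphrase: string

// Hypothetical record shape.
interface Invoice { amount: number; status: string }

const db = await createNoydb({ store, user, historyStrategy: withHistory() })
const vault = await db.openVault('acme', { passphrase })

const invoices = vault.collection<Invoice>('invoices')
await invoices.put('inv-001', { amount: 100, status: 'draft' })
await invoices.put('inv-001', { amount: 100, status: 'sent' })

// The ledger is obtained via vault.ledger(); there is no public
// constructor (see the LedgerStore docstring above).
const ledger = vault.ledger()

// head(): cached last entry plus its hash; null only on an empty chain.
const head = await ledger.head()

// verify(): genesis-forward walk; a failure pinpoints the first
// broken link at divergedAt.
const check = await ledger.verify()
if (!check.ok) {
  console.error(`chain diverged at index ${check.divergedAt}`)
}

// reconstruct(): reverse-delta walk starting from the EXACT current
// value — passing a mutated object corrupts the reconstruction.
const current = await invoices.get('inv-001')
if (current !== null) {
  const v1 = await ledger.reconstruct<Invoice>('invoices', 'inv-001', current, 1)
}
```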
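The read-only contract of `vault.at()` is worth seeing from the caller's side as well. A sketch continuing the previous example (the error's public import path is a guess; the source only shows it coming from an internal `errors.js`):

```ts
import { ReadOnlyAtInstantError } from '@noy-db/hub' // assumed public export path

// Freeze the vault at the close of Q1 2026, as in the module docstring.
const q1End = vault.at('2026-03-31T23:59:59Z')
const frozen = q1End.collection<Invoice>('invoices')

// Reads resolve against the ledger timeline, or against envelope _ts
// when no ledger is available (approximate, per resolveEnvelope's caveat).
const asOfQ1 = await frozen.get('inv-001')
const aliveAtQ1 = await frozen.list()

// Every mutation throws: a historical view is a read surface.
try {
  await frozen.put('inv-001', { amount: 0, status: 'void' })
} catch (err) {
  if (err instanceof ReadOnlyAtInstantError) {
    // The error carries the attempted operation and the frozen timestamp.
  }
}
```
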
{"version":3,"sources":["../../src/history/index.ts","../../src/history/history.ts","../../src/history/diff.ts","../../src/types.ts","../../src/errors.ts","../../src/crypto.ts","../../src/history/ledger/entry.ts","../../src/history/ledger/patch.ts","../../src/history/ledger/constants.ts","../../src/history/ledger/hash.ts","../../src/history/ledger/store.ts","../../src/history/time-machine.ts","../../src/history/active.ts"],"sourcesContent":["/**\n * `@noy-db/hub/history` — subpath export for record-history / diff /\n * ledger-based audit trail + point-in-time primitives.\n *\n * Solo apps that don't track versioning, don't need a hash-chained\n * audit log, and don't restore to earlier points in time can exclude\n * this entire surface. Bundle savings ~1,880 LOC.\n *\n * The strategy seam is `withHistory()` — pass the returned\n * strategy to `createNoydb({ historyStrategy: ... })`. Without it,\n * the core `NO_HISTORY` stub no-ops snapshots/prune/clear and throws\n * on read APIs (`history`, `getVersion`, `diff`, `vault.at`,\n * `vault.ledger`).\n *\n * Named re-exports (not `export *`) so tsup keeps the barrel\n * populated even with `sideEffects: false`.\n */\n\n// ─── Strategy seam ─────────────────────────────────────\nexport { withHistory } from './active.js'\nexport type { HistoryStrategy } from './strategy.js'\n\n// ─── Per-record history ──────────────────────────────────\nexport {\n saveHistory,\n getHistory,\n getVersionEnvelope,\n pruneHistory,\n clearHistory,\n} from './history.js'\n\n// ─── Diff ────────────────────────────────────────────────\nexport { diff, formatDiff } from './diff.js'\nexport type { ChangeType, DiffEntry } from './diff.js'\n\n// ─── Hash-chained ledger ─────────────────────────────────\nexport {\n LedgerStore,\n LEDGER_COLLECTION,\n LEDGER_DELTAS_COLLECTION,\n envelopePayloadHash,\n} from './ledger/store.js'\nexport type { AppendInput, VerifyResult } from './ledger/store.js'\n\nexport {\n canonicalJson,\n sha256Hex,\n hashEntry,\n paddedIndex,\n parseIndex,\n} from './ledger/entry.js'\nexport type { LedgerEntry } from './ledger/entry.js'\n\n// ─── JSON Patch — delta history ──────────────────────────\nexport { computePatch, applyPatch } from './ledger/patch.js'\nexport type { JsonPatch, JsonPatchOp } from './ledger/patch.js'\n\n// ─── Time-machine queries ──────────────────────────────\nexport { VaultInstant, CollectionInstant } from './time-machine.js'\nexport type { VaultEngine } from './time-machine.js'\n","import type { NoydbStore, EncryptedEnvelope, HistoryOptions, PruneOptions } from '../types.js'\n\n/**\n * History storage convention:\n * Collection: `_history`\n * ID format: `{collection}:{recordId}:{paddedVersion}`\n * Version is zero-padded to 10 digits for lexicographic sorting.\n */\n\nconst HISTORY_COLLECTION = '_history'\nconst VERSION_PAD = 10\n\nfunction historyId(collection: string, recordId: string, version: number): string {\n return `${collection}:${recordId}:${String(version).padStart(VERSION_PAD, '0')}`\n}\n\n// Unused today, kept for future history-id parsing utilities.\n// eslint-disable-next-line @typescript-eslint/no-unused-vars\nfunction parseHistoryId(id: string): { collection: string; recordId: string; version: number } | null {\n const lastColon = id.lastIndexOf(':')\n if (lastColon < 0) return null\n const versionStr = id.slice(lastColon + 1)\n const rest = id.slice(0, lastColon)\n const firstColon = rest.indexOf(':')\n if (firstColon < 0) return null\n return {\n collection: rest.slice(0, firstColon),\n recordId: 
rest.slice(firstColon + 1),\n version: parseInt(versionStr, 10),\n }\n}\n\nfunction matchesPrefix(id: string, collection: string, recordId?: string): boolean {\n if (recordId) {\n return id.startsWith(`${collection}:${recordId}:`)\n }\n return id.startsWith(`${collection}:`)\n}\n\n/** Save a history entry (a complete encrypted envelope snapshot). */\nexport async function saveHistory(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string,\n envelope: EncryptedEnvelope,\n): Promise<void> {\n const id = historyId(collection, recordId, envelope._v)\n await adapter.put(vault, HISTORY_COLLECTION, id, envelope)\n}\n\n/** Get history entries for a record, sorted newest-first. */\nexport async function getHistory(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string,\n options?: HistoryOptions,\n): Promise<EncryptedEnvelope[]> {\n const allIds = await adapter.list(vault, HISTORY_COLLECTION)\n const matchingIds = allIds\n .filter(id => matchesPrefix(id, collection, recordId))\n .sort()\n .reverse() // newest first\n\n const entries: EncryptedEnvelope[] = []\n\n for (const id of matchingIds) {\n const envelope = await adapter.get(vault, HISTORY_COLLECTION, id)\n if (!envelope) continue\n\n // Apply time filters\n if (options?.from && envelope._ts < options.from) continue\n if (options?.to && envelope._ts > options.to) continue\n\n entries.push(envelope)\n\n if (options?.limit && entries.length >= options.limit) break\n }\n\n return entries\n}\n\n/** Get a specific version's envelope from history. */\nexport async function getVersionEnvelope(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string,\n version: number,\n): Promise<EncryptedEnvelope | null> {\n const id = historyId(collection, recordId, version)\n return adapter.get(vault, HISTORY_COLLECTION, id)\n}\n\n/** Prune history entries. Returns the number of entries deleted. */\nexport async function pruneHistory(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n recordId: string | undefined,\n options: PruneOptions,\n): Promise<number> {\n const allIds = await adapter.list(vault, HISTORY_COLLECTION)\n const matchingIds = allIds\n .filter(id => recordId ? matchesPrefix(id, collection, recordId) : matchesPrefix(id, collection))\n .sort()\n\n let toDelete: string[] = []\n\n if (options.keepVersions !== undefined) {\n // Keep only the N most recent, delete the rest\n const keep = options.keepVersions\n if (matchingIds.length > keep) {\n toDelete = matchingIds.slice(0, matchingIds.length - keep)\n }\n }\n\n if (options.beforeDate) {\n // Delete entries older than the specified date\n for (const id of matchingIds) {\n if (toDelete.includes(id)) continue\n const envelope = await adapter.get(vault, HISTORY_COLLECTION, id)\n if (envelope && envelope._ts < options.beforeDate) {\n toDelete.push(id)\n }\n }\n }\n\n // Deduplicate\n const uniqueDeletes = [...new Set(toDelete)]\n\n for (const id of uniqueDeletes) {\n await adapter.delete(vault, HISTORY_COLLECTION, id)\n }\n\n return uniqueDeletes.length\n}\n\n/** Clear all history for a vault, optionally scoped to a collection or record. 
*/\nexport async function clearHistory(\n adapter: NoydbStore,\n vault: string,\n collection?: string,\n recordId?: string,\n): Promise<number> {\n const allIds = await adapter.list(vault, HISTORY_COLLECTION)\n let toDelete: string[]\n\n if (collection && recordId) {\n toDelete = allIds.filter(id => matchesPrefix(id, collection, recordId))\n } else if (collection) {\n toDelete = allIds.filter(id => matchesPrefix(id, collection))\n } else {\n toDelete = allIds\n }\n\n for (const id of toDelete) {\n await adapter.delete(vault, HISTORY_COLLECTION, id)\n }\n\n return toDelete.length\n}\n","/**\n * Zero-dependency JSON diff.\n * Produces a flat list of changes between two plain objects.\n */\n\nexport type ChangeType = 'added' | 'removed' | 'changed'\n\nexport interface DiffEntry {\n /** Dot-separated path to the changed field (e.g. \"address.city\"). */\n readonly path: string\n /** Type of change. */\n readonly type: ChangeType\n /** Previous value (undefined for 'added'). */\n readonly from?: unknown\n /** New value (undefined for 'removed'). */\n readonly to?: unknown\n}\n\n/**\n * Compute differences between two objects.\n * Returns an array of DiffEntry describing each changed field.\n * Returns empty array if objects are identical.\n */\nexport function diff(oldObj: unknown, newObj: unknown, basePath = ''): DiffEntry[] {\n const changes: DiffEntry[] = []\n\n // Both primitives or nulls\n if (oldObj === newObj) return changes\n\n // One is null/undefined\n if (oldObj == null && newObj != null) {\n return [{ path: basePath || '(root)', type: 'added', to: newObj }]\n }\n if (oldObj != null && newObj == null) {\n return [{ path: basePath || '(root)', type: 'removed', from: oldObj }]\n }\n\n // Different types\n if (typeof oldObj !== typeof newObj) {\n return [{ path: basePath || '(root)', type: 'changed', from: oldObj, to: newObj }]\n }\n\n // Both primitives (and not equal — checked above)\n if (typeof oldObj !== 'object') {\n return [{ path: basePath || '(root)', type: 'changed', from: oldObj, to: newObj }]\n }\n\n // Both arrays\n if (Array.isArray(oldObj) && Array.isArray(newObj)) {\n const maxLen = Math.max(oldObj.length, newObj.length)\n for (let i = 0; i < maxLen; i++) {\n const p = basePath ? `${basePath}[${i}]` : `[${i}]`\n if (i >= oldObj.length) {\n changes.push({ path: p, type: 'added', to: newObj[i] })\n } else if (i >= newObj.length) {\n changes.push({ path: p, type: 'removed', from: oldObj[i] })\n } else {\n changes.push(...diff(oldObj[i], newObj[i], p))\n }\n }\n return changes\n }\n\n // Both objects\n const oldRecord = oldObj as Record<string, unknown>\n const newRecord = newObj as Record<string, unknown>\n const allKeys = new Set([...Object.keys(oldRecord), ...Object.keys(newRecord)])\n\n for (const key of allKeys) {\n const p = basePath ? `${basePath}.${key}` : key\n if (!(key in oldRecord)) {\n changes.push({ path: p, type: 'added', to: newRecord[key] })\n } else if (!(key in newRecord)) {\n changes.push({ path: p, type: 'removed', from: oldRecord[key] })\n } else {\n changes.push(...diff(oldRecord[key], newRecord[key], p))\n }\n }\n\n return changes\n}\n\n/** Format a diff as a human-readable string. 
*/\nexport function formatDiff(changes: DiffEntry[]): string {\n if (changes.length === 0) return '(no changes)'\n return changes.map(c => {\n switch (c.type) {\n case 'added':\n return `+ ${c.path}: ${JSON.stringify(c.to)}`\n case 'removed':\n return `- ${c.path}: ${JSON.stringify(c.from)}`\n case 'changed':\n return `~ ${c.path}: ${JSON.stringify(c.from)} → ${JSON.stringify(c.to)}`\n }\n }).join('\\n')\n}\n","/**\n * Core types — the {@link NoydbStore} interface, envelope format, roles, and\n * all configuration shapes consumed by {@link createNoydb}.\n *\n * ## What lives here\n *\n * - **{@link NoydbStore}** — the 6-method contract every backend must implement\n * (`get`, `put`, `delete`, `list`, `loadAll`, `saveAll`).\n * - **{@link EncryptedEnvelope}** — the wire format stored by backends:\n * `{ _noydb, _v, _ts, _iv, _data }`. Backends only ever see this shape.\n * - **{@link Role} / {@link Permission}** — the access-control vocabulary\n * (`owner`, `admin`, `operator`, `viewer`, `client`).\n * - **{@link NoydbOptions}** — the full configuration object passed to\n * {@link createNoydb}.\n *\n * ## Extending the store interface\n *\n * All optional store capabilities (`ping`, `listPage`, `listSince`,\n * `presencePublish`, `presenceSubscribe`, `listVaults`) are additive extensions\n * discovered via `'method' in store`. Implementing them unlocks features but\n * is never required — core always falls back to the 6-method baseline.\n *\n * @module\n */\n\nimport type { StandardSchemaV1 } from './schema.js'\nimport type { SyncPolicy } from './store/sync-policy.js'\nimport type { BlobStrategy } from './blobs/strategy.js'\nimport type { IndexStrategy } from './indexing/strategy.js'\nimport type { AggregateStrategy } from './aggregate/strategy.js'\nimport type { CrdtStrategy } from './crdt/strategy.js'\nimport type { ConsentStrategy } from './consent/strategy.js'\nimport type { PeriodsStrategy } from './periods/strategy.js'\nimport type { ShadowStrategy } from './shadow/strategy.js'\nimport type { TxStrategy } from './tx/strategy.js'\nimport type { HistoryStrategy } from './history/strategy.js'\nimport type { I18nStrategy } from './i18n/strategy.js'\nimport type { SessionStrategy } from './session/strategy.js'\nimport type { SyncStrategy } from './team/sync-strategy.js'\nimport type { UnlockedKeyring } from './team/keyring.js'\n\n/** Format version for encrypted record envelopes. */\nexport const NOYDB_FORMAT_VERSION = 1 as const\n\n/** Format version for keyring files. */\nexport const NOYDB_KEYRING_VERSION = 1 as const\n\n/** Format version for backup files. */\nexport const NOYDB_BACKUP_VERSION = 1 as const\n\n/** Format version for sync metadata. 
*/\nexport const NOYDB_SYNC_VERSION = 1 as const\n\n// ─── Roles & Permissions ───────────────────────────────────────────────\n\n/**\n * Access role assigned to a user within a vault.\n *\n * Roles control both the operations a user can perform and which DEKs\n * they receive in their keyring:\n *\n * | Role | Collections | Can grant/revoke | Can export |\n * |------------|-----------------|:----------------:|:----------:|\n * | `owner` | all (rw) | Yes (all roles) | Yes |\n * | `admin` | all (rw) | Yes (≤ admin) | Yes |\n * | `operator` | explicit (rw) | No | ACL-scoped |\n * | `viewer` | all (ro) | No | Yes |\n * | `client` | explicit (ro) | No | ACL-scoped |\n */\nexport type Role = 'owner' | 'admin' | 'operator' | 'viewer' | 'client'\n\n/**\n * Read-write or read-only access on a collection.\n * Stored per-collection in the user's keyring.\n */\nexport type Permission = 'rw' | 'ro'\n\n/**\n * Map of collection name → permission level for a user's keyring entry.\n * `'*'` is the wildcard collection matching all collections in the vault.\n */\nexport type Permissions = Record<string, Permission>\n\n// ─── Encrypted Envelope ────────────────────────────────────────────────\n\n/** The encrypted wrapper stored by adapters. Adapters only ever see this. */\nexport interface EncryptedEnvelope {\n readonly _noydb: typeof NOYDB_FORMAT_VERSION\n readonly _v: number\n readonly _ts: string\n readonly _iv: string\n readonly _data: string\n /** User who created this version (unencrypted metadata). */\n readonly _by?: string\n /**\n * Hierarchical access tier. Omitted → tier 0.\n *\n * Unencrypted on purpose — the store reads it to route the envelope\n * to the right DEK slot without having to try-decrypt against every\n * tier. Only leaks the tier of each record, not any value\n * equivalence.\n */\n readonly _tier?: number\n /**\n * User id who last elevated this record. Used by\n * `demote()` to gate the reverse operation: only the original\n * elevator or an owner can demote a record back down. Cleared on\n * every successful demote so a later re-elevate requires the new\n * actor to own the demotion right.\n */\n readonly _elevatedBy?: string\n /**\n * Deterministic-encryption index. Map of field name →\n * base64 deterministic ciphertext. Present only when the collection\n * declares `deterministicFields` and the feature is acknowledged. The\n * field names are unencrypted (they're the index keys); the values\n * are AES-GCM ciphertext with an HKDF-derived deterministic IV.\n *\n * Enables blind equality search (`collection.findByDet(field,\n * value)`) without decrypting every record. Leaks equality as a known\n * side channel.\n */\n readonly _det?: Record<string, string>\n}\n\n/**\n * Placeholder returned by `getAtTier()` in `'ghost'` mode when a\n * record is at a tier the caller cannot decrypt. Record existence is\n * advertised — the id and tier are visible — but contents are\n * withheld. `canElevateFrom` lists user ids authorized to elevate\n * access for this caller when known; absent when the workflow is\n * not configured.\n */\nexport interface GhostRecord {\n readonly _ghost: true\n readonly _tier: number\n readonly canElevateFrom?: readonly string[]\n}\n\n/** Control what lower-tier reads see above their clearance. */\nexport type TierMode = 'invisibility' | 'ghost'\n\n/**\n * Event emitted when a record at a tier above the caller's inherent\n * clearance is read or written successfully (via elevation or\n * delegation). 
Always written to the ledger; subscribers get a\n * real-time feed.\n */\nexport interface CrossTierAccessEvent {\n readonly actor: string\n readonly collection: string\n readonly id: string\n readonly tier: number\n /** How the caller gained tier access: they elevated it, a delegation is active, or their inherent clearance already covers the tier. */\n readonly authorization: 'elevation' | 'delegation' | 'inherent'\n readonly op: 'get' | 'put' | 'elevate' | 'demote'\n readonly ts: string\n /**\n * When `authorization === 'elevation'`, the audit reason string the\n * caller passed to `vault.elevate(...)`. Empty for inherent /\n * delegation paths.\n */\n readonly reason?: string\n /**\n * When `authorization === 'elevation'`, the tier the caller's\n * keyring effectively held BEFORE elevation. Useful for audit\n * dashboards distinguishing \"operator elevating to 2\" from\n * \"inherent tier-2 write.\"\n */\n readonly elevatedFrom?: number\n}\n\n/**\n * A single deterministic-ciphertext index slot on an envelope. Stored\n * as `iv:data` (both base64, colon-separated) so a single string per\n * field keeps the envelope compact.\n */\nexport type DeterministicCipher = string\n\n// ─── Vault Snapshot ──────────────────────────────────────────────\n\n/** All records across all collections for a compartment. */\nexport type VaultSnapshot = Record<string, Record<string, EncryptedEnvelope>>\n\n/**\n * Result of a single page fetch via the optional `listPage` adapter extension.\n *\n * `items` carries the actual encrypted envelopes (not just ids) so the\n * caller can decrypt and emit a single record without an extra `get()`\n * round-trip per id. `nextCursor` is `null` on the final page.\n */\nexport interface ListPageResult {\n /** Encrypted envelopes for this page, in adapter-defined order. */\n items: Array<{ id: string; envelope: EncryptedEnvelope }>\n /** Opaque cursor for the next page, or `null` if this was the last page. */\n nextCursor: string | null\n}\n\n// ─── Store Interface ───────────────────────────────────────────────────\n\nexport interface NoydbStore {\n /**\n * Optional human-readable adapter name (e.g. 'memory', 'file', 'dynamo').\n * Used in diagnostic messages and the listPage fallback warning. Adapters\n * are encouraged to set this so logs are clearer about which backend is\n * involved when something goes wrong.\n */\n name?: string\n\n /** Get a single record. Returns null if not found. */\n get(vault: string, collection: string, id: string): Promise<EncryptedEnvelope | null>\n\n /** Put a record. Throws ConflictError if expectedVersion doesn't match. */\n put(\n vault: string,\n collection: string,\n id: string,\n envelope: EncryptedEnvelope,\n expectedVersion?: number,\n ): Promise<void>\n\n /** Delete a record. */\n delete(vault: string, collection: string, id: string): Promise<void>\n\n /** List all record IDs in a collection. */\n list(vault: string, collection: string): Promise<string[]>\n\n /** Load all records for a vault (initial hydration). */\n loadAll(vault: string): Promise<VaultSnapshot>\n\n /** Save all records for a vault (bulk write / restore). */\n saveAll(vault: string, data: VaultSnapshot): Promise<void>\n\n /** Optional connectivity check for sync engine. */\n ping?(): Promise<boolean>\n\n /**\n * Optional: list record IDs in a collection that have `_ts` after `since`.\n * Used by partial sync (`pull({ modifiedSince })`). 
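A minimal\n * conforming implementation, sketched over `loadAll` (a real adapter\n * would query its backend directly instead):\n * ```ts\n * async listSince(vault: string, collection: string, since: string) {\n *   const snapshot = await this.loadAll(vault)\n *   const records = snapshot[collection] ?? {}\n *   return Object.keys(records).filter(id => records[id]._ts > since)\n * }\n * ```\n * 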
Adapters that omit this\n * fall back to a full `loadAll` + client-side timestamp filter.\n */\n listSince?(vault: string, collection: string, since: string): Promise<string[]>\n\n /**\n * Optional pagination extension. Adapters that implement `listPage` get\n * the streaming `Collection.scan()` fast path; adapters that don't\n * fall back to a full `loadAll()` + slice (with a one-time\n * console.warn).\n *\n * `cursor` is opaque to the core — each adapter encodes its own paging\n * state (DynamoDB: base64 LastEvaluatedKey JSON; S3: ContinuationToken;\n * memory/file/browser: numeric offset of a sorted id list). Pass\n * `undefined` to start from the beginning.\n *\n * `limit` is a soft upper bound on `items.length`. Adapters MAY return\n * fewer items even when more exist (e.g. if the underlying store has\n * its own page size cap), and MUST signal \"no more pages\" by returning\n * `nextCursor: null`.\n *\n * The 6-method core contract is unchanged — this is an additive\n * extension discovered via `'listPage' in adapter`.\n */\n listPage?(\n vault: string,\n collection: string,\n cursor?: string,\n limit?: number,\n ): Promise<ListPageResult>\n\n /**\n * Optional pub/sub for real-time presence.\n * Publish an encrypted payload to a presence channel.\n * Falls back to storage-based polling when absent.\n */\n presencePublish?(channel: string, payload: string): Promise<void>\n\n /**\n * Optional pub/sub for real-time presence.\n * Subscribe to a presence channel. Returns an unsubscribe function.\n * Falls back to storage-based polling when absent.\n */\n presenceSubscribe?(channel: string, callback: (payload: string) => void): () => void\n\n /**\n * Optional cross-vault enumeration extension.\n *\n * Returns the names of every top-level vault the store\n * currently holds. Used by `Noydb.listAccessibleVaults()` to\n * enumerate the universe of vaults before filtering down to\n * the ones the calling principal can actually unwrap.\n *\n * **Why this is optional:** the storage shape of compartments\n * differs across backends. Memory and file stores keep\n * vaults as top-level keys / directories and can enumerate\n * them in O(1) calls. DynamoDB stores everything in a single table\n * keyed by `(compartment#collection, id)` — enumerating compartments\n * requires either a Scan (expensive, eventually consistent, leaks\n * ciphertext metadata) or a dedicated GSI that the consumer\n * provisioned. S3 needs a prefix list (cheap if enabled, ACL-sensitive\n * otherwise). Browser localStorage can scan keys by prefix.\n *\n * Stores that cannot implement `listVaults` cheaply or\n * cleanly should omit it. Core surfaces a `StoreCapabilityError`\n * with a clear message when a caller invokes\n * `listAccessibleVaults()` against a store that doesn't\n * provide this method, so consumers know to either upgrade their\n * store, provide a candidate list explicitly to `queryAcross()`,\n * or fall back to maintaining the compartment index out of band.\n *\n * **Privacy note:** `listVaults` returns *every* compartment\n * the store has, not just the ones the caller can access. The\n * existence-leak filtering (returning only compartments whose\n * keyring the caller can unwrap) happens in core, not in the\n * store. The store is trusted to know its own contents — that\n * is not a leak in the threat model. 
The leak the API guards\n * against is the *return value* of `listAccessibleVaults()`\n * exposing existence to a downstream observer who only sees that\n * function's output.\n *\n * The 6-method core contract is unchanged — this is an additive\n * extension discovered via `'listVaults' in store`.\n */\n listVaults?(): Promise<string[]>\n\n /**\n * Optional: generate a presigned URL for direct client download.\n * Only meaningful for object stores (S3, GCS) that support URL signing.\n * Returns a time-limited URL that fetches the encrypted envelope directly.\n * The caller must decrypt client-side (the URL returns ciphertext).\n */\n presignUrl?(vault: string, collection: string, id: string, expiresInSeconds?: number): Promise<string>\n\n /**\n * Optional: estimate current storage usage.\n * Returns `{ usedBytes, quotaBytes }` or null if the store cannot estimate.\n * Used by quota-aware routing to detect overflow conditions.\n */\n estimateUsage?(): Promise<{ usedBytes: number; quotaBytes: number } | null>\n\n /**\n * Optional multi-record atomic write.\n *\n * When present, `db.transaction(async (tx) => { ... })` uses this to\n * commit every staged op in one storage-layer transaction — either\n * all ops land or none do, regardless of which records they touch.\n * Every `TxOp.expectedVersion` (when set) must be honored atomically\n * alongside the write; any violation throws `ConflictError` and the\n * whole batch fails.\n *\n * Stores that omit this fall through to the hub's per-record OCC\n * fallback: pre-flight CAS check, then sequential `put`/`delete`\n * with best-effort unwind on mid-batch failure (see\n * `runTransaction` for the exact semantics and crash window).\n *\n * Native implementations: `to-memory` (single Map mutation),\n * `to-dynamo` (`TransactWriteItems`), `to-browser-idb` (one\n * `readwrite` transaction). File / S3 cannot implement this\n * atomically and should omit the method.\n */\n tx?(ops: readonly TxOp[]): Promise<void>\n}\n\n/**\n * A single staged operation inside a `db.transaction(fn)` commit. The\n * hub assembles `TxOp[]` from the user's `tx.collection().put/delete`\n * calls, encrypts any `record` values into `envelope`, and hands the\n * array to `NoydbStore.tx()` when the store supports atomic batch\n * writes. Stores that implement `tx()` MUST honor every\n * `expectedVersion` atomically against the stored envelope version.\n */\nexport interface TxOp {\n readonly type: 'put' | 'delete'\n readonly vault: string\n readonly collection: string\n readonly id: string\n /** Populated for `type: 'put'` — the encrypted envelope to write. */\n readonly envelope?: EncryptedEnvelope\n /** Optional per-record CAS. Mismatch must throw `ConflictError`. */\n readonly expectedVersion?: number\n}\n\n// ─── Store Factory Helper ──────────────────────────────────────────────\n\n/** Type-safe helper for creating store factories. */\nexport function createStore<TOptions>(\n factory: (options: TOptions) => NoydbStore,\n): (options: TOptions) => NoydbStore {\n return factory\n}\n\n// ─── Keyring ───────────────────────────────────────────────────────────\n\n/**\n * Interchange formats `@noy-db/as-*` packages can produce. 
`'*'` is a\n * wildcard granting every current + future plaintext format.\n */\nexport type ExportFormat =\n | 'xlsx'\n | 'csv'\n | 'json'\n | 'ndjson'\n | 'xml'\n | 'sql'\n | 'pdf'\n | 'blob'\n | 'zip'\n | '*'\n\n/**\n * Owner-granted export capability on a keyring.\n *\n * Two independent dimensions:\n *\n * - `plaintext` — per-format allowlist for record formatters + blob\n * extractors that emit plaintext bytes (`as-xlsx`, `as-csv`,\n * `as-blob`, `as-zip`, …). **Defaults to empty** for every role;\n * the owner/admin must positively grant per-format (or `'*'`).\n * - `bundle` — boolean for `.noydb` encrypted container export\n * (`as-noydb`). **Default policy: on for owner/admin, off for\n * operator/viewer/client** — applied when the field is absent or\n * undefined (see `hasExportCapability`).\n */\nexport interface ExportCapability {\n readonly plaintext?: readonly ExportFormat[]\n readonly bundle?: boolean\n}\n\n/**\n * Owner-granted import capability on a keyring (sibling of\n * `ExportCapability`, issue ).\n *\n * Two independent dimensions:\n *\n * - `plaintext` — per-format allowlist for `as-*` readers that ingest\n * plaintext bytes (`as-csv`, `as-json`, `as-ndjson`, `as-zip`, …).\n * Defaults to empty for every role; the owner/admin must positively\n * grant per-format (or `'*'`).\n * - `bundle` — boolean gate for `.noydb` bundle import. **Defaults to\n * `false` for every role**, including owner/admin. Import is more\n * dangerous than export (corrupts vs leaks), so the policy is\n * default-closed across the board — the owner explicitly opts a\n * keyring in via `db.grant({ importCapability: { bundle: true } })`.\n */\nexport interface ImportCapability {\n readonly plaintext?: readonly ExportFormat[]\n readonly bundle?: boolean\n}\n\nexport interface KeyringFile {\n readonly _noydb_keyring: typeof NOYDB_KEYRING_VERSION\n readonly user_id: string\n readonly display_name: string\n readonly role: Role\n readonly permissions: Permissions\n readonly deks: Record<string, string>\n readonly salt: string\n readonly created_at: string\n readonly granted_by: string\n /**\n * Optional — authorization spec capability bits. Absent on keyrings written\n * before the RFC implementation. Loading falls back to role-based\n * defaults (owner/admin get bundle-on, everyone else off).\n */\n readonly export_capability?: ExportCapability\n /**\n * Optional bundle-slot expiry. ISO-8601 timestamp; past\n * the cutoff `loadKeyring` throws `KeyringExpiredError` before any\n * DEK unwrap is attempted. Useful for time-boxed audit access:\n * \"this slot works for 30 days then becomes opaque to its holder.\"\n *\n * Absent on live keyrings written via `db.grant()` — the field is\n * meaningful for `BundleRecipient` slots produced by\n * `writeNoydbBundle({ recipients: [...] })`. Setting it on a live\n * keyring is allowed but unusual.\n */\n readonly expires_at?: string\n /**\n * Optional — issue import-capability bits. Absent on keyrings\n * written before landed. Loading falls back to default-closed\n * for every role and every format.\n */\n readonly import_capability?: ImportCapability\n /**\n * hierarchical access clearance. Absent → 0 (advisory;\n * the real check is whether the DEK map carries a `collection#tier`\n * entry for the requested tier). 
Owners and admins default to the\n * highest tier they have DEKs for at grant time.\n */\n readonly clearance?: number\n}\n\n// ─── Backup ────────────────────────────────────────────────────────────\n\nexport interface VaultBackup {\n readonly _noydb_backup: typeof NOYDB_BACKUP_VERSION\n readonly _compartment: string\n readonly _exported_at: string\n readonly _exported_by: string\n readonly keyrings: Record<string, KeyringFile>\n readonly collections: VaultSnapshot\n /**\n * Internal collections (`_ledger`, `_ledger_deltas`, `_history`, `_sync`, …)\n * captured alongside the data collections. Optional for backwards\n * compat with backups, which only stored data collections —\n * loading a backup leaves the ledger empty (and `verifyBackupIntegrity`\n * skips the chain check, surfacing only a console warning).\n */\n readonly _internal?: VaultSnapshot\n /**\n * Verifiable-backup metadata. Embeds the ledger head at\n * dump time so `load()` can cross-check that the loaded chain matches\n * exactly what was exported. A backup whose chain has been tampered\n * with — either by modifying ledger entries or by modifying data\n * envelopes that the chain references — fails this check.\n *\n * Optional for backwards compat with backups; missing means\n * \"legacy backup, load with a warning, no integrity check\".\n */\n readonly ledgerHead?: {\n /** Hex sha256 of the canonical JSON of the last ledger entry. */\n readonly hash: string\n /** Sequential index of the last ledger entry. */\n readonly index: number\n /** ISO timestamp captured at dump time. */\n readonly ts: string\n }\n}\n\n// ─── Export ────────────────────────────────────────────────────────────\n\n/**\n * Options for `Vault.exportStream()` and `Vault.exportJSON()`.\n *\n * The defaults match the most common consumer pattern: one chunk per\n * collection, no ledger metadata. Per-record streaming and ledger-head\n * inclusion are opt-in because both add structure most consumers don't\n * need.\n */\nexport interface ExportStreamOptions {\n /**\n * `'collection'` (default) yields one chunk per collection with all\n * records bundled in `chunk.records`. `'record'` yields one chunk per\n * record, useful for arbitrarily large collections that should never\n * be materialized as a single array.\n */\n readonly granularity?: 'collection' | 'record'\n\n /**\n * When `true`, every chunk includes the current compartment ledger\n * head under `chunk.ledgerHead`. The value is identical across every\n * chunk in a single export (one ledger per compartment). Forward-\n * compatible with future partition work where the head would become\n * per-partition. Default: `false`.\n */\n readonly withLedgerHead?: boolean\n /**\n * When set to a BCP 47 locale string (e.g. `'th'`), `exportJSON()`\n * resolves all `dictKey` labels to that locale and omits the raw\n * `dictionaries` snapshot from the output. Has no effect\n * on `exportStream()` — format packages use the `chunk.dictionaries`\n * snapshot directly and apply their own locale strategy.\n *\n * Default: `undefined` — embed the raw snapshot under `_dictionaries`.\n */\n readonly resolveLabels?: string\n}\n\n/**\n * One chunk yielded by `Vault.exportStream()`.\n *\n * `granularity: 'collection'` yields one chunk per collection with the\n * full record array in `records`. 
`granularity: 'record'` yields one\n * chunk per record with `records` containing exactly one element — the\n * `schema` and `refs` metadata is repeated on every chunk so consumers\n * doing per-record streaming don't have to thread state across yields.\n */\nexport interface ExportChunk<T = unknown> {\n /** Collection name (no leading underscore — internal collections are filtered out). */\n readonly collection: string\n\n /**\n * Standard Schema validator attached to the collection at `collection()`\n * construction time, or `null` if no schema was provided. Surfaced so\n * downstream serializers (`@noy-db/as-*` packages, custom\n * exporters) can produce schema-aware output (typed CSV headers, XSD\n * generation, etc.) without poking at collection internals.\n */\n readonly schema: StandardSchemaV1<unknown, T> | null\n\n /**\n * Foreign-key references declared on the collection via the `refs`\n * option, as the `{ field → { target, mode } }` map produced by\n * `RefRegistry.getOutbound`. Empty object when no refs were declared.\n */\n readonly refs: Record<string, { readonly target: string; readonly mode: 'strict' | 'warn' | 'cascade' }>\n\n /**\n * Decrypted, ACL-scoped, schema-validated records. Length 1 in\n * `granularity: 'record'` mode, full collection in `granularity: 'collection'`\n * mode. Records are returned by reference from the collection's eager\n * cache where applicable — consumers must treat them as immutable.\n */\n readonly records: T[]\n\n /**\n * Dictionary snapshots for every `dictKey` field declared on this\n * collection. Captured once at stream-start and held\n * constant across all chunks within the same export — a rename\n * mid-export does not change the snapshot. `undefined` when the\n * collection has no `dictKeyFields`.\n *\n * Shape: `{ [fieldName]: { [stableKey]: { [locale]: label } } }`\n *\n * @example\n * ```ts\n * chunk.dictionaries?.status?.paid?.th // → 'ชำระแล้ว'\n * ```\n */\n readonly dictionaries?: Record<\n string, // field name\n Record<string, Record<string, string>> // stable key → locale → label\n >\n\n /**\n * Vault ledger head at export time. Present only when\n * `exportStream({ withLedgerHead: true })` was called. 
Identical\n * across every chunk in the same export — included on every chunk\n * for forward-compatibility with future per-partition ledgers, where\n * the value will differ per chunk.\n */\n readonly ledgerHead?: {\n readonly hash: string\n readonly index: number\n readonly ts: string\n }\n}\n\n// ─── Sync ──────────────────────────────────────────────────────────────\n\nexport interface DirtyEntry {\n readonly vault: string\n readonly collection: string\n readonly id: string\n readonly action: 'put' | 'delete'\n readonly version: number\n readonly timestamp: string\n}\n\nexport interface SyncMetadata {\n readonly _noydb_sync: typeof NOYDB_SYNC_VERSION\n readonly last_push: string | null\n readonly last_pull: string | null\n readonly dirty: DirtyEntry[]\n}\n\nexport interface Conflict {\n readonly vault: string\n readonly collection: string\n readonly id: string\n readonly local: EncryptedEnvelope\n readonly remote: EncryptedEnvelope\n readonly localVersion: number\n readonly remoteVersion: number\n /**\n * Present only when the collection uses `conflictPolicy: 'manual'`.\n * Call `resolve(winner)` to commit the winning envelope, or\n * `resolve(null)` to defer (conflict stays queued for the next sync).\n * Called synchronously inside the `sync:conflict` event handler.\n */\n readonly resolve?: (winner: EncryptedEnvelope | null) => void\n}\n\nexport type ConflictStrategy =\n | 'local-wins'\n | 'remote-wins'\n | 'version'\n | ((conflict: Conflict) => 'local' | 'remote')\n\n/**\n * Collection-level conflict policy.\n * Overrides the db-level `conflict` option for the specific collection.\n *\n * - `'last-writer-wins'` — higher `_ts` wins (timestamp LWW).\n * - `'first-writer-wins'` — lower `_v` wins (earlier version is preserved).\n * - `'manual'` — emits `sync:conflict` with a `resolve` callback. Call\n * `resolve(winner)` synchronously to commit or `resolve(null)` to defer.\n * - Custom fn — synchronous `(local: T, remote: T) => T`. Must be pure.\n */\nexport type ConflictPolicy<T> =\n | 'last-writer-wins'\n | 'first-writer-wins'\n | 'manual'\n | ((local: T, remote: T) => T)\n\n/**\n * Envelope-level resolver registered per collection with the SyncEngine.\n * Receives the `id` of the conflicting record and both envelopes.\n * Returns the winning envelope, or `null` to defer resolution.\n * @internal\n */\nexport type CollectionConflictResolver = (\n id: string,\n local: EncryptedEnvelope,\n remote: EncryptedEnvelope,\n) => Promise<EncryptedEnvelope | null>\n\n/** Options for targeted push operations. */\nexport interface PushOptions {\n /** Only push records belonging to these collections. Omit to push all dirty. */\n collections?: string[]\n}\n\n/** Options for targeted pull operations. */\nexport interface PullOptions {\n /** Only pull these collections. Omit to pull all. */\n collections?: string[]\n /**\n * Only pull records with `_ts` strictly after this ISO timestamp.\n * Adapters that implement `listSince` use it directly; others fall back\n * to a full scan with client-side filtering.\n */\n modifiedSince?: string\n}\n\nexport interface PushResult {\n readonly pushed: number\n readonly conflicts: Conflict[]\n readonly errors: Error[]\n}\n\nexport interface PullResult {\n readonly pulled: number\n readonly conflicts: Conflict[]\n readonly errors: Error[]\n}\n\n/** Result of a sync transaction commit. 
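*\n * @example Handling both outcomes (sketch; `result` comes from a sync-aware transaction commit):\n * ```ts\n * if (result.status === 'conflict') {\n *   for (const c of result.conflicts) console.warn('unresolved:', c.collection, c.id)\n * } else {\n *   console.log(`committed: pushed ${result.pushed} record(s)`)\n * }\n * ```\n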
*/\nexport interface SyncTransactionResult {\n readonly status: 'committed' | 'conflict'\n readonly pushed: number\n readonly conflicts: Conflict[]\n}\n\nexport interface SyncStatus {\n readonly dirty: number\n readonly lastPush: string | null\n readonly lastPull: string | null\n readonly online: boolean\n}\n\n// ─── Sync Target ─────────────────────────────────────────\n\nexport type SyncTargetRole = 'sync-peer' | 'backup' | 'archive'\n\n/**\n * A sync target with role and optional per-target policy.\n *\n * | Role | Direction | Conflict resolution | Typical use |\n * |-------------|---------------|---------------------|--------------------------|\n * | `sync-peer` | Bidirectional | ConflictStrategy | DynamoDB live sync |\n * | `backup` | Push-only | N/A (receives merged)| S3 dump, Google Drive |\n * | `archive` | Push-only | N/A | IPFS, Git tags, S3 Lock |\n */\nexport interface SyncTarget {\n /** The store to sync with. */\n readonly store: NoydbStore\n /** Role determines sync direction and conflict handling. */\n readonly role: SyncTargetRole\n /** Per-target sync policy. Inherits store-category default when absent. */\n readonly policy?: SyncPolicy\n /** Human-readable label for DevTools and audit logs. */\n readonly label?: string\n}\n\n// ─── Events ────────────────────────────────────────────────────────────\n\nexport interface ChangeEvent {\n readonly vault: string\n readonly collection: string\n readonly id: string\n readonly action: 'put' | 'delete'\n}\n\nexport interface NoydbEventMap {\n 'change': ChangeEvent\n 'error': Error\n 'sync:push': PushResult\n 'sync:pull': PullResult\n 'sync:conflict': Conflict\n 'sync:online': void\n 'sync:offline': void\n 'sync:backup-error': { vault: string; target: string; error: Error }\n 'history:save': { vault: string; collection: string; id: string; version: number }\n 'history:prune': { vault: string; collection: string; id: string; pruned: number }\n /**\n * Emitted when a persisted-index side-car put/delete fails after the\n * main record write already succeeded. The main record is durable; the\n * index mirror may have drifted. Operators reconcile via\n * `collection.reconcileIndex(field)`.\n */\n 'index:write-partial': {\n vault: string\n collection: string\n id: string\n action: 'put' | 'delete'\n error: Error\n }\n /**\n * emitted by `Collection.ensurePersistedIndexesLoaded()`\n * once per field on first lazy-mode query when\n * `reconcileOnOpen: 'auto' | 'dry-run'` is configured. `applied` is\n * `0` in `'dry-run'` mode. `skipped` is reserved for a future\n * drift-stamp optimization that short-circuits the reconcile when\n * the mirror version matches what's on disk — currently always\n * `false` (the full reconcile runs every session).\n */\n 'index:reconciled': {\n vault: string\n collection: string\n field: string\n missing: readonly string[]\n stale: readonly string[]\n applied: number\n skipped: boolean\n }\n}\n\n// ─── Grant / Revoke ────────────────────────────────────────────────────\n\nexport interface GrantOptions {\n readonly userId: string\n readonly displayName: string\n readonly role: Role\n readonly passphrase: string\n readonly permissions?: Permissions\n /**\n * Optional `@noy-db/as-*` export capability. Omit or\n * leave undefined to apply role-based defaults (see\n * `hasExportCapability` and `ExportCapability`).\n */\n readonly exportCapability?: ExportCapability\n /**\n * Optional `@noy-db/as-*` import capability (issue ). 
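An opt-in sketch (field values illustrative;\n * `db.grant` as documented on `GrantOptions`):\n * ```ts\n * await db.grant({\n *   userId: 'bob',\n *   displayName: 'Bob',\n *   role: 'operator',\n *   passphrase: bobsPassphrase,\n *   importCapability: { plaintext: ['csv', 'json'], bundle: false },\n * })\n * ```\n * 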
Omit or\n * leave undefined for default-closed semantics — no plaintext format\n * is grantable until positively listed; bundle import is denied.\n */\n readonly importCapability?: ImportCapability\n}\n\nexport interface RevokeOptions {\n readonly userId: string\n readonly rotateKeys?: boolean\n\n /**\n * Cascade behavior when the revoked user is an admin who has granted\n * other admins.\n *\n * - `'strict'` (default) — recursively revoke every admin that the\n * target (transitively) granted. The cascade walks the\n * `granted_by` field on each keyring file and stops at non-admin\n * leaves. All affected collections are accumulated and rotated in\n * a single pass at the end, so cascade cost is O(records in\n * affected collections), not O(records × cascade depth).\n *\n * - `'warn'` — leave the descendant admins in place but emit a\n * `console.warn` listing them. Useful for diagnostic dry runs and\n * for environments where the operator wants to clean up the\n * delegation tree manually.\n *\n * No effect when the target is not an admin (operators, viewers, and\n * clients cannot grant other users, so they have no delegation\n * subtree to cascade through). Defaults to `'strict'`.\n */\n readonly cascade?: 'strict' | 'warn'\n}\n\n// ─── Cross-vault queries ──────────────────────────────\n\n/**\n * One entry returned by `Noydb.listAccessibleVaults()`. Carries\n * the compartment id and the role the calling principal holds in it,\n * so the consumer can decide how to fan out without re-checking\n * permissions per vault.\n */\nexport interface AccessibleVault {\n readonly id: string\n readonly role: Role\n}\n\n/**\n * Options for `Noydb.listAccessibleVaults()`.\n */\nexport interface ListAccessibleVaultsOptions {\n /**\n * Minimum role the caller must hold to include a compartment in the\n * result. Compartments where the caller's role is strictly *below*\n * this threshold are silently excluded. Defaults to `'client'`,\n * which means \"every vault I can unwrap is returned.\" Set to\n * `'admin'` for \"vaults where I can grant/revoke,\" or\n * `'owner'` for \"vaults I own.\"\n *\n * The privilege ordering used:\n * `client (1) < viewer (2) < operator (3) < admin (4) < owner (5)`\n *\n * Note: `viewer` and `client` are conceptually peers in the ACL\n * (neither can grant), but `viewer` has read-all access while\n * `client` has only explicit-collection read. The numeric order\n * reflects \"how much can this principal see,\" not \"how much can\n * this principal modify.\"\n */\n readonly minRole?: Role\n}\n\n/**\n * Options for `Noydb.queryAcross()`.\n */\nexport interface QueryAcrossOptions {\n /**\n * Maximum number of compartments to process in parallel. Defaults\n * to `1` (sequential) — conservative because the per-compartment\n * callback typically does its own I/O and an unbounded fan-out can\n * exhaust adapter connections (DynamoDB throughput, S3 socket\n * limits, browser fetch concurrency).\n *\n * Set to `4` or `8` for cloud-backed compartments where parallelism\n * is the whole point of fanning out. Set to `1` (default) for local\n * adapters where the disk I/O serializes anyway.\n */\n readonly concurrency?: number\n}\n\n/**\n * One entry in the array returned by `Noydb.queryAcross()`. 
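A narrowing sketch (the `results` array and `use` helper\n * are illustrative):\n * ```ts\n * for (const r of results) {\n *   if (r.error !== undefined) console.warn(r.vault, 'failed:', r.error.message)\n *   else use(r.vault, r.result)\n * }\n * ```\n * 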
Either\n * `result` is set (callback succeeded for this compartment) or\n * `error` is set (callback threw, or compartment failed to open).\n *\n * Per-compartment errors do **not** abort the overall fan-out — every\n * compartment is given a chance to run its callback, and the\n * partition between success and failure is exposed in the return\n * value. Consumers that want fail-fast semantics can check\n * `r.error !== undefined` and short-circuit themselves.\n */\nexport type QueryAcrossResult<T> =\n | { readonly vault: string; readonly result: T; readonly error?: undefined }\n | { readonly vault: string; readonly result?: undefined; readonly error: Error }\n\n// ─── User Info ─────────────────────────────────────────────────────────\n\nexport interface UserInfo {\n readonly userId: string\n readonly displayName: string\n readonly role: Role\n readonly permissions: Permissions\n readonly createdAt: string\n readonly grantedBy: string\n}\n\n// ─── Session ───────────────────────────────────────────────\n\n/**\n * Operations that a session policy can require re-authentication for.\n * Passed as the `requireReAuthFor` array in `SessionPolicy`.\n */\nexport type ReAuthOperation = 'export' | 'grant' | 'revoke' | 'rotate' | 'changeSecret'\n\n/**\n * Session policy controlling lifetime, re-auth requirements, and\n * background-lock behavior.\n *\n * All timeout values are in milliseconds. `undefined` means \"no limit.\"\n * The policy is evaluated lazily — it does not start timers itself;\n * enforcement happens at the Noydb call site.\n */\nexport interface SessionPolicy {\n /**\n * Idle timeout in ms. If no NOYDB operation is performed for this\n * duration, the session is revoked on the next operation attempt\n * (which will throw `SessionExpiredError`). The idle clock resets\n * on every successful operation.\n *\n * Default: `undefined` (no idle timeout).\n */\n readonly idleTimeoutMs?: number\n\n /**\n * Absolute timeout in ms from session creation. After this duration\n * the session is unconditionally revoked regardless of activity.\n *\n * Default: `undefined` (no absolute timeout).\n */\n readonly absoluteTimeoutMs?: number\n\n /**\n * Operations that require the user to re-authenticate (re-enter their\n * passphrase or perform a fresh WebAuthn assertion) before proceeding,\n * even if the session is still alive.\n *\n * Common pattern: `requireReAuthFor: ['export', 'grant']` — allow\n * read/write operations in the background but demand a fresh credential\n * for high-risk mutations.\n *\n * Default: `[]` (no extra re-auth requirements).\n */\n readonly requireReAuthFor?: readonly ReAuthOperation[]\n\n /**\n * If `true`, the session is revoked when the page goes to the background\n * (visibilitychange event, `document.hidden === true`). Useful for\n * high-sensitivity deployments where leaving the tab is treated as\n * a session boundary.\n *\n * No-op in non-browser environments (Node.js, workers without document).\n * Default: `false`.\n */\n readonly lockOnBackground?: boolean\n}\n\n// ─── i18n / Locale ─────────────────────────────────────\n\n/**\n * Locale-aware read options. Pass to `Collection.get()`, `list()`,\n * `query()`, and `scan()` to trigger per-record locale resolution for\n * `dictKey` and `i18nText` fields.\n *\n * - **`locale: 'raw'`** — skip resolution for `i18nText` fields and\n * return the full `{ [locale]: string }` map. Dict key fields still\n * return the stable key (no `<field>Label` added).\n * - **`fallback`** — single locale code or ordered list. 
Use `'any'` as\n * the last element to fall back to any present translation.\n *\n * When neither the call-level locale nor the compartment's default locale\n * is set, reading a record with `i18nText` fields throws\n * `LocaleNotSpecifiedError`.\n */\nexport interface LocaleReadOptions {\n /**\n * The target locale code (e.g. `'th'`), or `'raw'` to return the full\n * language map without resolution.\n */\n readonly locale?: string\n /**\n * Fallback locale or ordered fallback chain. Use `'any'` as the last\n * element to fall back to any present translation.\n */\n readonly fallback?: string | readonly string[]\n}\n\n// ─── plaintextTranslator hook ──────────────────────────────\n\n/**\n * Context passed to the consumer-supplied `plaintextTranslator` function.\n * The hook receives the source text plus enough metadata to route it to the\n * right translation service and record what it did.\n */\nexport interface PlaintextTranslatorContext {\n /** The plaintext string to translate. */\n readonly text: string\n /** BCP 47 source locale (the locale the text is written in). */\n readonly from: string\n /** BCP 47 target locale to translate into. */\n readonly to: string\n /** The schema field name that triggered the translation. */\n readonly field: string\n /** The collection the record is being put into. */\n readonly collection: string\n}\n\n/**\n * A consumer-supplied async function that translates a single string\n * from one locale to another. noy-db ships no built-in translator.\n *\n * **Security:** this function receives plaintext. The consumer is\n * responsible for the data policy of whatever service it calls. See\n * `NOYDB_SPEC.md § Zero-Knowledge Storage` and the `plaintextTranslator`\n * JSDoc on `NoydbOptions` for the full invariant statement.\n */\nexport type PlaintextTranslatorFn = (\n ctx: PlaintextTranslatorContext,\n) => Promise<string>\n\n/**\n * One entry in the in-process translator audit log. Cleared when\n * `db.close()` is called — same lifetime as the KEK and DEKs.\n *\n * Deliberately omits any content hash or translated-text fingerprint\n * to prevent correlation attacks on the audit trail.\n */\nexport interface TranslatorAuditEntry {\n readonly type: 'translator-invocation'\n /** Schema field name that was translated. */\n readonly field: string\n /** Collection the record belongs to. */\n readonly collection: string\n /** Source locale. */\n readonly fromLocale: string\n /** Target locale. */\n readonly toLocale: string\n /**\n * Consumer-provided translator name from\n * `NoydbOptions.plaintextTranslatorName`. Defaults to `'anonymous'`\n * when not supplied.\n */\n readonly translatorName: string\n /** ISO 8601 timestamp of the invocation. */\n readonly timestamp: string\n /**\n * `true` when the result was served from the in-process cache rather\n * than by calling the translator function. Present only on cache hits\n * so the absence of the field also communicates a cache miss.\n */\n readonly cached?: true\n}\n\n// ─── Presence ─────────────────────────────────────────────\n\n/**\n * A presence peer entry. `lastSeen` is an ISO timestamp set by core on each\n * `update()` call. 
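The cutoff is\n * roughly (sketch; `peers` illustrative):\n * ```ts\n * const fresh = peers.filter(p => Date.now() - Date.parse(p.lastSeen) <= staleMs)\n * ```\n * 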
Stale entries (lastSeen older than `staleMs`) are filtered\n * before delivering to the subscriber callback.\n */\nexport interface PresencePeer<P> {\n readonly userId: string\n readonly payload: P\n readonly lastSeen: string\n}\n\n// ─── CRDT ─────────────────────────────────────────────────\n\n// Re-exported from crdt.ts so consumers only need one import path.\nexport type { CrdtMode, CrdtState, LwwMapState, RgaState, YjsState } from './crdt/crdt.js'\n\n// ─── Blob / Attachment Store ────────────────────────\n\n/**\n * Second store shape for blob-store backends (Drive, WebDAV, Git, iCloud)\n * that operate on whole-vault bundles rather than per-record KV.\n *\n * Implement `readBundle` / `writeBundle` instead of the six-method KV\n * contract. Use `wrapBundleStore()` from `@noy-db/hub` to convert to a\n * `NoydbStore` that the rest of the API consumes transparently.\n *\n * Named `NoydbBundleStore` (not `NoydbBundleAdapter`) for consistency\n * with the hub / to-* / in-* rename. Concrete implementations ship\n * in `@noy-db/to-*` packages starting in.\n */\nexport interface NoydbBundleStore {\n /** Discriminant for engine auto-detection of store shape. */\n readonly kind: 'bundle'\n /** Human-readable name for diagnostics (e.g. `'drive'`, `'webdav'`). */\n readonly name?: string\n /**\n * Read the entire vault as raw bytes. Returns `null` if no bundle exists\n * yet (first open of a brand-new vault).\n */\n readBundle(vaultId: string): Promise<{ bytes: Uint8Array; version: string } | null>\n /**\n * Write the entire vault as raw bytes. `expectedVersion` is the version\n * token from the last `readBundle` (or `null` for a first write).\n * Implementations MUST reject the write if the stored version has advanced\n * past `expectedVersion` — throw `BundleVersionConflictError`.\n * Returns the new version token on success.\n */\n writeBundle(\n vaultId: string,\n bytes: Uint8Array,\n expectedVersion: string | null,\n ): Promise<{ version: string }>\n /** Delete a vault bundle. Idempotent — no-op if the bundle does not exist. */\n deleteBundle(vaultId: string): Promise<void>\n /** List all vault bundles managed by this store. */\n listBundles(): Promise<Array<{ vaultId: string; version: string; size: number }>>\n}\n\n/**\n * Content-addressed blob object stored in the vault-level blob index.\n * Identified by HMAC-SHA-256(blobDEK, plaintext) — opaque to the store.\n *\n * Shared across all collections within a vault for deduplication: two\n * records that attach identical byte content reference the same `eTag`\n * and share a single set of encrypted chunks in `_blob_chunks`.\n */\nexport interface BlobObject {\n /** HMAC-SHA-256 hex of the original plaintext bytes, keyed by `_blob` DEK. */\n readonly eTag: string\n /** Original uncompressed size in bytes. */\n readonly size: number\n /** Compressed size in bytes (the payload that is actually encrypted and chunked). */\n readonly compressedSize: number\n /** Compression algorithm applied before encryption. */\n readonly compression: 'gzip' | 'none'\n /** Raw chunk size in bytes used at write time. Readers MUST use this value. */\n readonly chunkSize: number\n /** Total number of chunks written. Reader expects exactly this many. */\n readonly chunkCount: number\n /** MIME type if provided or auto-detected at upload time. */\n readonly mimeType?: string\n /** ISO timestamp of first upload. */\n readonly createdAt: string\n /** Live reference count — slots + published versions pointing to this blob. 
*/\n readonly refCount: number\n /**\n * Hint indicating which store holds the chunk data.\n * Used by `routeStore` size-tiered routing: `'default'` for small blobs\n * stored inline (e.g. DynamoDB), `'blobs'` for large blobs in the overflow\n * store (e.g. S3). Absent when no routing is configured.\n */\n readonly storeHint?: 'default' | 'blobs'\n}\n\n// ─── Attachment types ─────────────────────────────────────────\n\n/** Single attachment metadata entry stored inside a record's attachment envelope. */\nexport interface AttachmentEntry {\n /** Content-addressed identifier (HMAC-SHA-256 of plaintext). */\n readonly eTag: string\n /** User-visible filename for the slot. */\n readonly filename: string\n /** Original uncompressed size in bytes. */\n readonly size: number\n /** MIME type, if provided or auto-detected at upload time. */\n readonly mimeType?: string\n /** ISO timestamp of the upload. */\n readonly uploadedAt: string\n /** User ID of the uploader, if available. */\n readonly uploadedBy?: string\n}\n\n/** Attachment entry annotated with its slot name, as returned by `AttachmentHandle.list()`. */\nexport type AttachmentInfo = AttachmentEntry & { readonly name: string }\n\n/** Options for `AttachmentHandle.put()`. */\nexport interface AttachmentPutOptions {\n /** Compress the attachment with gzip before encryption. Default: `true`. */\n compress?: boolean\n /** Chunk size in bytes. Default: `DEFAULT_CHUNK_SIZE` (256 KB). */\n chunkSize?: number\n /** MIME type to store with the attachment. Auto-detected from magic bytes if omitted. */\n mimeType?: string\n /** User ID to record as the uploader. Falls back to the active user's ID. */\n uploadedBy?: string\n}\n\n/** Options for `AttachmentHandle.response()`. */\nexport interface AttachmentResponseOptions {\n /**\n * Set `Content-Disposition: inline` so the browser renders the file\n * instead of downloading it. Default: `false` (attachment disposition).\n */\n inline?: boolean\n}\n\n/**\n * Slot record — mutable metadata linking a named slot on a record\n * to a `BlobObject` via its eTag.\n *\n * Multiple slots (even across different records) may reference the same\n * `eTag` — the underlying chunks are shared. Updating metadata creates\n * a new envelope version (`_v++`) while the blob data is unchanged.\n */\nexport interface SlotRecord {\n /** Reference to the `BlobObject` in `_blob_index`. */\n readonly eTag: string\n /** User-visible filename for the slot. */\n readonly filename: string\n /** Original uncompressed size in bytes (denormalized from `BlobObject`). */\n readonly size: number\n /** MIME type. Takes precedence over the MIME type stored in `BlobObject`. */\n readonly mimeType?: string\n /** ISO timestamp of the upload that set this slot. */\n readonly uploadedAt: string\n /** User ID of the uploader, if available. */\n readonly uploadedBy?: string\n}\n\n/** Result of `BlobSet.list()` — slot record plus its named slot key. */\nexport interface SlotInfo extends SlotRecord {\n /** The slot name (key in the record's slot map). */\n readonly name: string\n}\n\n/**\n * Explicitly published version snapshot — an independent reference to a\n * blob at a specific point in time.\n */\nexport interface VersionRecord {\n /** User-defined label (e.g. `'issued-2025-01'`, `'amendment-2025-02'`). */\n readonly label: string\n /** eTag of the blob snapshot at publish time — independent of the current slot. */\n readonly eTag: string\n /** ISO timestamp when the version was published. 
*/\n readonly publishedAt: string\n /** User ID of the publisher, if available. */\n readonly publishedBy?: string\n}\n\n/** Options for `BlobSet.put()`. */\nexport interface BlobPutOptions {\n /** MIME type hint. If omitted, auto-detected from magic bytes. */\n mimeType?: string\n /**\n * Raw chunk size in bytes. Priority: this value > store.maxBlobBytes > 256 KB.\n */\n chunkSize?: number\n /**\n * Whether to gzip-compress bytes before encrypting. Default: `true`.\n * Auto-set to `false` for pre-compressed MIME types (JPEG, PNG, ZIP, etc.).\n */\n compress?: boolean\n /** User ID to record as `uploadedBy`. Defaults to the Noydb session user. */\n uploadedBy?: string\n}\n\n/** Options for `BlobSet.response()` and `BlobSet.responseVersion()`. */\nexport interface BlobResponseOptions {\n /**\n * When `true`, sets `Content-Disposition: inline; filename=\"...\"` so\n * the browser renders the file in the tab. Default (`false`) sets\n * `attachment; filename=\"...\"` which triggers a download.\n */\n inline?: boolean\n /** Override the filename in the Content-Disposition header. */\n filename?: string\n}\n\n// ─── Store Capabilities ─────────────────────────────\n\nexport type StoreAuthKind =\n | 'none'\n | 'filesystem'\n | 'api-key'\n | 'iam'\n | 'oauth'\n | 'kerberos'\n | 'browser-origin'\n\nexport interface StoreAuth {\n kind: StoreAuthKind | StoreAuthKind[]\n required: boolean\n flow: 'static' | 'oauth' | 'kerberos' | 'implicit'\n}\n\nexport interface StoreCapabilities {\n /**\n * true — the store's expectedVersion check and write are atomic at the\n * storage layer. Two concurrent puts with the same expectedVersion will\n * produce exactly one success and one ConflictError.\n * false — check and write are separate operations with a race window.\n */\n casAtomic: boolean\n auth: StoreAuth\n /**\n * true — the store implements {@link NoydbStore.tx} and commits\n * every op atomically at the storage layer. The hub's\n * `db.transaction(fn)` will delegate to `tx(ops)` and surface a\n * single pass/fail outcome. false (or absent) — no native\n * multi-record atomicity; the hub falls back to per-record OCC\n * with best-effort unwind on partial failure.\n */\n txAtomic?: boolean\n /**\n * Maximum raw bytes per blob chunk record.\n * `undefined` — no limit (S3, file, IDB); blob stored as single chunk.\n * `256 * 1024` — DynamoDB (400 KB item limit minus envelope overhead).\n * `5 * 1024 * 1024` — localStorage quota safety.\n */\n maxBlobBytes?: number\n}\n\n// ─── Factory Options ───────────────────────────────────────────────────\n\nexport interface NoydbOptions {\n /** Primary store (local storage). */\n readonly store: NoydbStore\n /**\n * tree-shake seam — optional blob strategy. Pass `withBlobs()`\n * from `@noy-db/hub/blobs` to enable `collection.blob(id)` storage.\n * When omitted, hub's blob machinery stays out of the bundle (ESM\n * tree-shaking) and `collection.blob(id)` throws with a pointer at\n * the subpath. `BlobStrategy` is `@internal` — users only construct\n * it via the subpath factory.\n *\n * @internal\n */\n readonly blobStrategy?: BlobStrategy\n /**\n * tree-shake seam — optional indexing strategy. Pass\n * `withIndexing()` from `@noy-db/hub/indexing` to enable eager-mode\n * `==/in` fast-paths, lazy-mode `.lazyQuery()`, rebuild/reconcile,\n * and auto-reconcile. 
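A wiring sketch\n * (assumes `withIndexing` needs no arguments here; see the subpath\n * package for its actual options):\n * ```ts\n * import { createNoydb } from '@noy-db/hub'\n * import { withIndexing } from '@noy-db/hub/indexing'\n *\n * const db = await createNoydb({\n *   store,\n *   user: 'alice',\n *   secret: passphrase,\n *   indexStrategy: withIndexing(),\n * })\n * ```\n * 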
When omitted, indexing code never reaches the\n * bundle; `.lazyQuery()` throws with a pointer at the subpath, and\n * eager-mode collections fall back to linear scans regardless of\n * `indexes: [...]` declarations. `IndexStrategy` is `@internal` —\n * users only construct it via the subpath factory.\n *\n * @internal\n */\n readonly indexStrategy?: IndexStrategy\n /**\n * tree-shake seam — optional aggregate strategy. Pass\n * `withAggregate()` from `@noy-db/hub/aggregate` to enable\n * `.aggregate()` and `.groupBy()` on Query. When omitted, those\n * methods throw with a pointer at the subpath; the ~886 LOC of\n * Aggregation + GroupedQuery machinery never reaches the bundle.\n * Streaming `scan().aggregate()` works independently of this\n * strategy — it doesn't use the `Aggregation` class.\n *\n * @internal\n */\n readonly aggregateStrategy?: AggregateStrategy\n /**\n * tree-shake seam — optional CRDT strategy. Required when\n * any collection is declared with `crdt: 'lww-map' | 'rga' | 'yjs'`;\n * otherwise the first put/sync-merge hitting the CRDT path throws.\n * When omitted, ~221 LOC of LWW-Map / RGA / merge helpers never\n * reach the bundle.\n *\n * @internal\n */\n readonly crdtStrategy?: CrdtStrategy\n /**\n * tree-shake seam — optional consent-audit strategy. Pass\n * `withConsent()` from `@noy-db/hub/consent` to enable per-op audit\n * writes into `_consent_audit` when a consent scope is active.\n * When omitted, `vault.consentAudit()` returns `[]` and writes are\n * no-ops; the consent module's ~194 LOC never reaches the bundle.\n *\n * @internal\n */\n readonly consentStrategy?: ConsentStrategy\n /**\n * tree-shake seam — optional periods strategy. Pass\n * `withPeriods()` from `@noy-db/hub/periods` to enable\n * `vault.closePeriod()` / `.openPeriod()` / write-guard on closed\n * periods. When omitted, `vault.listPeriods()` returns `[]` and\n * the write-guard is a no-op; the ~363 LOC of period validation +\n * ledger appending stay out of the bundle.\n *\n * @internal\n */\n readonly periodsStrategy?: PeriodsStrategy\n /**\n * tree-shake seam — optional VaultFrame strategy. Pass\n * `withShadow()` from `@noy-db/hub/shadow` to enable\n * `vault.frame()`. Without it, calling `vault.frame()` throws.\n *\n * @internal\n */\n readonly shadowStrategy?: ShadowStrategy\n /**\n * tree-shake seam — optional multi-record transactions. Pass\n * `withTransactions()` from `@noy-db/hub/tx` to enable\n * `db.transaction(fn)`. Without it, calling the method throws.\n *\n * @internal\n */\n readonly txStrategy?: TxStrategy\n /**\n * tree-shake seam — optional history + ledger + time-machine.\n * Pass `withHistory()` from `@noy-db/hub/history` to enable\n * per-record version snapshots, the hash-chained audit ledger, JSON\n * Patch deltas, `vault.ledger()`, `vault.at()`, and the\n * `collection.history()` / `getVersion()` / `revert()` / `diff()` /\n * `clearHistory()` / `pruneRecordHistory()` read APIs. When omitted,\n * snapshots/prune/clear are silent no-ops, the read APIs throw with\n * a pointer at the subpath, and ~1,880 LOC stay out of the bundle.\n *\n * @internal\n */\n readonly historyStrategy?: HistoryStrategy\n /**\n * tree-shake seam — optional i18n strategy. Pass `withI18n()`\n * from `@noy-db/hub/i18n` to enable `i18nText`/`dictKey` field\n * resolution on reads, `i18nText` validation on writes, and\n * `vault.dictionary(name)`. 
When omitted, locale resolution is the\n * identity (raw values returned), the validators throw with a\n * pointer to the subpath, and ~854 LOC of dictionary + locale\n * machinery stay out of the bundle.\n *\n * @internal\n */\n readonly i18nStrategy?: I18nStrategy\n /**\n * tree-shake seam — optional session-policy strategy. Pass\n * `withSession()` from `@noy-db/hub/session` to enable\n * `sessionPolicy` validation, `PolicyEnforcer` lifecycle (idle /\n * absolute timeouts, lockOnBackground), and global session-token\n * revocation. When omitted, setting `sessionPolicy` throws at\n * `createNoydb()` time, and ~495 LOC of policy + token machinery\n * stay out of the bundle.\n *\n * @internal\n */\n readonly sessionStrategy?: SessionStrategy\n /**\n * tree-shake seam — optional sync engine + presence strategy.\n * Pass `withSync()` from `@noy-db/hub/sync` to enable\n * `db.push()` / `pull()` / replication, `db.transaction(vault)`\n * for sync-aware transactions, and `collection.presence()`. When\n * omitted, configuring `sync` / calling these surfaces throws with\n * a pointer at the subpath, and ~856 LOC of replication + presence\n * machinery stay out of the bundle. Keyring stays core; grant/\n * revoke/magic-link/delegation tree-shake via direct imports.\n *\n * @internal\n */\n readonly syncStrategy?: SyncStrategy\n /** Optional remote store(s) for sync. Accepts a single store, a SyncTarget, or an array. */\n readonly sync?: NoydbStore | SyncTarget | SyncTarget[]\n /** User identifier. */\n readonly user: string\n /** Passphrase for key derivation. Required unless encrypt is false or `getKeyring` is provided. */\n readonly secret?: string\n /**\n * Optional callback that returns an unlocked keyring for a given vault.\n * Use this to plug in WebAuthn / OIDC / Shamir / any unlock path that\n * produces an `UnlockedKeyring` outside the passphrase model.\n *\n * When set, `secret` MUST NOT also be set — `createNoydb` throws if both\n * are supplied. When neither is set (and `encrypt !== false`), `createNoydb`\n * also throws.\n *\n * The callback is called lazily, on the first operation that needs the\n * keyring for a given vault. Noydb caches the returned keyring per-vault\n * for the lifetime of the instance, so the callback is invoked at most\n * once per `(instance, vault)` pair (assuming the callback resolves\n * successfully). If the callback rejects, the rejection surfaces from the\n * first vault operation that triggered the unlock; subsequent operations\n * will retry the callback.\n *\n * @example\n * ```ts\n * import { createNoydb } from '@noy-db/hub'\n * import { unlockWebAuthn } from '@noy-db/on-webauthn'\n *\n * const enrollment = await loadEnrollment()\n * const db = await createNoydb({\n * store,\n * user: 'alice',\n * getKeyring: (vault) => unlockWebAuthn(enrollment),\n * })\n * ```\n *\n * Note: this callback is responsible for both the \"open existing vault\"\n * and the \"create new vault\" cases. Unlike the passphrase path, there is\n * no automatic `NoAccessError` → `createOwnerKeyring` fallback, because\n * the callback owner has the UI context to decide which path to run.\n * For first-time bootstrap, use a passphrase or recovery code, enroll\n * WebAuthn from the unlocked keyring, then swap to `getKeyring` on\n * subsequent sessions.\n */\n readonly getKeyring?: (vault: string) => Promise<UnlockedKeyring>\n /** Auth method. Default: 'passphrase'. */\n readonly auth?: 'passphrase' | 'biometric'\n /** Enable encryption. Default: true. 
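*\n * @example Plaintext mode (sketch). With `encrypt: false` no `secret` is required:\n * ```ts\n * const db = await createNoydb({ store, user: 'alice', encrypt: false })\n * ```\n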
*/\n readonly encrypt?: boolean\n /** Conflict resolution strategy. Default: 'version'. */\n readonly conflict?: ConflictStrategy\n /**\n * Sync scheduling policy. Controls when push/pull fire.\n * Default inferred from store category: per-record → `on-change`,\n * bundle → `debounce 30s`.\n */\n readonly syncPolicy?: SyncPolicy\n /**\n * @deprecated Use `syncPolicy` instead. Kept for backward compatibility.\n * When both are supplied, `syncPolicy` takes precedence.\n */\n readonly autoSync?: boolean\n /**\n * @deprecated Use `syncPolicy` instead. Kept for backward compatibility.\n */\n readonly syncInterval?: number\n /**\n * Session timeout in ms. Clears keys after inactivity. Default: none.\n * @deprecated Use `sessionPolicy.idleTimeoutMs` instead. This field is\n * still honored for backwards compatibility but `sessionPolicy` takes\n * precedence when both are supplied.\n */\n readonly sessionTimeout?: number\n /**\n * Session policy controlling lifetime, re-auth requirements, and\n * background-lock behavior. When supplied, replaces the\n * legacy `sessionTimeout` field.\n */\n readonly sessionPolicy?: SessionPolicy\n /** Validate passphrase strength on creation. Default: true. */\n readonly validatePassphrase?: boolean\n /** Audit history configuration. */\n readonly history?: HistoryConfig\n /**\n * Consumer-supplied translation function for `i18nText` fields with\n * `autoTranslate: true`.\n *\n * ⚠ **`plaintextTranslator` receives unencrypted text.** Configuring\n * this hook causes plaintext to leave noy-db's zero-knowledge boundary\n * over whatever channel the consumer's implementation uses. noy-db ships\n * no built-in translator and adds no translator SDKs as dependencies.\n * The consumer chooses and owns the data policy of the external service.\n *\n * Per-field opt-in via `autoTranslate: true` on `i18nText()`. Calling\n * `put()` on a collection with `autoTranslate: true` fields while this\n * option is absent throws `TranslatorNotConfiguredError`.\n *\n * See `NOYDB_SPEC.md § Zero-Knowledge Storage` for the invariant text.\n */\n readonly plaintextTranslator?: PlaintextTranslatorFn\n /**\n * Human-readable name for the translator, recorded in the in-process\n * audit log (e.g. `'deepl-pro-with-dpa'`, `'self-hosted-llama-7b'`).\n * Defaults to `'anonymous'` when not supplied.\n */\n readonly plaintextTranslatorName?: string\n}\n\n// ─── History / Audit Trail ─────────────────────────────────────────────\n\n/** History configuration. */\nexport interface HistoryConfig {\n /** Enable history tracking. Default: true. */\n readonly enabled?: boolean\n /** Maximum history entries per record. Oldest pruned on overflow. Default: unlimited. */\n readonly maxVersions?: number\n}\n\n/** Options for querying history. */\nexport interface HistoryOptions {\n /** Start date (inclusive), ISO 8601. */\n readonly from?: string\n /** End date (inclusive), ISO 8601. */\n readonly to?: string\n /** Maximum entries to return. */\n readonly limit?: number\n}\n\n/** Options for pruning history. */\nexport interface PruneOptions {\n /** Keep only the N most recent versions. */\n readonly keepVersions?: number\n /** Delete versions older than this date, ISO 8601. */\n readonly beforeDate?: string\n}\n\n/** A decrypted history entry. */\nexport interface HistoryEntry<T> {\n readonly version: number\n readonly timestamp: string\n readonly userId: string\n readonly record: T\n}\n\n// ─── Bulk operations ──────────────────────────────────────\n\n/** Per-item options for `Collection.putMany()`. 
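Ahead of the option and result interfaces below, a usage sketch. The two-argument `putMany(items, opts)` call shape is an assumption; the result fields are the documented ones.

```ts
// Default best-effort loop: a partial result comes back for inspection.
const res = await collection.putMany(items)
if (!res.ok) {
  for (const f of res.failures) console.warn(`write ${f.id} failed:`, f.error)
}

// Atomic mode: all-or-nothing. A mid-batch failure throws (after a
// best-effort revert) instead of returning a partial result.
await collection.putMany(items, { atomic: true })
```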
*/\nexport interface PutManyItemOptions {\n /**\n * Optimistic-concurrency check: fail this item if the stored version\n * is not `expectedVersion`. Honored only in `atomic: true` mode;\n * ignored in the default best-effort loop.\n */\n readonly expectedVersion?: number\n}\n\n/**\n * Batch-level options for `Collection.putMany()` and `deleteMany()`.\n *\n * `atomic: true` switches the call from best-effort loop\n * to all-or-nothing: a pre-flight CAS check runs first, then every op\n * is executed; any mid-batch failure triggers a best-effort revert.\n * On failure in atomic mode the whole call throws — you won't get a\n * partial `PutManyResult`. On success the result mirrors the default\n * loop's shape.\n */\nexport interface PutManyOptions {\n readonly atomic?: boolean\n}\n\n/** Result of `Collection.putMany()`. */\nexport interface PutManyResult {\n /** `true` iff every entry succeeded. */\n readonly ok: boolean\n /** IDs that were successfully written. */\n readonly success: readonly string[]\n /** Entries that failed, with the error that prevented each write. */\n readonly failures: ReadonlyArray<{ readonly id: string; readonly error: Error }>\n}\n\n/** Result of `Collection.deleteMany()`. Same shape as `PutManyResult`. */\nexport interface DeleteManyResult {\n readonly ok: boolean\n readonly success: readonly string[]\n readonly failures: ReadonlyArray<{ readonly id: string; readonly error: Error }>\n}\n","/**\n * All NOYDB error classes — a single import surface for `catch` blocks and\n * `instanceof` checks.\n *\n * ## Class hierarchy\n *\n * ```\n * Error\n * └─ NoydbError (code: string)\n * ├─ Crypto errors\n * │ ├─ DecryptionError — AES-GCM tag failure\n * │ ├─ TamperedError — ciphertext modified after write\n * │ └─ InvalidKeyError — wrong passphrase / corrupt keyring\n * ├─ Access errors\n * │ ├─ NoAccessError — no DEK for this collection\n * │ ├─ ReadOnlyError — ro permission, write attempted\n * │ ├─ PermissionDeniedError — role too low for operation\n * │ ├─ PrivilegeEscalationError — grant wider than grantor holds\n * │ └─ StoreCapabilityError — optional store method missing\n * ├─ Sync errors\n * │ ├─ ConflictError — optimistic-lock version mismatch\n * │ ├─ BundleVersionConflictError — bundle push rejected by remote\n * │ └─ NetworkError — push/pull network failure\n * ├─ Data errors\n * │ ├─ NotFoundError — get(id) on missing record\n * │ ├─ ValidationError — application-level guard failed\n * │ └─ SchemaValidationError — Standard Schema v1 rejection\n * ├─ Query errors\n * │ ├─ JoinTooLargeError — join row ceiling exceeded\n * │ ├─ DanglingReferenceError — strict ref() points at nothing\n * │ ├─ GroupCardinalityError — groupBy bucket cap exceeded\n * │ ├─ IndexRequiredError — lazy-mode query touches unindexed field\n * │ └─ IndexWriteFailureError — index side-car put/delete failed post-main\n * ├─ i18n / Dictionary errors\n * │ ├─ ReservedCollectionNameError\n * │ ├─ DictKeyMissingError\n * │ ├─ DictKeyInUseError\n * │ ├─ MissingTranslationError\n * │ ├─ LocaleNotSpecifiedError\n * │ └─ TranslatorNotConfiguredError\n * ├─ Backup errors\n * │ ├─ BackupLedgerError — hash-chain verification failed\n * │ └─ BackupCorruptedError — envelope hash mismatch in dump\n * ├─ Bundle errors\n * │ └─ BundleIntegrityError — .noydb body sha256 mismatch\n * └─ Session errors\n * ├─ SessionExpiredError\n * ├─ SessionNotFoundError\n * └─ SessionPolicyError\n * ```\n *\n * ## Catching all NOYDB errors\n *\n * ```ts\n * import { NoydbError, InvalidKeyError, ConflictError } from 
'@noy-db/hub'\n *\n * try {\n * await vault.unlock(passphrase)\n * } catch (e) {\n * if (e instanceof InvalidKeyError) { showBadPassphraseUI(); return }\n * if (e instanceof NoydbError) { logToSentry(e.code, e); return }\n * throw e // unexpected — re-throw\n * }\n * ```\n *\n * @module\n */\n\n/**\n * Base class for all NOYDB errors.\n *\n * Every error thrown by `@noy-db/hub` extends this class, so consumers can\n * catch all NOYDB errors in a single `catch (e) { if (e instanceof NoydbError) ... }`\n * block. The `code` field is a machine-readable string (e.g. `'DECRYPTION_FAILED'`)\n * suitable for `switch` statements and logging pipelines.\n */\nexport class NoydbError extends Error {\n /** Machine-readable error code. Stable across library versions. */\n readonly code: string\n\n constructor(code: string, message: string) {\n super(message)\n this.name = 'NoydbError'\n this.code = code\n }\n}\n\n// ─── Crypto Errors ─────────────────────────────────────────────────────\n\n/**\n * Thrown when AES-GCM decryption fails.\n *\n * The most common cause is a wrong passphrase or a corrupted ciphertext.\n * A `DecryptionError` at the wrong passphrase level is caught internally\n * and re-thrown as `InvalidKeyError` — so in practice this surfaces for\n * per-record corruption rather than authentication failures.\n */\nexport class DecryptionError extends NoydbError {\n constructor(message = 'Decryption failed') {\n super('DECRYPTION_FAILED', message)\n this.name = 'DecryptionError'\n }\n}\n\n/**\n * Thrown when GCM tag verification fails, indicating the ciphertext was\n * modified after encryption.\n *\n * AES-256-GCM is authenticated encryption — the tag over the ciphertext\n * is checked on every decrypt. If any byte was flipped (accidental\n * corruption or deliberate tampering), decryption throws this error.\n * Treat it as a security alert: the stored bytes are not what NOYDB wrote.\n */\nexport class TamperedError extends NoydbError {\n constructor(message = 'Data integrity check failed — record may have been tampered with') {\n super('TAMPERED', message)\n this.name = 'TamperedError'\n }\n}\n\n/**\n * Thrown when key unwrapping fails, typically because the passphrase is wrong\n * or the keyring file is corrupted.\n *\n * NOYDB uses AES-KW (RFC 3394) to wrap DEKs with the KEK. If AES-KW\n * unwrapping fails, it means either the KEK was derived from the wrong\n * passphrase (PBKDF2 with 600K iterations) or the keyring bytes are\n * corrupted. This is the error shown to the user on a failed unlock attempt.\n */\nexport class InvalidKeyError extends NoydbError {\n constructor(message = 'Invalid key — wrong passphrase or corrupted keyring') {\n super('INVALID_KEY', message)\n this.name = 'InvalidKeyError'\n }\n}\n\n// ─── Access Errors ─────────────────────────────────────────────────────\n\n/**\n * Thrown when the authenticated user does not have a DEK for the requested\n * collection — i.e. the collection is not in their keyring at all.\n *\n * This is the \"no key for this door\" error. 
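Beyond `instanceof`, the stable `code` field supports switch-style dispatch. A hedged sketch, reusing handler names from the example above alongside illustrative placeholders:

```ts
import { NoydbError } from '@noy-db/hub'

try {
  await collection.get(id)
} catch (e) {
  if (!(e instanceof NoydbError)) throw e
  switch (e.code) {
    case 'TAMPERED':          alertSecurityTeam(e); break // stored bytes changed after write
    case 'DECRYPTION_FAILED': reportCorruption(e); break  // per-record corruption
    case 'INVALID_KEY':       showBadPassphraseUI(); break
    default:                  logToSentry(e.code, e)
  }
}
```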
It is different from\n * `ReadOnlyError` (user has a key but it only grants ro) and from\n * `PermissionDeniedError` (user's role doesn't allow the operation).\n */\nexport class NoAccessError extends NoydbError {\n constructor(message = 'No access — user does not have a key for this collection') {\n super('NO_ACCESS', message)\n this.name = 'NoAccessError'\n }\n}\n\n/**\n * Thrown when a user with read-only (`ro`) permission attempts a write\n * operation (`put` or `delete`) on a collection.\n *\n * The user has a DEK for the collection (they can decrypt and read), but\n * their keyring grants only `ro`. To fix: re-grant the user with `rw`\n * permission, or do not attempt writes as a viewer/client role.\n */\nexport class ReadOnlyError extends NoydbError {\n constructor(message = 'Read-only — user has ro permission on this collection') {\n super('READ_ONLY', message)\n this.name = 'ReadOnlyError'\n }\n}\n\n/**\n * Thrown when a write is attempted against a historical view produced\n * by `vault.at(timestamp)`. Time-machine views are read-only by\n * contract — mutating the past would require either the shadow-vault\n * mechanism or a ledger-history rewrite (which breaks\n * the tamper-evidence guarantee).\n *\n * Distinct from {@link ReadOnlyError} (keyring-level) and\n * {@link PermissionDeniedError} (role-level): this error is about the\n * *view* being historical, independent of the caller's permissions.\n */\nexport class ReadOnlyAtInstantError extends NoydbError {\n constructor(operation: string, timestamp: string) {\n super(\n 'READ_ONLY_AT_INSTANT',\n `Cannot ${operation}() on a vault view anchored at ${timestamp} — time-machine views are read-only`,\n )\n this.name = 'ReadOnlyAtInstantError'\n }\n}\n\n/**\n * Thrown when a write is attempted against a shadow-vault frame\n * produced by `vault.frame()`. Frames are read-only by contract —\n * the use case is screen-sharing / demos / compliance review where\n * the operator wants to prevent accidental edits.\n *\n * Behavioural enforcement only — the underlying keyring still holds\n * write-capable DEKs. See {@link VaultFrame} for the full caveat.\n */\nexport class ReadOnlyFrameError extends NoydbError {\n constructor(operation: string) {\n super(\n 'READ_ONLY_FRAME',\n `Cannot ${operation}() on a vault frame — frames are read-only presentations of the current vault`,\n )\n this.name = 'ReadOnlyFrameError'\n }\n}\n\n/**\n * Thrown when the authenticated user's role does not permit the requested\n * operation — e.g. a `viewer` calling `grantAccess()`, or an `operator`\n * calling `rotateKeys()`.\n *\n * This is a role-level check (what the user's role allows), distinct from\n * `NoAccessError` (collection not in keyring) and `ReadOnlyError` (in\n * keyring, but write not allowed).\n */\nexport class PermissionDeniedError extends NoydbError {\n constructor(message = 'Permission denied — insufficient role for this operation') {\n super('PERMISSION_DENIED', message)\n this.name = 'PermissionDeniedError'\n }\n}\n\n/**\n * Thrown when an `@noy-db/as-*` export is attempted without the\n * required capability bit on the invoking keyring.\n *\n * Two sub-cases discriminated by the `tier` field:\n *\n * - `tier: 'plaintext'` — a plaintext-tier export (`as-xlsx`,\n * `as-csv`, `as-blob`, `as-zip`, …) was attempted but the\n * keyring's `exportCapability.plaintext` does not include the\n * requested `format` (nor the `'*'` wildcard). 
Default for every\n * role is `plaintext: []` — the owner must positively grant.\n * - `tier: 'bundle'` — an encrypted `as-noydb` bundle export was\n * attempted but the keyring's `exportCapability.bundle` is\n * `false`. Default for `owner`/`admin` is `true`; for\n * `operator`/`viewer`/`client` it is `false`.\n *\n * Distinct from `PermissionDeniedError` (role-level check) and\n * `NoAccessError` (collection not readable). Surfaces separately so\n * UI layers can show a \"request the export capability from your\n * admin\" flow rather than a generic permission error.\n */\nexport class ExportCapabilityError extends NoydbError {\n readonly tier: 'plaintext' | 'bundle'\n readonly format?: string\n readonly userId: string\n\n constructor(opts: {\n tier: 'plaintext' | 'bundle'\n userId: string\n format?: string\n message?: string\n }) {\n const msg =\n opts.message ??\n (opts.tier === 'plaintext'\n ? `Export capability denied — keyring \"${opts.userId}\" is not granted plaintext-export capability for format \"${opts.format ?? '<unknown>'}\". Ask a vault owner or admin to grant it via vault.grant({ exportCapability: { plaintext: ['${opts.format ?? '<format>'}'] } }).`\n : `Export capability denied — keyring \"${opts.userId}\" is not granted encrypted-bundle export capability. Ask a vault owner or admin to grant it via vault.grant({ exportCapability: { bundle: true } }).`)\n super('EXPORT_CAPABILITY', msg)\n this.name = 'ExportCapabilityError'\n this.tier = opts.tier\n this.userId = opts.userId\n if (opts.format !== undefined) this.format = opts.format\n }\n}\n\n/**\n * Thrown when a keyring file's `expires_at` cutoff has passed.\n * Surfaced by `loadKeyring` before any DEK unwrap is attempted —\n * past the cutoff the slot refuses to open even with the right\n * passphrase. Distinct from PBKDF2 / unwrap errors so consumer code\n * can show a precise \"this bundle slot has expired\" message instead\n * of the generic decryption-failure UX.\n *\n * Used predominantly on `BundleRecipient` slots produced by\n * `writeNoydbBundle({ recipients: [...] })` to time-box audit access.\n */\nexport class KeyringExpiredError extends NoydbError {\n readonly userId: string\n readonly expiresAt: string\n constructor(opts: { userId: string; expiresAt: string }) {\n super(\n 'KEYRING_EXPIRED',\n `Keyring \"${opts.userId}\" expired at ${opts.expiresAt}. ` +\n 'The slot refuses to unlock past its expiry timestamp.',\n )\n this.name = 'KeyringExpiredError'\n this.userId = opts.userId\n this.expiresAt = opts.expiresAt\n }\n}\n\n/**\n * Thrown when an `@noy-db/as-*` import is attempted but the invoking\n * keyring lacks the required import-capability bit (issue ).\n *\n * - `tier: 'plaintext'` — a plaintext-tier import (`as-csv`, `as-json`,\n * `as-ndjson`, `as-zip`, …) was attempted but the keyring's\n * `importCapability.plaintext` does not include the requested\n * `format` (nor the `'*'` wildcard).\n * - `tier: 'bundle'` — a `.noydb` bundle import was attempted but the\n * keyring's `importCapability.bundle` is not `true`.\n *\n * Default for every role on every dimension is closed — owners and\n * admins must positively grant the capability. 
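A hedged sketch of the "request the export capability" flow described above. The export call and UI helpers are placeholders; the `tier`, `userId`, and `format` fields are the documented ones.

```ts
import { ExportCapabilityError } from '@noy-db/hub'

try {
  await exportVaultAsCsv(vault) // placeholder for an @noy-db/as-csv export
} catch (e) {
  if (e instanceof ExportCapabilityError) {
    // Route to a capability-request UI rather than a generic permission error.
    if (e.tier === 'plaintext') requestPlaintextExportGrant(e.userId, e.format)
    else requestBundleExportGrant(e.userId)
  } else throw e
}
```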
Distinct from\n * `PermissionDeniedError` and `NoAccessError` so UI layers can show a\n * specific \"request the import capability\" flow.\n */\nexport class ImportCapabilityError extends NoydbError {\n readonly tier: 'plaintext' | 'bundle'\n readonly format?: string\n readonly userId: string\n\n constructor(opts: {\n tier: 'plaintext' | 'bundle'\n userId: string\n format?: string\n message?: string\n }) {\n const msg =\n opts.message ??\n (opts.tier === 'plaintext'\n ? `Import capability denied — keyring \"${opts.userId}\" is not granted plaintext-import capability for format \"${opts.format ?? '<unknown>'}\". Ask a vault owner or admin to grant it via vault.grant({ importCapability: { plaintext: ['${opts.format ?? '<format>'}'] } }).`\n : `Import capability denied — keyring \"${opts.userId}\" is not granted encrypted-bundle import capability. Ask a vault owner or admin to grant it via vault.grant({ importCapability: { bundle: true } }).`)\n super('IMPORT_CAPABILITY', msg)\n this.name = 'ImportCapabilityError'\n this.tier = opts.tier\n this.userId = opts.userId\n if (opts.format !== undefined) this.format = opts.format\n }\n}\n\n/**\n * Thrown when a caller invokes an API that requires an optional\n * store capability the active store does not implement.\n *\n * Today the only call site is `Noydb.listAccessibleVaults()`,\n * which depends on the optional `NoydbStore.listVaults()`\n * method. The error message names the missing method and the calling\n * API so consumers know exactly which combination is unsupported,\n * and the `capability` field is machine-readable so library code can\n * pattern-match in catch blocks (e.g. fall back to a candidate-list\n * shape).\n *\n * The class lives in `errors.ts` rather than as a generic\n * `ValidationError` because the diagnostic shape is different: a\n * `ValidationError` says \"the inputs you passed are wrong\"; this\n * error says \"the inputs are fine, but the store you wired up\n * doesn't support what you're asking for.\" Different fix, different\n * documentation.\n */\nexport class StoreCapabilityError extends NoydbError {\n /** The store method/capability that was missing. */\n readonly capability: string\n\n constructor(capability: string, callerApi: string, storeName?: string) {\n super(\n 'STORE_CAPABILITY',\n `${callerApi} requires the optional store capability \"${capability}\" ` +\n `but the active store${storeName ? ` (${storeName})` : ''} does not implement it. 
` +\n `Use a store that supports \"${capability}\" (store-memory, store-file) or pass an explicit ` +\n `vault list to bypass enumeration.`,\n )\n this.name = 'StoreCapabilityError'\n this.capability = capability\n }\n}\n\n/**\n * Thrown when a grant would give the grantee a permission the grantor\n * does not themselves hold — the \"admin cannot grant what admin cannot\n * do\" rule from the admin-delegation work.\n *\n * Distinct from `PermissionDeniedError` so callers can tell the two\n * cases apart in logs and tests:\n *\n * - `PermissionDeniedError` — \"you are not allowed to perform this\n * operation at all\" (wrong role).\n * - `PrivilegeEscalationError` — \"you are allowed to grant, but not\n * with these specific permissions\" (widening attempt).\n *\n * Under the admin model the grantee of an admin-grants-admin call\n * inherits the caller's entire DEK set by construction, so this error\n * is structurally unreachable in typical flows. The check and error\n * class exist so that future per-collection admin scoping cannot\n * accidentally bypass the subset rule — the guard is already wired in.\n *\n * `offendingCollection` carries the first collection name that failed\n * the subset check, to make the violation actionable in error output.\n */\nexport class PrivilegeEscalationError extends NoydbError {\n readonly offendingCollection: string\n\n constructor(offendingCollection: string, message?: string) {\n super(\n 'PRIVILEGE_ESCALATION',\n message ??\n `Privilege escalation: grantor has no DEK for collection \"${offendingCollection}\" and cannot grant access to it.`,\n )\n this.name = 'PrivilegeEscalationError'\n this.offendingCollection = offendingCollection\n }\n}\n\n/**\n * Thrown by `Collection.put` / `.delete` when the target record's\n * envelope `_ts` falls within a closed accounting period.\n *\n * Distinct from `ReadOnlyError` (keyring-level), `ReadOnlyAtInstantError`\n * (historical view), and `ReadOnlyFrameError` (shadow vault): this\n * error is about the STORED RECORD being sealed by an operator call\n * to `vault.closePeriod()`, independent of caller permissions or\n * view type. The `periodName` and `endDate` fields name the sealing\n * period so audit UIs can surface a \"this record is locked in\n * FY2026-Q1 (closed 2026-03-31)\" message without parsing the error\n * string.\n *\n * To apply a correction after close, book a compensating entry in a\n * new period rather than unlocking the old one. Re-opening a closed\n * period is deliberately unsupported.\n */\nexport class PeriodClosedError extends NoydbError {\n readonly periodName: string\n readonly endDate: string\n readonly recordTs: string\n\n constructor(periodName: string, endDate: string, recordTs: string) {\n super(\n 'PERIOD_CLOSED',\n `Cannot modify record (last written ${recordTs}) — sealed by closed period ` +\n `\"${periodName}\" (endDate: ${endDate}). Post a compensating entry in a ` +\n `new period instead.`,\n )\n this.name = 'PeriodClosedError'\n this.periodName = periodName\n this.endDate = endDate\n this.recordTs = recordTs\n }\n}\n\n// ─── Hierarchical Access Errors ─────────────────────\n\n/**\n * Thrown when a user tries to act at a tier they are not cleared for.\n *\n * This is the umbrella error for tier write refusals:\n * - `put({ tier: N })` when the user's keyring lacks tier-N DEK.\n * - `elevate(id, N)` when the caller cannot reach tier N.\n *\n * Distinct from `TierAccessDeniedError` which covers *read* refusals on\n * the invisibility/ghost path.\n */\nexport class TierNotGrantedError extends NoydbError {\n readonly tier: number\n readonly collection: string\n\n constructor(collection: string, tier: number) {\n super(\n 'TIER_NOT_GRANTED',\n `User has no DEK for tier ${tier} in collection \"${collection}\"`,\n )\n this.name = 'TierNotGrantedError'\n this.collection = collection\n this.tier = tier\n }\n}\n\n/**\n * Thrown when an elevated-handle operation runs after the elevation's\n * TTL expired. Reads continue at the original tier; only writes\n * through the scoped handle flip to throwing once expired.\n */\nexport class ElevationExpiredError extends NoydbError {\n readonly tier: number\n readonly expiresAt: number\n\n constructor(opts: { tier: number; expiresAt: number }) {\n super(\n 'ELEVATION_EXPIRED',\n `Elevation to tier ${opts.tier} expired at ${new Date(opts.expiresAt).toISOString()}`,\n )\n this.name = 'ElevationExpiredError'\n this.tier = opts.tier\n this.expiresAt = opts.expiresAt\n }\n}\n\n/**\n * Thrown by `vault.elevate(...)` when an elevation is already active\n * on the vault. 
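Per `PeriodClosedError` above, corrections after close go through a compensating entry in an open period. A hedged sketch; the record fields are illustrative:

```ts
import { PeriodClosedError } from '@noy-db/hub'

try {
  await entries.put({ ...original, amount: corrected })
} catch (e) {
  if (e instanceof PeriodClosedError) {
    // `original` is sealed inside e.periodName (closed e.endDate):
    // book the delta into the open period instead of editing the past.
    await entries.put({
      id: crypto.randomUUID(),
      reverses: original.id,              // illustrative field name
      amount: corrected - original.amount,
    })
  } else throw e
}
```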
Adopters must `release()` the existing handle before\n * starting a new elevation.\n */\nexport class AlreadyElevatedError extends NoydbError {\n readonly activeTier: number\n\n constructor(activeTier: number) {\n super(\n 'ALREADY_ELEVATED',\n `Vault is already elevated to tier ${activeTier}; release the existing handle first`,\n )\n this.name = 'AlreadyElevatedError'\n this.activeTier = activeTier\n }\n}\n\n/**\n * Thrown when `demote()` is called by someone who is not the original\n * elevator and not an owner.\n */\nexport class TierDemoteDeniedError extends NoydbError {\n constructor(id: string, tier: number) {\n super(\n 'TIER_DEMOTE_DENIED',\n `Only the original elevator or an owner can demote record \"${id}\" from tier ${tier}`,\n )\n this.name = 'TierDemoteDeniedError'\n }\n}\n\n/**\n * Thrown when `db.delegate()` is called against a user that has no\n * keyring in the target vault — the delegation token cannot be\n * constructed without the target user's KEK wrap.\n */\nexport class DelegationTargetMissingError extends NoydbError {\n readonly toUser: string\n\n constructor(toUser: string) {\n super(\n 'DELEGATION_TARGET_MISSING',\n `Delegation target user \"${toUser}\" has no keyring in this vault`,\n )\n this.name = 'DelegationTargetMissingError'\n this.toUser = toUser\n }\n}\n\n// ─── Sync Errors ───────────────────────────────────────────────────────\n\n/**\n * Thrown when a `put()` detects an optimistic concurrency conflict.\n *\n * NOYDB uses version numbers (`_v`) for optimistic locking. If a `put()`\n * is called with `expectedVersion: N` but the stored record is at version\n * `M ≠ N`, the write is rejected and the caller must re-read, re-apply their\n * change, and retry. The `version` field carries the actual stored version\n * so callers can decide whether to retry or surface the conflict to the user.\n */\nexport class ConflictError extends NoydbError {\n /** The actual stored version at the time of conflict. */\n readonly version: number\n\n constructor(version: number, message = 'Version conflict') {\n super('CONFLICT', message)\n this.name = 'ConflictError'\n this.version = version\n }\n}\n\n/**\n * Thrown by `LedgerStore.append()` after exhausting its CAS retry\n * budget under multi-writer contention. Two browser tabs, a\n * web app + an offline mobile peer, or a server worker pool all\n * producing ledger entries against the same vault can race on the\n * \"read head, write head+1\" cycle; the optimistic-CAS retry loop\n * resolves the race for `casAtomic: true` stores, but pathological\n * contention (or a buggy peer) can still exhaust the budget. When\n * that happens, the chain is intact — the failed writer simply\n * couldn't claim a slot. Caller's choice whether to retry, queue,\n * or surface the failure to the user.\n */\nexport class LedgerContentionError extends NoydbError {\n readonly attempts: number\n\n constructor(attempts: number) {\n super(\n 'LEDGER_CONTENTION',\n `LedgerStore.append: failed to claim a chain slot after ${attempts} optimistic-CAS retries`,\n )\n this.name = 'LedgerContentionError'\n this.attempts = attempts\n }\n}\n\n/**\n * Thrown when a bundle push is rejected because the remote has been updated\n * since the local bundle was last pulled.\n *\n * Unlike `ConflictError` (per-record), this is a whole-bundle conflict —\n * the remote's bundle handle has changed. The caller must pull the new\n * bundle, merge, and re-push. 
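The re-read, re-apply, retry loop that `ConflictError` calls for, as a hedged sketch. The `put(record, { expectedVersion })` shape and the `_v` field come from the docs; the structural typing of `col` is illustrative.

```ts
import { ConflictError } from '@noy-db/hub'

async function putWithRetry<T extends { id: string; _v: number }>(
  col: {
    get(id: string): Promise<T>
    put(r: T, o?: { expectedVersion?: number }): Promise<T>
  },
  id: string,
  apply: (r: T) => T,
  attempts = 3,
): Promise<T> {
  for (let i = 0; ; i++) {
    const current = await col.get(id)
    try {
      return await col.put(apply(current), { expectedVersion: current._v })
    } catch (e) {
      if (e instanceof ConflictError && i < attempts - 1) continue // stale: re-read, retry
      throw e
    }
  }
}
```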
`remoteVersion` is the handle of the newer\n * remote bundle for use in diagnostics.\n */\nexport class BundleVersionConflictError extends NoydbError {\n /** The bundle handle of the newer remote version that rejected the push. */\n readonly remoteVersion: string\n\n constructor(remoteVersion: string, message = 'Bundle version conflict — remote has been updated') {\n super('BUNDLE_VERSION_CONFLICT', message)\n this.name = 'BundleVersionConflictError'\n this.remoteVersion = remoteVersion\n }\n}\n\n/**\n * Thrown when a sync operation (push or pull) fails due to a network error.\n *\n * NOYDB's offline-first design means network errors are expected during sync.\n * Callers should catch `NetworkError`, surface connectivity status in the UI,\n * and rely on the `SyncScheduler` to retry when connectivity is restored.\n */\nexport class NetworkError extends NoydbError {\n constructor(message = 'Network error') {\n super('NETWORK_ERROR', message)\n this.name = 'NetworkError'\n }\n}\n\n// ─── Data Errors ───────────────────────────────────────────────────────\n\n/**\n * Thrown when `collection.get(id)` is called with an ID that does not exist.\n *\n * NOYDB collections are memory-first, so this error is synchronous and cheap —\n * it does not make a network round-trip. Callers that expect the record to be\n * absent should use `collection.getOrNull(id)` instead.\n */\nexport class NotFoundError extends NoydbError {\n constructor(message = 'Record not found') {\n super('NOT_FOUND', message)\n this.name = 'NotFoundError'\n }\n}\n\n/**\n * Thrown when application-level validation fails before encryption.\n *\n * Distinct from `SchemaValidationError` (Standard Schema v1 validator)\n * and `MissingTranslationError` (i18nText). `ValidationError` is the\n * general-purpose validation base — use it for custom guards in `put()`\n * hooks or store middleware.\n */\nexport class ValidationError extends NoydbError {\n constructor(message = 'Validation error') {\n super('VALIDATION_ERROR', message)\n this.name = 'ValidationError'\n }\n}\n\n/**\n * Thrown when a Standard Schema v1 validator rejects a record on\n * `put()` (input validation) or on read (output validation). Carries\n * the raw issue list so callers can render field-level errors.\n *\n * `direction` distinguishes the two cases:\n * - `'input'`: the user passed bad data into `put()`. This is a\n * normal error case that application code should handle — typically\n * by showing validation messages in the UI.\n * - `'output'`: stored data does not match the current schema. This\n * indicates a schema drift (the schema was changed without\n * migrating the existing records) and should be treated as a bug\n * — the application should not swallow it silently.\n *\n * The `issues` type is deliberately `readonly unknown[]` on this class\n * so that `errors.ts` doesn't need to import from `schema.ts` (and\n * create a dependency cycle). 
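A hedged sketch of the two-branch handling the `direction` field is designed for; the UI and reporting helpers are placeholders.

```ts
import { SchemaValidationError } from '@noy-db/hub'

try {
  await collection.put(record)
} catch (e) {
  if (e instanceof SchemaValidationError) {
    if (e.direction === 'input') {
      renderFieldErrors(e.issues) // expected case: user passed bad data into put()
    } else {
      reportSchemaDrift(e)        // 'output': stored data vs. current schema, treat as a bug
    }
  } else throw e
}
```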
Callers who know they're holding a\n * `SchemaValidationError` can cast to the more precise\n * `readonly StandardSchemaV1Issue[]` from `schema.ts`.\n */\nexport class SchemaValidationError extends NoydbError {\n readonly issues: readonly unknown[]\n readonly direction: 'input' | 'output'\n\n constructor(\n message: string,\n issues: readonly unknown[],\n direction: 'input' | 'output',\n ) {\n super('SCHEMA_VALIDATION_FAILED', message)\n this.name = 'SchemaValidationError'\n this.issues = issues\n this.direction = direction\n }\n}\n\n// ─── Query DSL Errors ─────────────────────────────────────────────────\n\n/**\n * Thrown when `.groupBy().aggregate()` produces more than the hard\n * cardinality cap (default 100_000 groups).\n *\n * The cap exists because `.groupBy()` materializes one bucket per\n * distinct key value in memory, and runaway cardinality — a groupBy\n * on a high-uniqueness field like `id` or `createdAt` — is almost\n * always a query mistake rather than legitimate use. A hard error is\n * better than silent OOM: the consumer sees an actionable message\n * naming the field and the observed cardinality, with guidance to\n * either narrow the query with `.where()` or group on a\n * lower-cardinality field.\n *\n * A separate one-shot warning fires at 10% of the cap (10_000\n * groups) so consumers get a heads-up before the hard error — same\n * pattern as `JoinTooLargeError` and the `.join()` row ceiling.\n *\n * **Not overridable.** The 100k cap is a fixed constant so\n * the failure mode is consistent across the codebase; a\n * `{ maxGroups }` override can be added later without a breaking\n * change if a real consumer asks.\n */\nexport class GroupCardinalityError extends NoydbError {\n /** The field being grouped on. */\n readonly field: string\n /** Observed number of distinct groups at the moment the cap tripped. */\n readonly cardinality: number\n /** The cap that was exceeded. */\n readonly maxGroups: number\n\n constructor(field: string, cardinality: number, maxGroups: number) {\n super(\n 'GROUP_CARDINALITY',\n `.groupBy(\"${field}\") produced ${cardinality} distinct groups, ` +\n `exceeding the ${maxGroups}-group ceiling. This is almost always a ` +\n `query mistake — grouping on a high-uniqueness field like \"id\" or ` +\n `\"createdAt\" produces one bucket per record. Narrow the query with ` +\n `.where() before grouping, or group on a lower-cardinality field ` +\n `(status, category, clientId). 
If you genuinely need high-cardinality ` +\n `grouping, file an issue with your use case.`,\n )\n this.name = 'GroupCardinalityError'\n this.field = field\n this.cardinality = cardinality\n this.maxGroups = maxGroups\n }\n}\n\n/**\n * Thrown in lazy mode when a `.query()` / `.where()` / `.orderBy()` clause\n * references a field that does not have a declared index.\n *\n * Lazy-mode queries only work when every touched field is indexed.\n * This is deliberate — silent scan-fallback would hide the performance\n * cliff that lazy-mode indexes exist to prevent.\n *\n * Payload:\n * - `collection` — name of the collection queried\n * - `touchedFields` — every field referenced by the query (filter + order)\n * - `missingFields` — subset of `touchedFields` that have no declared index\n */\nexport class IndexRequiredError extends NoydbError {\n readonly collection: string\n readonly touchedFields: readonly string[]\n readonly missingFields: readonly string[]\n\n constructor(args: { collection: string; touchedFields: readonly string[]; missingFields: readonly string[] }) {\n super(\n 'INDEX_REQUIRED',\n `Collection \"${args.collection}\": query references unindexed fields in lazy mode ` +\n `(missing: ${args.missingFields.join(', ')}). ` +\n `Declare an index on each field, or use collection.scan() for non-indexed iteration.`,\n )\n this.name = 'IndexRequiredError'\n this.collection = args.collection\n this.touchedFields = [...args.touchedFields]\n this.missingFields = [...args.missingFields]\n }\n}\n\n/**\n * Thrown (or surfaced via the `index:write-partial` event) when one or more\n * per-indexed-field side-car writes fail after the main record write has\n * already succeeded.\n *\n * Not thrown out of `.put()` / `.delete()` directly — those succeed when the\n * main record succeeds. Instead, `IndexWriteFailureError` instances are collected\n * into the session-scoped reconcile queue and emitted on the Collection\n * emitter as `index:write-partial`.\n *\n * Payload:\n * - `recordId` — the id of the main record whose side-car writes failed\n * - `field` — the indexed field whose side-car write failed\n * - `op` — `'put'` or `'delete'`, indicating which mutation was in flight\n * - `cause` — the underlying error from the store\n */\nexport class IndexWriteFailureError extends NoydbError {\n readonly recordId: string\n readonly field: string\n readonly op: 'put' | 'delete'\n override readonly cause: unknown\n\n constructor(args: { recordId: string; field: string; op: 'put' | 'delete'; cause: unknown }) {\n super(\n 'INDEX_WRITE_FAILURE',\n `Index side-car ${args.op} failed for field \"${args.field}\" on record \"${args.recordId}\"`,\n )\n this.name = 'IndexWriteFailureError'\n this.recordId = args.recordId\n this.field = args.field\n this.op = args.op\n this.cause = args.cause\n }\n}\n\n// ─── Bundle Format Errors ─────────────────────────────────\n\n/**\n * Thrown by `readNoydbBundle()` when the body bytes don't match\n * the integrity hash declared in the bundle header — i.e. someone\n * modified the bytes between write and read.\n *\n * Distinct from a generic `Error` (which would be thrown for\n * format violations like a missing magic prefix or malformed\n * header JSON) so consumers can pattern-match the corruption case\n * and handle it differently from a producer bug. 
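For `IndexRequiredError` above, the documented remedies are declaring the missing indexes or dropping to `scan()`. A hedged sketch: the query-clause syntax, the `.list()` terminal, and async iteration over `scan()` are all assumptions.

```ts
import { IndexRequiredError } from '@noy-db/hub'

let open: Array<{ status: string }>
try {
  open = await tasks.query().where('status', '==', 'open').list()
} catch (e) {
  if (!(e instanceof IndexRequiredError)) throw e
  console.warn(`declare indexes for: ${e.missingFields.join(', ')}`)
  open = []
  for await (const t of tasks.scan()) {
    if (t.status === 'open') open.push(t) // interim non-indexed fallback
  }
}
```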
A\n * `BundleIntegrityError` indicates \"the bytes you got are not\n * what was written\"; a plain `Error` from `parsePrefixAndHeader`\n * indicates \"what was written wasn't a valid bundle in the first\n * place.\"\n *\n * Also thrown when decompression fails after the integrity hash\n * passed — that's a producer bug (the wrong algorithm byte was\n * written) but it surfaces with the same error class because the\n * end result is \"the body cannot be turned back into a dump.\"\n */\nexport class BundleIntegrityError extends NoydbError {\n constructor(message: string) {\n super('BUNDLE_INTEGRITY', `.noydb bundle integrity check failed: ${message}`)\n this.name = 'BundleIntegrityError'\n }\n}\n\n// ─── i18n / Dictionary Errors ──────────────────────────\n\n/**\n * Thrown when `vault.collection()` is called with a name that is\n * reserved for NOYDB internal use (any name starting with `_dict_`).\n *\n * Dictionary collections are accessed exclusively via\n * `vault.dictionary(name)` — attempting to open one as a regular\n * collection would bypass the dictionary invariants (ACL, rename\n * tracking, reserved-name policy).\n */\nexport class ReservedCollectionNameError extends NoydbError {\n /** The rejected collection name. */\n readonly collectionName: string\n\n constructor(collectionName: string) {\n super(\n 'RESERVED_COLLECTION_NAME',\n `\"${collectionName}\" is a reserved collection name. ` +\n `Use vault.dictionary(\"${collectionName.replace(/^_dict_/, '')}\") ` +\n `to access dictionary collections.`,\n )\n this.name = 'ReservedCollectionNameError'\n this.collectionName = collectionName\n }\n}\n\n/**\n * Thrown by `DictionaryHandle.get()` and `DictionaryHandle.delete()` when\n * the requested key does not exist in the dictionary.\n *\n * Distinct from `NotFoundError` (which is for data records) so callers\n * can distinguish \"data record missing\" from \"dictionary key missing\"\n * without inspecting error messages.\n */\nexport class DictKeyMissingError extends NoydbError {\n /** The dictionary name. */\n readonly dictionaryName: string\n /** The key that was not found. */\n readonly key: string\n\n constructor(dictionaryName: string, key: string) {\n super(\n 'DICT_KEY_MISSING',\n `Dictionary \"${dictionaryName}\" has no entry for key \"${key}\".`,\n )\n this.name = 'DictKeyMissingError'\n this.dictionaryName = dictionaryName\n this.key = key\n }\n}\n\n/**\n * Thrown by `DictionaryHandle.delete()` in strict mode when the key to\n * be deleted is still referenced by one or more records.\n *\n * The caller must either rename the key first (the only sanctioned\n * mass-mutation path) or pass `{ mode: 'warn' }` to skip the check\n * (development only).\n */\nexport class DictKeyInUseError extends NoydbError {\n /** The dictionary name. */\n readonly dictionaryName: string\n /** The key that is still referenced. */\n readonly key: string\n /** Name of the first collection found to reference this key. */\n readonly usedBy: string\n /** Number of records in `usedBy` that reference this key. */\n readonly count: number\n\n constructor(\n dictionaryName: string,\n key: string,\n usedBy: string,\n count: number,\n ) {\n super(\n 'DICT_KEY_IN_USE',\n `Cannot delete key \"${key}\" from dictionary \"${dictionaryName}\": ` +\n `${count} record(s) in \"${usedBy}\" still reference it. 
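A hedged sketch of the rename-first path this error enforces. `vault.dictionary()`, `rename()`, and `delete()` are documented; the key names and the synchronous handle acquisition are illustrative.

```ts
import { DictKeyInUseError } from '@noy-db/hub'

const statuses = vault.dictionary('statuses')
try {
  await statuses.delete('on-hold')
} catch (e) {
  if (e instanceof DictKeyInUseError) {
    // e.count record(s) in e.usedBy still reference the key. rename() is
    // the sanctioned mass-mutation path: it rewrites those references.
    await statuses.rename(e.key, 'paused') // 'paused' is illustrative
  } else throw e
}
```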
` +\n `Use dictionary.rename(\"${key}\", newKey) to rewrite references first.`,\n )\n this.name = 'DictKeyInUseError'\n this.dictionaryName = dictionaryName\n this.key = key\n this.usedBy = usedBy\n this.count = count\n }\n}\n\n/**\n * Thrown by `Collection.put()` when an `i18nText` field is missing one\n * or more required translations.\n *\n * The `missing` array names each locale code that was absent from the\n * field value. The `field` property names the field so callers can\n * render a field-level error message without parsing the string.\n */\nexport class MissingTranslationError extends NoydbError {\n /** The field name whose translation(s) are missing. */\n readonly field: string\n /** Locale codes that were required but absent. */\n readonly missing: readonly string[]\n\n constructor(field: string, missing: readonly string[], message?: string) {\n super(\n 'MISSING_TRANSLATION',\n message ??\n `Field \"${field}\": missing required translation(s): ${missing.join(', ')}.`,\n )\n this.name = 'MissingTranslationError'\n this.field = field\n this.missing = missing\n }\n}\n\n/**\n * Thrown when reading an `i18nText` field without specifying a locale —\n * either at the call site (`get(id, { locale })`) or on the vault\n * (`openVault(name, { locale })`).\n *\n * Also thrown when `resolveI18nText()` exhausts the fallback chain and\n * no translation is available for the requested locale.\n *\n * The `field` property names the field that triggered the error so the\n * caller can surface it in the UI.\n */\nexport class LocaleNotSpecifiedError extends NoydbError {\n /** The field name that required a locale. */\n readonly field: string\n\n constructor(field: string, message?: string) {\n super(\n 'LOCALE_NOT_SPECIFIED',\n message ??\n `Cannot read i18nText field \"${field}\" without a locale. ` +\n `Pass { locale } to get()/list()/query() or set a default via ` +\n `openVault(name, { locale }).`,\n )\n this.name = 'LocaleNotSpecifiedError'\n this.field = field\n }\n}\n\n// ─── Translator Errors ─────────────────────────────────────\n\n/**\n * Thrown when a collection has an `i18nText` field with\n * `autoTranslate: true` but no `plaintextTranslator` was configured\n * on `createNoydb()`.\n *\n * The error is raised at `put()` time (not at schema construction) so\n * the mis-configuration is surfaced by the first write rather than\n * silently at startup.\n */\nexport class TranslatorNotConfiguredError extends NoydbError {\n /** The field that requested auto-translation. */\n readonly field: string\n /** The collection the put was targeting. */\n readonly collection: string\n\n constructor(field: string, collection: string) {\n super(\n 'TRANSLATOR_NOT_CONFIGURED',\n `Field \"${field}\" in collection \"${collection}\" has autoTranslate: true, ` +\n `but no plaintextTranslator was configured on createNoydb(). 
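Per `LocaleNotSpecifiedError`, the locale can come from either of the two positions its message names. A hedged sketch:

```ts
// Vault-wide default locale...
const crm = await db.openVault('crm', { locale: 'de' })

// ...or a per-call override when reading i18nText fields.
const contact = await contacts.get(id, { locale: 'fr' })
```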
` +\n `Either configure a plaintextTranslator or remove autoTranslate from the schema.`,\n )\n this.name = 'TranslatorNotConfiguredError'\n this.field = field\n this.collection = collection\n }\n}\n\n// ─── Backup Errors ─────────────────────────────────────────\n\n/**\n * Thrown when `Vault.load()` finds that a backup's hash chain\n * doesn't verify, or that its embedded `ledgerHead.hash` doesn't\n * match the chain head reconstructed from the loaded entries.\n *\n * Distinct from `BackupCorruptedError` so callers can choose to\n * recover from one but not the other (e.g., a corrupted JSON file is\n * unrecoverable; a chain mismatch might mean the backup is from an\n * incompatible noy-db version).\n */\nexport class BackupLedgerError extends NoydbError {\n /** First-broken-entry index, if known. */\n readonly divergedAt?: number\n\n constructor(message: string, divergedAt?: number) {\n super('BACKUP_LEDGER', message)\n this.name = 'BackupLedgerError'\n if (divergedAt !== undefined) this.divergedAt = divergedAt\n }\n}\n\n/**\n * Thrown when `Vault.load()` finds that the backup's data\n * collection content doesn't match the ledger's recorded\n * `payloadHash`es. This is the \"envelope was tampered with after\n * dump\" detection — the chain itself can be intact, but if any\n * encrypted record bytes were swapped, this check catches it.\n */\nexport class BackupCorruptedError extends NoydbError {\n /** The (collection, id) pair whose envelope failed the hash check. */\n readonly collection: string\n readonly id: string\n\n constructor(collection: string, id: string, message: string) {\n super('BACKUP_CORRUPTED', message)\n this.name = 'BackupCorruptedError'\n this.collection = collection\n this.id = id\n }\n}\n\n// ─── Session Errors ───────────────────────────────────────\n\n/**\n * Thrown by `resolveSession()` when the session token's `expiresAt`\n * timestamp is in the past. The session key is also removed from the\n * in-memory store when this is thrown, so retrying with the same sessionId\n * will produce `SessionNotFoundError`.\n *\n * Separate from `SessionNotFoundError` so callers can distinguish between\n * \"session is gone\" (key store cleared, tab reloaded) and \"session is\n * still in the store but has exceeded its lifetime\" (idle timeout, absolute\n * timeout, policy-driven expiry). The remediation differs: expired sessions\n * should prompt a fresh unlock; not-found sessions may indicate a bug or a\n * cross-tab scenario where the session was never established.\n */\nexport class SessionExpiredError extends NoydbError {\n readonly sessionId: string\n\n constructor(sessionId: string) {\n super('SESSION_EXPIRED', `Session \"${sessionId}\" has expired. Re-unlock to continue.`)\n this.name = 'SessionExpiredError'\n this.sessionId = sessionId\n }\n}\n\n/**\n * Thrown by `resolveSession()` when the session key cannot be found in\n * the module-level store. 
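The two session failures warrant different remediation, per the docs. A hedged sketch: `resolveSession` is the function named above with its exact signature assumed, and the prompts are placeholders.

```ts
import { SessionExpiredError, SessionNotFoundError } from '@noy-db/hub'

async function withSession(sessionId: string) {
  try {
    return await resolveSession(sessionId)
  } catch (e) {
    if (e instanceof SessionExpiredError) return promptReUnlock(e.sessionId) // lifetime exceeded
    if (e instanceof SessionNotFoundError) return startFreshSession()        // key gone for good
    throw e
  }
}
```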
This happens when:\n * - The session was explicitly revoked via `revokeSession()`.\n * - The JS context was reloaded (tab navigation, page refresh, worker restart).\n * - `Noydb.close()` was called (which calls `revokeAllSessions()`).\n * - The sessionId is wrong or was generated by a different JS context.\n *\n * The session token (if the caller holds it) is permanently useless after\n * this error — the key is gone and cannot be recovered.\n */\nexport class SessionNotFoundError extends NoydbError {\n readonly sessionId: string\n\n constructor(sessionId: string) {\n super('SESSION_NOT_FOUND', `Session key for \"${sessionId}\" not found. The session may have been revoked or the page reloaded.`)\n this.name = 'SessionNotFoundError'\n this.sessionId = sessionId\n }\n}\n\n/**\n * Thrown when a session policy blocks an operation — for example,\n * `requireReAuthFor: ['export']` is set and the caller attempts to\n * call `exportStream()` without re-authenticating for this session.\n *\n * The `operation` field names the specific operation that was blocked\n * (e.g. `'export'`, `'grant'`, `'rotate'`) so the caller can surface\n * a targeted prompt (\"Please re-enter your passphrase to export data\").\n */\nexport class SessionPolicyError extends NoydbError {\n readonly operation: string\n\n constructor(operation: string, message?: string) {\n super(\n 'SESSION_POLICY',\n message ?? `Operation \"${operation}\" requires re-authentication per the active session policy.`,\n )\n this.name = 'SessionPolicyError'\n this.operation = operation\n }\n}\n\n// ─── Query / Join Errors ────────────────────────────────────\n\n/**\n * Thrown when a `.join()` would exceed its configured row ceiling on\n * either side. The ceiling defaults to 50,000 per side and can be\n * overridden via the `{ maxRows }` option on `.join()`.\n *\n * Carries both row counts so the error message can show which side\n * tripped the limit (e.g. \"left had 60,000 rows, right had 1,200,\n * max was 50,000\"). The `side` field is machine-readable so test\n * code and devtools can match on it without regex-parsing the\n * message.\n *\n * The row ceiling exists because joins are bounded in-memory\n * operations over materialized record sets. Consumers whose\n * collections genuinely exceed the ceiling should track the planned\n * streaming-join support (joins over `scan()`), or filter the left side\n * further with `where()` / `limit()` before joining.\n */\nexport class JoinTooLargeError extends NoydbError {\n readonly leftRows: number\n readonly rightRows: number\n readonly maxRows: number\n readonly side: 'left' | 'right'\n\n constructor(opts: {\n leftRows: number\n rightRows: number\n maxRows: number\n side: 'left' | 'right'\n message: string\n }) {\n super('JOIN_TOO_LARGE', opts.message)\n this.name = 'JoinTooLargeError'\n this.leftRows = opts.leftRows\n this.rightRows = opts.rightRows\n this.maxRows = opts.maxRows\n this.side = opts.side\n }\n}\n\n/**\n * Thrown by `.join()` in strict `ref()` mode when a left-side record\n * points at a right-side id that does not exist in the target\n * collection.\n *\n * Distinct from `RefIntegrityError` so test code can pattern-match\n * on the *read-time* dangling case without catching *write-time*\n * integrity violations. 
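For `JoinTooLargeError`, the documented levers are pre-filtering and the `{ maxRows }` override. A hedged sketch; the `where` clause syntax and the join-target argument are assumptions.

```ts
const rows = await orders
  .query()
  .where('createdAt', '>=', '2026-01-01') // shrink the left side first
  .join(customers, { maxRows: 100_000 })  // or raise the per-side ceiling explicitly
```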
Both indicate \"ref points at nothing\" but\n * happen at different lifecycle phases and deserve different\n * remediation in documentation: a RefIntegrityError on `put()`\n * means the input is invalid; a DanglingReferenceError on `.join()`\n * means stored data has drifted and `vault.checkIntegrity()`\n * is the right tool to find the full set of orphans.\n */\nexport class DanglingReferenceError extends NoydbError {\n readonly field: string\n readonly target: string\n readonly refId: string\n\n constructor(opts: {\n field: string\n target: string\n refId: string\n message: string\n }) {\n super('DANGLING_REFERENCE', opts.message)\n this.name = 'DanglingReferenceError'\n this.field = opts.field\n this.target = opts.target\n this.refId = opts.refId\n }\n}\n\n/**\n * Thrown by {@link sanitizeFilename} when an input filename cannot be\n * made safe — NUL byte, empty after normalization, missing\n * `opaqueId` for the opaque profile, `..` segment, or a `maxBytes`\n * cap too small to hold a single code point.\n */\nexport class FilenameSanitizationError extends NoydbError {\n constructor(message: string) {\n super('FILENAME_SANITIZATION', message)\n this.name = 'FilenameSanitizationError'\n }\n}\n\n/**\n * Thrown when a write target resolves OUTSIDE the requested\n * directory after sanitization — the canonical Zip-Slip class. The\n * sanitizer's job is to strip path-traversal segments; this error\n * is the defense-in-depth fallback at the FS write site.\n */\nexport class PathEscapeError extends NoydbError {\n readonly attempted: string\n readonly targetDir: string\n\n constructor(opts: { attempted: string; targetDir: string }) {\n super(\n 'PATH_ESCAPE',\n `Sanitized filename \"${opts.attempted}\" resolves outside target dir \"${opts.targetDir}\"`,\n )\n this.name = 'PathEscapeError'\n this.attempted = opts.attempted\n this.targetDir = opts.targetDir\n }\n}\n","/**\n * Cryptographic primitives — thin wrappers around the Web Crypto API.\n *\n * ## Design principle\n *\n * **Zero npm crypto dependencies.** Every operation uses `globalThis.crypto.subtle`,\n * which is available natively in Node.js ≥ 18, all modern browsers, and\n * Deno/Bun. 
This avoids supply-chain risk from third-party crypto packages and\n * ensures the library stays auditable.\n *\n * ## Algorithms\n *\n * | Use case | Algorithm | Parameters |\n * |----------|-----------|------------|\n * | Key derivation | PBKDF2-SHA256 | 600,000 iterations, 32-byte salt |\n * | Record encryption | AES-256-GCM | 12-byte random IV per operation |\n * | DEK wrapping | AES-KW (RFC 3394) | 256-bit KEK |\n * | Binary encrypt | AES-256-GCM | same as record encryption |\n * | Integrity | HMAC-SHA256 | for presence channels |\n * | Content hash | SHA-256 | for ledger and bundle integrity |\n *\n * ## Key lifecycle\n *\n * ```\n * passphrase + salt\n * └─► deriveKey() → KEK (CryptoKey, extractable: false)\n * └─► wrapKey() → wrapped DEK bytes [stored in keyring]\n * └─► unwrapKey() → DEK (CryptoKey) [memory only during session]\n * └─► encrypt() / decrypt() → ciphertext / plaintext\n * ```\n *\n * IVs are generated fresh by {@link generateIV} on every encrypt call.\n * Reusing an IV with the same key would break GCM's authentication guarantee —\n * this function should be the only place IVs are produced.\n *\n * @module\n */\n\nimport { DecryptionError, InvalidKeyError, TamperedError } from './errors.js'\n\nconst PBKDF2_ITERATIONS = 600_000\nconst SALT_BYTES = 32\nconst IV_BYTES = 12\nconst KEY_BITS = 256\n\nconst subtle = globalThis.crypto.subtle\n\n// ─── Key Derivation ────────────────────────────────────────────────────\n\n/** Derive a KEK from a passphrase and salt using PBKDF2-SHA256. */\nexport async function deriveKey(\n passphrase: string,\n salt: Uint8Array,\n): Promise<CryptoKey> {\n const keyMaterial = await subtle.importKey(\n 'raw',\n new TextEncoder().encode(passphrase),\n 'PBKDF2',\n false,\n ['deriveKey'],\n )\n\n return subtle.deriveKey(\n {\n name: 'PBKDF2',\n salt: salt as BufferSource,\n iterations: PBKDF2_ITERATIONS,\n hash: 'SHA-256',\n },\n keyMaterial,\n { name: 'AES-KW', length: KEY_BITS },\n false,\n ['wrapKey', 'unwrapKey'],\n )\n}\n\n// ─── DEK Generation ────────────────────────────────────────────────────\n\n/** Generate a random AES-256-GCM data encryption key. */\nexport async function generateDEK(): Promise<CryptoKey> {\n return subtle.generateKey(\n { name: 'AES-GCM', length: KEY_BITS },\n true, // extractable — needed for AES-KW wrapping\n ['encrypt', 'decrypt'],\n )\n}\n\n// ─── Key Wrapping ──────────────────────────────────────────────────────\n\n/** Wrap (encrypt) a DEK with a KEK using AES-KW. Returns base64 string. */\nexport async function wrapKey(dek: CryptoKey, kek: CryptoKey): Promise<string> {\n const wrapped = await subtle.wrapKey('raw', dek, kek, 'AES-KW')\n return bufferToBase64(wrapped)\n}\n\n/** Unwrap (decrypt) a DEK from base64 string using a KEK. */\nexport async function unwrapKey(\n wrappedBase64: string,\n kek: CryptoKey,\n): Promise<CryptoKey> {\n try {\n return await subtle.unwrapKey(\n 'raw',\n base64ToBuffer(wrappedBase64) as BufferSource,\n kek,\n 'AES-KW',\n { name: 'AES-GCM', length: KEY_BITS },\n true,\n ['encrypt', 'decrypt'],\n )\n } catch {\n throw new InvalidKeyError()\n }\n}\n\n// ─── Encrypt / Decrypt ─────────────────────────────────────────────────\n\nexport interface EncryptResult {\n iv: string // base64\n data: string // base64\n}\n\n/** Encrypt plaintext JSON string with AES-256-GCM. Fresh IV per call. 
*/\nexport async function encrypt(\n plaintext: string,\n dek: CryptoKey,\n): Promise<EncryptResult> {\n const iv = generateIV()\n const encoded = new TextEncoder().encode(plaintext)\n\n const ciphertext = await subtle.encrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n encoded,\n )\n\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/** Decrypt AES-256-GCM ciphertext. Throws on wrong key or tampered data. */\nexport async function decrypt(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n): Promise<string> {\n const iv = base64ToBuffer(ivBase64)\n const ciphertext = base64ToBuffer(dataBase64)\n\n try {\n const plaintext = await subtle.decrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n ciphertext as BufferSource,\n )\n return new TextDecoder().decode(plaintext)\n } catch (err) {\n if (err instanceof Error && err.name === 'OperationError') {\n throw new TamperedError()\n }\n throw new DecryptionError(\n err instanceof Error ? err.message : 'Decryption failed',\n )\n }\n}\n\n// ─── Binary Encrypt / Decrypt ────────\n\n/**\n * Encrypt raw bytes with AES-256-GCM using a fresh random IV.\n * Used by the attachment store so binary blobs avoid double base64 encoding\n * (the existing `encrypt()` function calls `TextEncoder` on a string — here\n * we pass the `Uint8Array` directly to `subtle.encrypt`).\n */\nexport async function encryptBytes(\n data: Uint8Array,\n dek: CryptoKey,\n): Promise<EncryptResult> {\n const iv = generateIV()\n const ciphertext = await subtle.encrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n data as unknown as BufferSource,\n )\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/**\n * Decrypt AES-256-GCM ciphertext back to raw bytes.\n * Counterpart to `encryptBytes`. Throws `TamperedError` on auth-tag failure.\n */\nexport async function decryptBytes(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n): Promise<Uint8Array> {\n const iv = base64ToBuffer(ivBase64)\n const ciphertext = base64ToBuffer(dataBase64)\n try {\n const plaintext = await subtle.decrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n ciphertext as BufferSource,\n )\n return new Uint8Array(plaintext)\n } catch (err) {\n if (err instanceof Error && err.name === 'OperationError') {\n throw new TamperedError()\n }\n throw new DecryptionError(\n err instanceof Error ? err.message : 'Decryption failed',\n )\n }\n}\n\n/**\n * SHA-256 hex digest of raw bytes. Used to derive content-addressed\n * eTags for blob deduplication. 
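A round-trip sketch of the lifecycle diagram, using only the exports defined in this module; the `passphrase` and `record` bindings are assumed.

```ts
const salt = generateSalt()
const kek = await deriveKey(passphrase, salt)   // KEK, non-extractable
const dek = await generateDEK()                 // AES-256-GCM DEK
const wrapped = await wrapKey(dek, kek)         // base64, stored in the keyring

const unwrapped = await unwrapKey(wrapped, kek) // memory only, per session
const { iv, data } = await encrypt(JSON.stringify(record), unwrapped)
const roundTripped = await decrypt(iv, data, unwrapped) // throws TamperedError on bit-flips
```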
Computed on plaintext bytes\n * before compression and encryption so the eTag identifies content, not\n * ciphertext, and survives re-encryption (key rotation, re-upload).\n */\nexport async function sha256Hex(data: Uint8Array): Promise<string> {\n const hash = await subtle.digest('SHA-256', data as unknown as BufferSource)\n return Array.from(new Uint8Array(hash))\n .map((b) => b.toString(16).padStart(2, '0'))\n .join('')\n}\n\n// ─── HMAC-SHA-256 ─────────────────────────────\n\n/**\n * Compute HMAC-SHA-256(key, data) and return hex string.\n *\n * Used to derive content-addressed eTags that are opaque to the store:\n * ```\n * eTag = hmacSha256Hex(blobDEK, plaintext)\n * ```\n *\n * Unlike a plain SHA-256, the HMAC is keyed by the vault-shared `_blob` DEK,\n * so an attacker with store access cannot pre-compute eTags for known files.\n * Deduplication still works within a vault (same key + same content = same eTag).\n */\nexport async function hmacSha256Hex(key: CryptoKey, data: Uint8Array): Promise<string> {\n // Export AES-GCM DEK raw bytes → import as HMAC key\n const rawKey = await subtle.exportKey('raw', key)\n const hmacKey = await subtle.importKey(\n 'raw',\n rawKey,\n { name: 'HMAC', hash: 'SHA-256' },\n false,\n ['sign'],\n )\n const sig = await subtle.sign('HMAC', hmacKey, data as unknown as BufferSource)\n return Array.from(new Uint8Array(sig))\n .map((b) => b.toString(16).padStart(2, '0'))\n .join('')\n}\n\n// ─── AAD-aware Binary Encrypt / Decrypt ──\n\n/**\n * Encrypt raw bytes with AES-256-GCM using Additional Authenticated Data.\n *\n * The AAD binds each chunk to its parent blob and position, preventing\n * chunk reorder, substitution, and truncation attacks:\n * ```\n * AAD = UTF-8(\"{eTag}:{chunkIndex}:{chunkCount}\")\n * ```\n *\n * The AAD is NOT stored — the reader reconstructs it from `BlobObject`\n * metadata and passes it to `decryptBytesWithAAD`.\n */\nexport async function encryptBytesWithAAD(\n data: Uint8Array,\n dek: CryptoKey,\n aad: Uint8Array,\n): Promise<EncryptResult> {\n const iv = generateIV()\n const ciphertext = await subtle.encrypt(\n {\n name: 'AES-GCM',\n iv: iv as BufferSource,\n additionalData: aad as BufferSource,\n },\n dek,\n data as unknown as BufferSource,\n )\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/**\n * Decrypt AES-256-GCM ciphertext with AAD verification.\n *\n * If the AAD does not match the one used at encryption time (e.g. because\n * a chunk was reordered or substituted from another blob), the GCM auth\n * tag fails and this throws `TamperedError`.\n */\nexport async function decryptBytesWithAAD(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n aad: Uint8Array,\n): Promise<Uint8Array> {\n const iv = base64ToBuffer(ivBase64)\n const ciphertext = base64ToBuffer(dataBase64)\n try {\n const plaintext = await subtle.decrypt(\n {\n name: 'AES-GCM',\n iv: iv as BufferSource,\n additionalData: aad as BufferSource,\n },\n dek,\n ciphertext as BufferSource,\n )\n return new Uint8Array(plaintext)\n } catch (err) {\n if (err instanceof Error && err.name === 'OperationError') {\n throw new TamperedError()\n }\n throw new DecryptionError(\n err instanceof Error ? 
err.message : 'Decryption failed',\n )\n }\n}\n\n// ─── Presence Key Derivation ──────────────────────────────\n\n/**\n * Derive an AES-256-GCM presence key from a collection DEK using HKDF-SHA256.\n *\n * The presence key is domain-separated from the data DEK by the fixed salt\n * `'noydb-presence'` and the `info` = collection name. This means:\n * - The adapter never sees the presence key.\n * - Presence payloads rotate automatically when the collection DEK is rotated.\n * - Revoked users cannot derive the new presence key after a DEK rotation.\n *\n * @param dek The collection's AES-256-GCM DEK (extractable).\n * @param collectionName Used as the HKDF `info` parameter for domain separation.\n * @returns A non-extractable AES-256-GCM key suitable for presence payload encryption.\n */\nexport async function derivePresenceKey(dek: CryptoKey, collectionName: string): Promise<CryptoKey> {\n // Step 1: export DEK raw bytes\n const rawDek = await subtle.exportKey('raw', dek)\n\n // Step 2: import as HKDF key material\n const hkdfKey = await subtle.importKey(\n 'raw',\n rawDek,\n 'HKDF',\n false,\n ['deriveBits'],\n )\n\n // Step 3: derive 256 bits with salt='noydb-presence' and info=collectionName\n const salt = new TextEncoder().encode('noydb-presence')\n const info = new TextEncoder().encode(collectionName)\n const bits = await subtle.deriveBits(\n { name: 'HKDF', hash: 'SHA-256', salt, info },\n hkdfKey,\n KEY_BITS,\n )\n\n // Step 4: import derived bits as AES-GCM key\n return subtle.importKey(\n 'raw',\n bits,\n { name: 'AES-GCM', length: KEY_BITS },\n false,\n ['encrypt', 'decrypt'],\n )\n}\n\n// ─── Deterministic Encryption ────────────────────────────\n\n/**\n * Derive a deterministic 12-byte IV from `{ DEK, context, plaintext }`\n * via HKDF-SHA256. Given the same three inputs, the IV is identical, so\n * `encryptDeterministic` produces the same ciphertext on every call —\n * which is precisely what enables blind equality search on encrypted\n * fields.\n *\n * **The side channel this opens.** Two records whose field value is the\n * same produce the same ciphertext. An observer with store access can\n * therefore tell which records share a value — not *what* the value is,\n * but the equivalence class. 
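E.g. (a\n * hypothetical field; `dek` is the collection DEK):\n * ```ts\n * const a = await encryptDeterministic('x@y.io', dek, 'users/email')\n * const b = await encryptDeterministic('x@y.io', dek, 'users/email')\n * // a.data === b.data: equality is visible without decrypting\n * ```\n * 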
This is the well-known trade-off of\n * deterministic encryption and is why the feature is strictly opt-in\n * per field, guarded by `acknowledgeDeterministicRisk: true` at\n * collection creation.\n *\n * The context string MUST include the collection name and field name,\n * so:\n * - The same plaintext in two different fields encrypts differently\n * (no cross-field equality leak).\n * - The same plaintext in two different collections (different DEKs)\n * encrypts differently by virtue of the key, even before HKDF\n * domain separation kicks in.\n */\nasync function deriveDeterministicIV(\n dek: CryptoKey,\n context: string,\n plaintext: string,\n): Promise<Uint8Array> {\n const rawDek = await subtle.exportKey('raw', dek)\n const hkdfKey = await subtle.importKey('raw', rawDek, 'HKDF', false, ['deriveBits'])\n const salt = new TextEncoder().encode('noydb-deterministic-v1')\n const info = new TextEncoder().encode(`${context}\\x00${plaintext}`)\n const bits = await subtle.deriveBits(\n { name: 'HKDF', hash: 'SHA-256', salt, info },\n hkdfKey,\n IV_BYTES * 8,\n )\n return new Uint8Array(bits)\n}\n\n/**\n * Encrypt a plaintext string with AES-256-GCM and a deterministic,\n * HKDF-derived IV.\n *\n * The same `{ dek, context, plaintext }` triple always produces the\n * same `{ iv, data }` — call this twice and you can string-compare the\n * ciphertexts to check equality of the inputs without decrypting them.\n *\n * @param context Domain-separation string — by convention\n * `'<collection>/<field>'`. Different contexts encrypt\n * the same plaintext to different ciphertexts, so\n * `email` in collection `users` does not collide with\n * `email` in collection `customers`.\n */\nexport async function encryptDeterministic(\n plaintext: string,\n dek: CryptoKey,\n context: string,\n): Promise<EncryptResult> {\n const iv = await deriveDeterministicIV(dek, context, plaintext)\n const encoded = new TextEncoder().encode(plaintext)\n const ciphertext = await subtle.encrypt(\n { name: 'AES-GCM', iv: iv as BufferSource },\n dek,\n encoded,\n )\n return {\n iv: bufferToBase64(iv),\n data: bufferToBase64(ciphertext),\n }\n}\n\n/**\n * Counterpart to {@link encryptDeterministic}. The IV is stored\n * alongside the ciphertext (exactly like the randomized path), so\n * decrypt uses the stored IV and verifies the GCM auth tag — a tampered\n * ciphertext throws `TamperedError` just like randomized AES-GCM.\n */\nexport async function decryptDeterministic(\n ivBase64: string,\n dataBase64: string,\n dek: CryptoKey,\n): Promise<string> {\n return decrypt(ivBase64, dataBase64, dek)\n}\n\n// ─── Random Generation ─────────────────────────────────────────────────\n\n/** Generate a random 12-byte IV for AES-GCM. */\nexport function generateIV(): Uint8Array {\n return globalThis.crypto.getRandomValues(new Uint8Array(IV_BYTES))\n}\n\n/** Generate a random 32-byte salt for PBKDF2. */\nexport function generateSalt(): Uint8Array {\n return globalThis.crypto.getRandomValues(new Uint8Array(SALT_BYTES))\n}\n\n// ─── Base64 Helpers ────────────────────────────────────────────────────\n\nexport function bufferToBase64(buffer: ArrayBuffer | Uint8Array): string {\n const bytes = buffer instanceof Uint8Array ? 
buffer : new Uint8Array(buffer)\n let binary = ''\n for (let i = 0; i < bytes.length; i++) {\n binary += String.fromCharCode(bytes[i]!)\n }\n return btoa(binary)\n}\n\nexport function base64ToBuffer(base64: string): Uint8Array<ArrayBuffer> {\n const binary = atob(base64)\n const bytes = new Uint8Array(binary.length)\n for (let i = 0; i < binary.length; i++) {\n bytes[i] = binary.charCodeAt(i)\n }\n return bytes\n}\n","/**\n * Ledger entry shape + canonical JSON + sha256 helpers.\n *\n * This file holds the PURE primitives used by the hash-chained ledger:\n * the entry type, the deterministic (sort-stable) JSON encoder, and\n * the sha256 hasher that produces `prevHash` and `ledger.head()`.\n *\n * Everything here is validator-free and side-effect free — the only\n * runtime dep is Web Crypto's `subtle.digest` for the sha256 call,\n * which we already use for every other hashing operation in the core.\n *\n * The hash chain property works like this:\n *\n * hash(entry[i]) = sha256(canonicalJSON(entry[i]))\n * entry[i+1].prevHash = hash(entry[i])\n *\n * Any modification to `entry[i]` (field values, field order, whitespace)\n * produces a different `hash(entry[i])`, which means `entry[i+1]`'s\n * stored `prevHash` no longer matches the recomputed hash, which means\n * `verify()` returns `{ ok: false, divergedAt: i + 1 }`. The chain is\n * append-only and tamper-evident without external anchoring.\n */\n\n/**\n * A single ledger entry in its plaintext form — what gets serialized,\n * hashed, and then encrypted with the ledger DEK before being written\n * to the `_ledger/` adapter collection.\n *\n * ## Why hash the ciphertext, not the plaintext?\n *\n * `payloadHash` is the sha256 of the record's ENCRYPTED envelope bytes,\n * not its plaintext. This matters:\n *\n * 1. **Zero-knowledge preserved.** A user (or a third party) can\n * verify the ledger against the stored envelopes without any\n * decryption keys. The adapter layer already holds only\n * ciphertext, so hashing the ciphertext keeps the ledger at the\n * same privacy level as the adapter.\n *\n * 2. **Determinism.** Plaintext → ciphertext is randomized by the\n * fresh per-write IV, so `hash(plaintext)` would need extra\n * normalization. `hash(ciphertext)` is already deterministic and\n * unique per write.\n *\n * 3. **Detection property.** If an attacker modifies even one byte of\n * the stored ciphertext (trying to flip a record), the hash\n * changes, the ledger's recorded `payloadHash` no longer matches,\n * and a data-integrity check fails. We don't do that check in\n * `verify()` today, but the\n * hook is there for a future `verifyIntegrity()` follow-up.\n *\n * Fields marked `op`, `collection`, `id`, `version`, `ts`, `actor` are\n * plaintext METADATA about the operation — NOT the record itself. The\n * entry is still encrypted at rest via the ledger DEK, but adapters\n * could theoretically infer operation patterns from the sizes and\n * timestamps. This is an accepted trade-off for the tamper-evidence\n * property; full ORAM-level privacy is out of scope for noy-db.\n */\nexport interface LedgerEntry {\n /**\n * Zero-based sequential position of this entry in the chain. 
The\n * canonical adapter key is this number zero-padded to 10 digits\n * (\"0000000001\") so lexicographic ordering matches numeric order.\n */\n readonly index: number\n\n /**\n * Hex-encoded sha256 of the canonical JSON of the PREVIOUS entry.\n * The genesis entry (index 0) has `prevHash === ''` — the first\n * entry in a fresh vault has nothing to point back to.\n */\n readonly prevHash: string\n\n /**\n * Which kind of mutation this entry records. For now, only\n * data operations (`put`, `delete`) are supported. Access-control\n * operations (`grant`, `revoke`, `rotate`) will be added in a\n * follow-up once the keyring write path is instrumented — that's\n * tracked in the epic issue.\n */\n readonly op: 'put' | 'delete'\n\n /** The collection the mutation targeted. */\n readonly collection: string\n\n /** The record id the mutation targeted. */\n readonly id: string\n\n /**\n * The record version AFTER this mutation. For `put` this is the\n * newly assigned version; for `delete` this is the version that\n * was deleted (the last version visible to reads).\n */\n readonly version: number\n\n /** ISO timestamp of the mutation. */\n readonly ts: string\n\n /** User id of the actor who performed the mutation. */\n readonly actor: string\n\n /**\n * Hex-encoded sha256 of the encrypted envelope's `_data` field.\n * For `put`, this is the hash of the new ciphertext. For `delete`,\n * it's the hash of the last visible ciphertext at deletion time,\n * or the empty string if nothing was there to delete. Hashing the\n * ciphertext (not the plaintext) preserves zero-knowledge — see\n * the file docstring.\n */\n readonly payloadHash: string\n\n /**\n * Optional hex-encoded sha256 of the encrypted JSON Patch delta\n * blob stored alongside this entry in `_ledger_deltas/`. Present\n * only for `put` operations that had a previous version — the\n * genesis put of a new record, and every `delete`, leave this\n * field undefined.\n *\n * The delta payload itself lives in a sibling internal collection\n * (`_ledger_deltas/<paddedIndex>`) and is encrypted with the\n * ledger DEK. Callers use `ledger.loadDelta(index)` to decrypt and\n * deserialize it when reconstructing a historical version.\n *\n * Why optional instead of always-present: the first put of a\n * record has no previous version to diff against, so storing an\n * empty patch would be noise. For deletes there's no \"next\" state\n * to describe with a delta. Both cases set this field to undefined.\n *\n * Note: the canonical-JSON hasher treats `undefined` as invalid\n * (it's one of the guard rails), so on the wire this field is\n * either `{ deltaHash: '<hex>' }` or absent from the JSON\n * entirely — never `{ deltaHash: undefined }`.\n */\n readonly deltaHash?: string\n}\n\n/**\n * Canonical (sort-stable) JSON encoder.\n *\n * This function is the load-bearing primitive of the hash chain:\n * `sha256(canonicalJSON(entry))` must produce the same hex string\n * every time, on every machine, for the same logical entry — otherwise\n * `verify()` would return `{ ok: false }` on cross-platform reads.\n *\n * JavaScript's `JSON.stringify` is almost canonical, but NOT quite:\n * it preserves the insertion order of object keys, which means\n * `{a:1,b:2}` and `{b:2,a:1}` serialize differently. We fix this by\n * recursively walking objects and sorting their keys before\n * concatenation.\n *\n * Arrays keep their original order (reordering them would change\n * semantics). Numbers, strings, booleans, and `null` use the default\n * JSON encoding. 
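For example:\n * ```ts\n * canonicalJson({ b: 2, a: 1 }) // → '{\"a\":1,\"b\":2}'\n * canonicalJson({ a: 1, b: 2 }) // → '{\"a\":1,\"b\":2}' (same hash input)\n * ```\n * 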
`undefined` and functions are rejected — ledger\n * entries are plain data, and silently dropping `undefined` would\n * break the \"same input → same hash\" property if a caller forgot to\n * omit a field.\n *\n * Performance: one pass per nesting level; O(n log n) for key sorting\n * at each object. Entries are small (< 1 KB) so this is negligible\n * compared to the sha256 call.\n */\nexport function canonicalJson(value: unknown): string {\n if (value === null) return 'null'\n if (typeof value === 'boolean') return value ? 'true' : 'false'\n if (typeof value === 'number') {\n if (!Number.isFinite(value)) {\n throw new Error(\n `canonicalJson: refusing to encode non-finite number ${String(value)}`,\n )\n }\n return JSON.stringify(value)\n }\n if (typeof value === 'string') return JSON.stringify(value)\n if (typeof value === 'bigint') {\n throw new Error('canonicalJson: BigInt is not JSON-serializable')\n }\n if (typeof value === 'undefined' || typeof value === 'function') {\n throw new Error(\n `canonicalJson: refusing to encode ${typeof value} — include all fields explicitly`,\n )\n }\n if (Array.isArray(value)) {\n return '[' + value.map((v) => canonicalJson(v)).join(',') + ']'\n }\n if (typeof value === 'object') {\n const obj = value as Record<string, unknown>\n const keys = Object.keys(obj).sort()\n const parts: string[] = []\n for (const key of keys) {\n parts.push(JSON.stringify(key) + ':' + canonicalJson(obj[key]))\n }\n return '{' + parts.join(',') + '}'\n }\n throw new Error(`canonicalJson: unexpected value type: ${typeof value}`)\n}\n\n/**\n * Compute a hex-encoded sha256 of a string via Web Crypto's subtle API.\n *\n * We use hex (not base64) for hashes because hex is case-insensitive,\n * fixed-length (64 chars), and easier to compare visually in debug\n * output. Base64 would save a few bytes in storage but every encrypted\n * ledger entry is already much larger than the hash itself.\n */\nexport async function sha256Hex(input: string): Promise<string> {\n const bytes = new TextEncoder().encode(input)\n const digest = await globalThis.crypto.subtle.digest('SHA-256', bytes)\n return bytesToHex(new Uint8Array(digest))\n}\n\n/**\n * Compute the canonical hash of a ledger entry. Short wrapper around\n * `canonicalJson` + `sha256Hex`; callers use this instead of composing\n * the two functions every time, so any future change to the hashing\n * pipeline (e.g., adding a domain-separation prefix) lives in one place.\n */\nexport async function hashEntry(entry: LedgerEntry): Promise<string> {\n return sha256Hex(canonicalJson(entry))\n}\n\n/** Convert a Uint8Array to a lowercase hex string. */\nfunction bytesToHex(bytes: Uint8Array): string {\n const hex = new Array<string>(bytes.length)\n for (let i = 0; i < bytes.length; i++) {\n // The `?? 0` fallback is never hit: indexing a Uint8Array within\n // bounds always returns a number, but the compiler's\n // noUncheckedIndexedAccess flag widens it to `number | undefined`,\n // so we coalesce to satisfy the type checker. Safe by construction.\n hex[i] = (bytes[i] ?? 0).toString(16).padStart(2, '0')\n }\n return hex.join('')\n}\n\n/**\n * Pad an index to the canonical 10-digit form used as the adapter key.\n * Ten digits is enough for ~10 billion ledger entries per vault\n * — far beyond any realistic use case, but cheap enough that the extra\n * digits don't hurt storage.\n */\nexport function paddedIndex(index: number): string {\n return String(index).padStart(10, '0')\n}\n\n/** Parse a padded adapter key back into a number. Returns NaN on malformed input. 
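E.g. a quick\n * round-trip:\n * ```ts\n * paddedIndex(42) // → '0000000042'\n * parseIndex(paddedIndex(42)) // → 42\n * ```\n 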
*/\nexport function parseIndex(key: string): number {\n return Number.parseInt(key, 10)\n}\n","/**\n * RFC 6902 JSON Patch — compute + apply.\n *\n * This module is the \"delta history\" primitive: instead of\n * snapshotting the full record on every put (the legacy behavior),\n * `Collection.put` computes a JSON Patch from the previous version to\n * the new version and stores only the patch in the ledger. To\n * reconstruct version N, we walk from the genesis snapshot forward\n * applying patches. Storage scales with **edit size**, not record\n * size — a 10 KB record edited 1000 times costs ~10 KB of deltas\n * instead of ~10 MB of snapshots.\n *\n * ## Why hand-roll instead of using a library?\n *\n * RFC 6902 has good libraries (`fast-json-patch`, `rfc6902`) but every\n * single one of them adds a runtime dependency to `@noy-db/core`. The\n * \"zero runtime dependencies\" promise is one of the core's load-bearing\n * features, and the patch surface we actually need is small enough\n * (~150 LoC) that vendoring is the right call.\n *\n * What we implement:\n * - `add` — insert a value at a path\n * - `remove` — delete the value at a path\n * - `replace` — overwrite the value at a path\n *\n * What we deliberately skip (out of scope for the ledger use):\n * - `move` and `copy` — optimizations; the diff algorithm doesn't\n * emit them, so the apply path doesn't need them\n * - `test` — used for transactional patches; we already have\n * optimistic concurrency via `_v` at the envelope layer\n * - Sophisticated array diffing (LCS, edit distance) — we treat\n * arrays as atomic values and emit a single `replace` op when\n * they differ. The accounting domain has small arrays where this\n * is fine; if we ever need patch-level array diffing we can add\n * it without changing the storage format.\n *\n * ## Path encoding (RFC 6902 §3)\n *\n * Paths look like `/foo/bar/0`. Each path segment is either an object\n * key or a numeric array index. Two characters need escaping inside\n * keys: `~` becomes `~0` and `/` becomes `~1`. We implement both.\n *\n * Empty path (`\"\"`) refers to the root document. Only `replace` makes\n * sense at the root, and our diff function emits it as a top-level\n * `replace` when `prev` and `next` differ in shape (object vs array,\n * primitive vs object, etc.).\n */\n\n/** A single JSON Patch operation. Subset of RFC 6902 — see file docstring. */\nexport type JsonPatchOp =\n | { readonly op: 'add'; readonly path: string; readonly value: unknown }\n | { readonly op: 'remove'; readonly path: string }\n | { readonly op: 'replace'; readonly path: string; readonly value: unknown }\n\n/** A complete JSON Patch document — an array of operations. */\nexport type JsonPatch = readonly JsonPatchOp[]\n\n// ─── Compute (diff) ──────────────────────────────────────────────────\n\n/**\n * Compute a JSON Patch that, when applied to `prev`, produces `next`.\n *\n * The algorithm is a straightforward recursive object walk:\n *\n * 1. If both inputs are plain objects (and not arrays/null):\n * - For each key in `prev`, recurse if `next` has it, else emit `remove`\n * - For each key in `next` not in `prev`, emit `add`\n * 2. If both inputs are arrays AND structurally equal, no-op.\n * Otherwise emit a single `replace` for the whole array.\n * 3. If both inputs are deeply equal primitives, no-op.\n * 4. 
Otherwise emit a `replace` at the current path.\n *\n * We do not minimize patches across move-like rearrangements — every\n * generated patch is straightforward enough to apply by hand if you\n * had to debug it.\n */\nexport function computePatch(prev: unknown, next: unknown): JsonPatch {\n const ops: JsonPatchOp[] = []\n diff(prev, next, '', ops)\n return ops\n}\n\nfunction diff(\n prev: unknown,\n next: unknown,\n path: string,\n out: JsonPatchOp[],\n): void {\n // Both null / both undefined → no-op (we don't differentiate them\n // in JSON terms; canonicalJson would reject undefined anyway).\n if (prev === next) return\n\n // One side null, the other not → straight replace.\n if (prev === null || next === null) {\n out.push({ op: 'replace', path, value: next })\n return\n }\n\n const prevIsArray = Array.isArray(prev)\n const nextIsArray = Array.isArray(next)\n const prevIsObject = typeof prev === 'object' && !prevIsArray\n const nextIsObject = typeof next === 'object' && !nextIsArray\n\n // Type changed (e.g., object → primitive, array → object). Replace.\n if (prevIsArray !== nextIsArray || prevIsObject !== nextIsObject) {\n out.push({ op: 'replace', path, value: next })\n return\n }\n\n // Both arrays. We don't do clever LCS-based diffing — emit a single\n // replace for the whole array if they differ. See file docstring for\n // the rationale.\n if (prevIsArray && nextIsArray) {\n if (!arrayDeepEqual(prev as unknown[], next as unknown[])) {\n out.push({ op: 'replace', path, value: next })\n }\n return\n }\n\n // Both plain objects. Recurse key by key.\n if (prevIsObject && nextIsObject) {\n const prevObj = prev as Record<string, unknown>\n const nextObj = next as Record<string, unknown>\n const prevKeys = Object.keys(prevObj)\n const nextKeys = Object.keys(nextObj)\n\n // Handle removes and overlapping recursions in one pass over prev.\n for (const key of prevKeys) {\n const childPath = path + '/' + escapePathSegment(key)\n if (!(key in nextObj)) {\n out.push({ op: 'remove', path: childPath })\n } else {\n diff(prevObj[key], nextObj[key], childPath, out)\n }\n }\n // Handle adds.\n for (const key of nextKeys) {\n if (!(key in prevObj)) {\n out.push({\n op: 'add',\n path: path + '/' + escapePathSegment(key),\n value: nextObj[key],\n })\n }\n }\n return\n }\n\n // Two primitives that aren't strictly equal — replace.\n out.push({ op: 'replace', path, value: next })\n}\n\nfunction arrayDeepEqual(a: unknown[], b: unknown[]): boolean {\n if (a.length !== b.length) return false\n for (let i = 0; i < a.length; i++) {\n if (!deepEqual(a[i], b[i])) return false\n }\n return true\n}\n\nfunction deepEqual(a: unknown, b: unknown): boolean {\n if (a === b) return true\n if (a === null || b === null) return false\n if (typeof a !== typeof b) return false\n if (typeof a !== 'object') return false\n const aArray = Array.isArray(a)\n const bArray = Array.isArray(b)\n if (aArray !== bArray) return false\n if (aArray && bArray) return arrayDeepEqual(a, b as unknown[])\n const aObj = a as Record<string, unknown>\n const bObj = b as Record<string, unknown>\n const aKeys = Object.keys(aObj)\n const bKeys = Object.keys(bObj)\n if (aKeys.length !== bKeys.length) return false\n for (const key of aKeys) {\n if (!(key in bObj)) return false\n if (!deepEqual(aObj[key], bObj[key])) return false\n }\n return true\n}\n\n// ─── Apply ──────────────────────────────────────────────────────────\n\n/**\n * Apply a JSON Patch to a base document and return the result.\n *\n * The base document is **not mutated** — every 
op clones the parent\n * container before writing to it, so the caller's reference to `base`\n * stays untouched. This costs an extra allocation per op but makes\n * the apply pipeline reorderable and safe to interrupt.\n *\n * Throws on:\n * - Removing a path that doesn't exist\n * - Adding to a path whose parent doesn't exist\n * - A path component that doesn't match the document shape (e.g.,\n * trying to step into a primitive)\n *\n * Throwing is the right behavior for the ledger use case: a failed\n * apply means the chain is corrupted, which should be loud rather\n * than silently producing a wrong reconstruction.\n */\nexport function applyPatch<T = unknown>(base: T, patch: JsonPatch): T {\n let result: unknown = clone(base)\n for (const op of patch) {\n result = applyOp(result, op)\n }\n return result as T\n}\n\nfunction applyOp(doc: unknown, op: JsonPatchOp): unknown {\n // Empty path → operation targets the root. Only `replace` and `add`\n // make sense at the root, but we handle `remove` for completeness\n // (root removal returns null).\n if (op.path === '') {\n if (op.op === 'remove') return null\n return clone(op.value)\n }\n\n const segments = parsePath(op.path)\n return walkAndApply(doc, segments, op)\n}\n\nfunction walkAndApply(\n doc: unknown,\n segments: string[],\n op: JsonPatchOp,\n): unknown {\n if (segments.length === 0) {\n // Should never happen — empty path is handled in applyOp().\n throw new Error('walkAndApply: empty segments (internal error)')\n }\n\n const [head, ...rest] = segments\n if (head === undefined) throw new Error('walkAndApply: undefined segment')\n\n if (rest.length === 0) {\n return applyAtTerminal(doc, head, op)\n }\n\n // Recurse into the child container, then rebuild the parent with\n // the modified child.\n if (Array.isArray(doc)) {\n const idx = parseArrayIndex(head, doc.length)\n const child = doc[idx]\n const newChild = walkAndApply(child, rest, op)\n const next = doc.slice()\n next[idx] = newChild\n return next\n }\n if (doc !== null && typeof doc === 'object') {\n const obj = doc as Record<string, unknown>\n if (!(head in obj)) {\n throw new Error(`applyPatch: path segment \"${head}\" not found in object`)\n }\n const newChild = walkAndApply(obj[head], rest, op)\n return { ...obj, [head]: newChild }\n }\n throw new Error(\n `applyPatch: cannot step into ${typeof doc} at segment \"${head}\"`,\n )\n}\n\nfunction applyAtTerminal(\n doc: unknown,\n segment: string,\n op: JsonPatchOp,\n): unknown {\n if (Array.isArray(doc)) {\n const idx =\n segment === '-' ? 
doc.length : parseArrayIndex(segment, doc.length + 1)\n const next = doc.slice()\n if (op.op === 'remove') {\n next.splice(idx, 1)\n return next\n }\n if (op.op === 'add') {\n next.splice(idx, 0, clone(op.value))\n return next\n }\n if (op.op === 'replace') {\n if (idx >= doc.length) {\n throw new Error(\n `applyPatch: replace at out-of-bounds array index ${idx}`,\n )\n }\n next[idx] = clone(op.value)\n return next\n }\n }\n if (doc !== null && typeof doc === 'object') {\n const obj = doc as Record<string, unknown>\n if (op.op === 'remove') {\n if (!(segment in obj)) {\n throw new Error(\n `applyPatch: remove on missing key \"${segment}\"`,\n )\n }\n const next = { ...obj }\n delete next[segment]\n return next\n }\n if (op.op === 'add') {\n // RFC 6902: `add` on an existing key replaces it.\n return { ...obj, [segment]: clone(op.value) }\n }\n if (op.op === 'replace') {\n if (!(segment in obj)) {\n throw new Error(\n `applyPatch: replace on missing key \"${segment}\"`,\n )\n }\n return { ...obj, [segment]: clone(op.value) }\n }\n }\n throw new Error(\n `applyPatch: cannot apply ${op.op} at terminal segment \"${segment}\"`,\n )\n}\n\n// ─── Path encoding (RFC 6902 §3) ─────────────────────────────────────\n\n/**\n * Escape a single path segment per RFC 6902 §3:\n * `~` → `~0`\n * `/` → `~1`\n *\n * Order matters: `~` must be escaped first, otherwise the `~1` we\n * just emitted would be re-escaped to `~01`.\n */\nfunction escapePathSegment(segment: string): string {\n return segment.replace(/~/g, '~0').replace(/\\//g, '~1')\n}\n\nfunction unescapePathSegment(segment: string): string {\n return segment.replace(/~1/g, '/').replace(/~0/g, '~')\n}\n\nfunction parsePath(path: string): string[] {\n if (!path.startsWith('/')) {\n throw new Error(`applyPatch: path must start with '/', got \"${path}\"`)\n }\n return path\n .slice(1)\n .split('/')\n .map(unescapePathSegment)\n}\n\nfunction parseArrayIndex(segment: string, max: number): number {\n if (!/^\\d+$/.test(segment)) {\n throw new Error(\n `applyPatch: array index must be a non-negative integer, got \"${segment}\"`,\n )\n }\n const idx = Number.parseInt(segment, 10)\n if (idx < 0 || idx > max) {\n throw new Error(\n `applyPatch: array index ${idx} out of range [0, ${max}]`,\n )\n }\n return idx\n}\n\n// ─── Cheap structural clone ─────────────────────────────────────────\n\n/**\n * Plain-JSON clone via JSON.parse(JSON.stringify(value)).\n *\n * Faster than `structuredClone` for our use because (a) we know our\n * inputs are JSON-compatible (no Dates, Maps, or BigInts — anything\n * else gets rejected by canonicalJson upstream), and (b) `structuredClone`\n * has overhead for handling arbitrary structured data we don't need.\n *\n * For tiny ledger entries (< 1 KB), the JSON round-trip is in the\n * single-digit microsecond range.\n */\nfunction clone<T>(value: T): T {\n if (value === null || value === undefined) return value\n if (typeof value !== 'object') return value\n return JSON.parse(JSON.stringify(value)) as T\n}\n","/**\n * Ledger storage constants — pinned in their own leaf module so\n * always-on core code (vault.ts, dictionary.ts) can import them\n * without dragging the `LedgerStore` class into the bundle.\n *\n * `splitting: true` in tsup is not enough on its own: when a\n * source file exports both pure constants and a heavyweight class,\n * the bundler keeps the entire chunk reachable from any importer.\n * Extracting the constants lets the floor scenario import them\n * without paying for the class.\n *\n * @internal\n 
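*\n * E.g. always-on core code can import just the constant:\n * ```ts\n * import { LEDGER_COLLECTION } from './constants.js'\n * ```\n * without making `LedgerStore` reachable.\n 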
*/\n\n/** The internal collection name used for ledger entry storage. */\nexport const LEDGER_COLLECTION = '_ledger'\n\n/**\n * The internal collection name used for delta payload storage.\n *\n * Deltas live in a sibling collection (not inside `_ledger`) for two\n * reasons:\n *\n * 1. **Listing efficiency.** `ledger.loadAllEntries()` calls\n * `adapter.list(_ledger)` which would otherwise return every\n * delta key alongside every entry key. Splitting them keeps the\n * list small (one key per ledger entry) and the delta reads\n * keyed by the entry's index.\n *\n * 2. **Prune-friendliness.** A future `pruneHistory()` will delete\n * old deltas while keeping the ledger chain intact (folding old\n * deltas into a base snapshot). Separating the storage makes\n * that deletion a targeted operation on one collection instead\n * of a filter across a mixed list.\n *\n * Both collections share the same ledger DEK — one DEK, two\n * internal collections, same zero-knowledge guarantees.\n */\nexport const LEDGER_DELTAS_COLLECTION = '_ledger_deltas'\n","/**\n * Envelope payload hash — pinned in its own leaf module so consumers\n * (DictionaryHandle, the active history strategy) can import it\n * without dragging in the `LedgerStore` class.\n *\n * See `constants.ts` for the broader rationale.\n *\n * @internal\n */\n\nimport type { EncryptedEnvelope } from '../../types.js'\nimport { sha256Hex } from './entry.js'\n\n/**\n * Compute the `payloadHash` value for an encrypted envelope. Used by\n * `LedgerStore.append` for both put (hash the new envelope) and\n * delete (hash the previous envelope) paths, and by\n * `DictionaryHandle` so its ledger entries match the same contract.\n *\n * Returns the empty string when there is no envelope (delete of a\n * never-existed record). The empty string is tolerated by the ledger\n * entry's `payloadHash` field as the canonical \"nothing here\" value.\n */\nexport async function envelopePayloadHash(\n envelope: EncryptedEnvelope | null,\n): Promise<string> {\n if (!envelope) return ''\n // `_data` is a base64 string for encrypted envelopes and the raw\n // JSON for plaintext ones. Both are strings, so a single sha256Hex\n // call works for both modes — the hash value differs between\n // encrypted/plaintext compartments because the bytes on disk\n // differ.\n return sha256Hex(envelope._data)\n}\n","/**\n * `LedgerStore` — read/write access to a compartment's hash-chained\n * audit log.\n *\n * The store is a thin wrapper around the adapter's `_ledger/` internal\n * collection. Every append:\n *\n * 1. Loads the current head (or treats an empty ledger as head = -1)\n * 2. Computes `prevHash` = sha256(canonicalJson(head))\n * 3. Builds the new entry with `index = head.index + 1`\n * 4. Encrypts the entry with the compartment's ledger DEK\n * 5. Writes the encrypted envelope to `_ledger/<paddedIndex>`\n *\n * `verify()` walks the chain from genesis forward and returns\n * `{ ok: true, head }` on success or `{ ok: false, divergedAt }` on the\n * first broken link.\n *\n * ## Thread / concurrency model\n *\n * For now, we assume a **single writer per vault**. Two\n * concurrent `append()` calls race on the \"read head, write\n * head+1\" cycle and, on stores without atomic CAS, can produce a\n * broken chain. The sync engine is the primary concurrent-writer\n * scenario, and it uses optimistic concurrency via `expectedVersion`\n * on the adapter; the ledger's `append()` applies the same guard,\n * but only stores with `casAtomic: true` enforce it. 
Multi-writer hardening is a\n * follow-up.\n *\n * Single-writer usage IS safe, including across process restarts:\n * `head()` reads the adapter fresh each call, so a crash between the\n * adapter.put of a data record and the ledger append just means the\n * ledger is missing an entry for that record. `verify()` still\n * succeeds; a future `verifyIntegrity()` helper can cross-check the\n * ledger against the data collections to catch the gap.\n *\n * ## Why hide the ledger from `vault.collection()`?\n *\n * The `_ledger` name starts with `_`, matching the existing prefix\n * convention for internal collections (`_keyring`, `_sync`,\n * `_history`). The Vault's public `collection()` method already\n * returns entries for any name, but `loadAll()` filters out\n * underscore-prefixed collections so backups and exports don't leak\n * ledger metadata. We keep the ledger accessible ONLY via\n * `vault.ledger()` to enforce the hash-chain invariants — direct\n * puts via `collection('_ledger')` would bypass the `append()` logic.\n */\n\nimport type { NoydbStore, EncryptedEnvelope } from '../../types.js'\nimport { NOYDB_FORMAT_VERSION } from '../../types.js'\nimport { encrypt, decrypt } from '../../crypto.js'\nimport { ConflictError, LedgerContentionError } from '../../errors.js'\nimport {\n canonicalJson,\n hashEntry,\n paddedIndex,\n sha256Hex,\n type LedgerEntry,\n} from './entry.js'\nimport type { JsonPatch } from './patch.js'\nimport { applyPatch } from './patch.js'\nimport { LEDGER_COLLECTION, LEDGER_DELTAS_COLLECTION } from './constants.js'\nimport { envelopePayloadHash } from './hash.js'\n\n/**\n * Maximum optimistic-CAS retries on the ledger head. Each failed\n * attempt invalidates the head cache, re-reads, and retries with a\n * fresh next-index. After N failures we surface\n * `LedgerContentionError` so the caller can decide whether to retry,\n * queue, or alert.\n */\nconst MAX_APPEND_ATTEMPTS = 8\n\n// — re-export the constants + helper so any existing\n// `import { LEDGER_COLLECTION } from '...store.js'` paths keep\n// working. Internal core paths (vault.ts) import from the leaf\n// modules directly to avoid pulling this file's class into the\n// floor bundle.\nexport { LEDGER_COLLECTION, LEDGER_DELTAS_COLLECTION, envelopePayloadHash }\n\n/**\n * Input shape for `LedgerStore.append()`. The caller supplies the\n * operation metadata; the store fills in `index` and `prevHash`.\n */\nexport interface AppendInput {\n op: LedgerEntry['op']\n collection: string\n id: string\n version: number\n actor: string\n payloadHash: string\n /**\n * Optional JSON Patch representing the delta from the previous\n * version to the new version. Present only for `put` operations\n * that had a previous version; omitted for genesis puts and for\n * deletes. When present, `LedgerStore.append` persists the patch\n * in `_ledger_deltas/<paddedIndex>` and records its sha256 hash\n * as the entry's `deltaHash` field.\n */\n delta?: JsonPatch\n}\n\n/**\n * Result of `LedgerStore.verify()`. On success, `head` is the hash of\n * the last entry — the same value that should be published to any\n * external anchoring service (blockchain, OpenTimestamps, etc.). On\n * failure, `divergedAt` is the 0-based index of the first entry whose\n * recorded `prevHash` does not match the recomputed hash of its\n * predecessor. 
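A caller\n * might branch on the union like this (hypothetical usage; `vault` is\n * an open vault handle):\n * ```ts\n * const res = await vault.ledger().verify()\n * if (!res.ok) {\n *   console.error('ledger diverged at entry ' + res.divergedAt)\n * }\n * ```\n * 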
Entries at `divergedAt` and later are untrustworthy;\n * entries before that index are still valid.\n */\nexport type VerifyResult =\n | { readonly ok: true; readonly head: string; readonly length: number }\n | {\n readonly ok: false\n readonly divergedAt: number\n readonly expected: string\n readonly actual: string\n }\n\n/**\n * A LedgerStore is bound to a single vault. Callers obtain one\n * via `vault.ledger()` — there is no public constructor to keep\n * the hash-chain invariants in one place.\n *\n * The class holds no mutable state beyond its dependencies (adapter,\n * vault name, DEK resolver, actor id). Every method reads the\n * adapter fresh so multiple instances against the same vault\n * see each other's writes immediately (at the cost of re-parsing the\n * ledger on every head() / verify() call; acceptable at vault scale).\n */\nexport class LedgerStore {\n private readonly adapter: NoydbStore\n private readonly vault: string\n private readonly encrypted: boolean\n private readonly getDEK: (collectionName: string) => Promise<CryptoKey>\n private readonly actor: string\n\n /**\n * In-memory cache of the chain head — the most recently appended\n * entry along with its precomputed hash. Without this, every\n * `append()` would re-load every prior entry to recompute the\n * prevHash, making N puts O(N²) — a 1K-record stress test goes from\n * < 100ms to a multi-second timeout.\n *\n * The cache is populated on first read (`append`, `head`, `verify`)\n * and updated in-place on every successful `append`. Single-writer\n * usage (the assumption) keeps it consistent. A second\n * LedgerStore instance writing to the same vault would not\n * see the first instance's appends in its cached state — that's the\n * concurrency caveat documented at the class level.\n *\n * Sentinel `undefined` means \"not yet loaded\"; an explicit `null`\n * value means \"loaded and confirmed empty\" — distinguishing these\n * matters because an empty ledger is a valid state (genesis prevHash\n * is the empty string), and we don't want to re-scan the adapter\n * just because the chain is freshly initialized.\n */\n private headCache: { entry: LedgerEntry; hash: string } | null | undefined = undefined\n\n constructor(opts: {\n adapter: NoydbStore\n vault: string\n encrypted: boolean\n getDEK: (collectionName: string) => Promise<CryptoKey>\n actor: string\n }) {\n this.adapter = opts.adapter\n this.vault = opts.vault\n this.encrypted = opts.encrypted\n this.getDEK = opts.getDEK\n this.actor = opts.actor\n }\n\n /**\n * Lazily load (or return cached) the current chain head. The cache\n * sentinel is `undefined` until first access; after the first call,\n * the cache holds either a `{ entry, hash }` for non-empty ledgers\n * or `null` for empty ones.\n */\n private async getCachedHead(): Promise<{ entry: LedgerEntry; hash: string } | null> {\n if (this.headCache !== undefined) return this.headCache\n const entries = await this.loadAllEntries()\n const last = entries[entries.length - 1]\n if (!last) {\n this.headCache = null\n return null\n }\n this.headCache = { entry: last, hash: await hashEntry(last) }\n return this.headCache\n }\n\n /**\n * Append a new entry to the ledger. Returns the full entry that was\n * written (with its assigned index and computed prevHash) so the\n * caller can use the hash for downstream purposes (e.g., embedding\n * in a verifiable backup).\n *\n * This is the **only** way to add entries. 
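A sketch\n * of a typical call (values hypothetical; `payloadHash` would come\n * from `envelopePayloadHash`):\n * ```ts\n * const entry = await ledger.append({\n *   op: 'put',\n *   collection: 'invoices',\n *   id: 'inv-001',\n *   version: 3,\n *   actor: 'user-1',\n *   payloadHash,\n * })\n * // entry.index and entry.prevHash were assigned by the store\n * ```\n * 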
Direct adapter writes to\n * `_ledger/` would bypass the chain math and would be caught by the\n * next `verify()` call as a divergence.\n *\n * ## Multi-writer correctness\n *\n * Append is implemented as an optimistic-CAS retry loop. On every\n * attempt:\n *\n * 1. Read fresh head (cache invalidated on retry).\n * 2. Compute `nextIndex = head.index + 1`, `prevHash = hash(head)`.\n * 3. Encrypt delta payload IN MEMORY (no adapter write yet) so we\n * can compute `deltaHash` before claiming the chain slot.\n * 4. Build + encrypt the entry envelope.\n * 5. `adapter.put(_ledger, paddedIndex, envelope, expectedVersion: 0)`\n * — the `expectedVersion: 0` asserts \"this slot must not exist.\"\n * Stores with `casAtomic: true` honor the CAS check; under\n * contention the second writer's put throws `ConflictError`.\n * 6. On `ConflictError`: invalidate the head cache, sleep with\n * bounded backoff + jitter, retry. After `MAX_APPEND_ATTEMPTS`\n * retries throw {@link LedgerContentionError}.\n * 7. On success: write the delta envelope (if any) at the same\n * index. Update the head cache.\n *\n * Entry-first ordering matters: writing the delta first under\n * contention would orphan delta records at indices the writer never\n * actually claimed. The deltaHash is computed off the encrypted\n * envelope's `_data` field, which doesn't require the envelope to\n * be persisted.\n *\n * Stores with `casAtomic: false` (file, s3, r2 by default) silently\n * accept the `expectedVersion: 0` argument and proceed without a\n * CAS check. Concurrent appends against those stores remain\n * best-effort — pair them with an advisory lock or with sync\n * single-writer discipline.\n */\n async append(input: AppendInput): Promise<LedgerEntry> {\n let lastConflict: ConflictError | undefined\n for (let attempt = 0; attempt < MAX_APPEND_ATTEMPTS; attempt++) {\n // Force a fresh head read on every retry. The first attempt may\n // hit the cache; subsequent attempts must re-scan the adapter\n // because the prior conflict means our cached state is stale.\n if (attempt > 0) {\n this.headCache = undefined\n }\n try {\n return await this.appendOnce(input)\n } catch (err) {\n if (err instanceof ConflictError) {\n lastConflict = err\n if (attempt < MAX_APPEND_ATTEMPTS - 1) {\n await sleepBackoff(attempt)\n }\n continue\n }\n throw err\n }\n }\n void lastConflict\n throw new LedgerContentionError(MAX_APPEND_ATTEMPTS)\n }\n\n /**\n * One attempt at the append cycle. Throws `ConflictError` when the\n * CAS check on the entry put fails — `append()` catches that and\n * retries. Any other error propagates to the caller.\n */\n private async appendOnce(input: AppendInput): Promise<LedgerEntry> {\n const cached = await this.getCachedHead()\n const lastEntry = cached?.entry\n const prevHash = cached?.hash ?? ''\n const nextIndex = lastEntry ? lastEntry.index + 1 : 0\n\n // Encrypt the delta in memory so we can compute deltaHash WITHOUT\n // claiming the deltas slot yet — entry-put is the chain claim.\n let deltaEnvelope: EncryptedEnvelope | undefined\n let deltaHash: string | undefined\n if (input.delta !== undefined) {\n deltaEnvelope = await this.encryptDelta(input.delta)\n deltaHash = await sha256Hex(deltaEnvelope._data)\n }\n\n // Build the entry. 
Conditionally include `deltaHash` so\n // canonicalJson (which rejects undefined) never sees it when\n // there's no delta.\n const entryBase = {\n index: nextIndex,\n prevHash,\n op: input.op,\n collection: input.collection,\n id: input.id,\n version: input.version,\n ts: new Date().toISOString(),\n actor: input.actor === '' ? this.actor : input.actor,\n payloadHash: input.payloadHash,\n } as const\n const entry: LedgerEntry =\n deltaHash !== undefined\n ? { ...entryBase, deltaHash }\n : entryBase\n\n const envelope = await this.encryptEntry(entry)\n // expectedVersion: 0 ≡ \"the slot must not yet exist.\" Honored by\n // casAtomic stores; silently passed through by non-CAS stores.\n await this.adapter.put(\n this.vault,\n LEDGER_COLLECTION,\n paddedIndex(entry.index),\n envelope,\n 0,\n )\n\n // Chain slot claimed. Now write the delta record (if any).\n if (deltaEnvelope) {\n await this.adapter.put(\n this.vault,\n LEDGER_DELTAS_COLLECTION,\n paddedIndex(entry.index),\n deltaEnvelope,\n 0,\n )\n }\n\n // Update the head cache so the next append() doesn't re-scan the\n // adapter.\n this.headCache = { entry, hash: await hashEntry(entry) }\n return entry\n }\n\n /**\n * Load a delta payload by its entry index. Returns `null` if the\n * entry at that index doesn't reference a delta (genesis puts and\n * deletes leave the slot empty) or if the delta row is missing\n * (possible after a `pruneHistory` fold).\n *\n * The caller is responsible for deciding what to do with a missing\n * delta — `ledger.reconstruct()` uses it as a \"stop walking\n * backward\" signal and falls back to the on-disk current value.\n */\n async loadDelta(index: number): Promise<JsonPatch | null> {\n const envelope = await this.adapter.get(\n this.vault,\n LEDGER_DELTAS_COLLECTION,\n paddedIndex(index),\n )\n if (!envelope) return null\n if (!this.encrypted) {\n return JSON.parse(envelope._data) as JsonPatch\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const json = await decrypt(envelope._iv, envelope._data, dek)\n return JSON.parse(json) as JsonPatch\n }\n\n /** Encrypt a JSON Patch into an envelope for storage. Mirrors encryptEntry. */\n private async encryptDelta(patch: JsonPatch): Promise<EncryptedEnvelope> {\n const json = JSON.stringify(patch)\n if (!this.encrypted) {\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: 1,\n _ts: new Date().toISOString(),\n _iv: '',\n _data: json,\n _by: this.actor,\n }\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const { iv, data } = await encrypt(json, dek)\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: 1,\n _ts: new Date().toISOString(),\n _iv: iv,\n _data: data,\n _by: this.actor,\n }\n }\n\n /**\n * Read all entries in ascending-index order. Used internally by\n * `append()`, `head()`, `verify()`, and `entries()`. 
Decryption is\n * serial because the entries are tiny and the overhead of a Promise\n * pool would dominate at realistic chain lengths (< 100K entries).\n */\n async loadAllEntries(): Promise<LedgerEntry[]> {\n const keys = await this.adapter.list(this.vault, LEDGER_COLLECTION)\n // Sort lexicographically, which matches numeric order because\n // keys are zero-padded to 10 digits.\n keys.sort()\n const entries: LedgerEntry[] = []\n for (const key of keys) {\n const envelope = await this.adapter.get(\n this.vault,\n LEDGER_COLLECTION,\n key,\n )\n if (!envelope) continue\n entries.push(await this.decryptEntry(envelope))\n }\n return entries\n }\n\n /**\n * Return the current head of the ledger: the last entry, its hash,\n * and the total chain length. `null` on an empty ledger so callers\n * can distinguish \"no history yet\" from \"empty history\".\n */\n async head(): Promise<\n | { readonly entry: LedgerEntry; readonly hash: string; readonly length: number }\n | null\n > {\n const cached = await this.getCachedHead()\n if (!cached) return null\n // `length` is `entry.index + 1` because indices are zero-based and\n // contiguous. We don't need to re-scan the adapter to compute it.\n return {\n entry: cached.entry,\n hash: cached.hash,\n length: cached.entry.index + 1,\n }\n }\n\n /**\n * Return entries in the requested half-open range `[from, to)`.\n * Defaults: `from = 0`, `to = length`. The indices are clipped to\n * the valid range; no error is thrown for out-of-range queries.\n */\n async entries(opts: { from?: number; to?: number } = {}): Promise<LedgerEntry[]> {\n const all = await this.loadAllEntries()\n const from = Math.max(0, opts.from ?? 0)\n const to = Math.min(all.length, opts.to ?? all.length)\n return all.slice(from, to)\n }\n\n /**\n * Reconstruct a record's state at a given historical version by\n * walking the ledger's delta chain backward from the current state.\n *\n * ## Algorithm\n *\n * Ledger deltas are stored in **reverse** form — each entry's\n * patch describes how to undo that put, transforming the new\n * record back into the previous one. `reconstruct` exploits this\n * by:\n *\n * 1. Finding every ledger entry for `(collection, id)` in the\n * chain, sorted by index ascending.\n * 2. Starting from `current` (the present value of the record,\n * as held by the caller — typically fetched via\n * `Collection.get()`).\n * 3. Walking entries in **descending** index order and applying\n * each entry's reverse patch, stopping when we reach the\n * entry whose version equals `atVersion`.\n *\n * The result is the record as it existed immediately AFTER the\n * put at `atVersion`. To get the state at the genesis put\n * (version 1), the walk runs all the way back through every put\n * after the first.\n *\n * ## Caveats\n *\n * - **Delete entries** break the walk: once we see a delete, the\n * record didn't exist before that point, so there's nothing to\n * reconstruct. We return `null` in that case.\n * - **Missing deltas** (e.g., after `pruneHistory` folds old\n * entries into a base snapshot) also stop the walk. The current\n * release does not ship `pruneHistory`, so today this only\n * happens if an entry was deleted out-of-band.\n * - The caller MUST pass the correct current value. Passing a\n * mutated object would corrupt the reconstruction — the patch\n * chain is only valid against the exact state that was in\n * effect when the most recent put happened.\n *\n * For now, `reconstruct` is the only way to read a historical\n * version via deltas. 
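A sketch\n * (hypothetical ids; `current` is the live value just read via\n * `Collection.get()`):\n * ```ts\n * const v3 = await ledger.reconstruct('invoices', 'inv-001', current, 3)\n * // → the record right after the put that wrote version 3, or null\n * //   when that version is unreachable (deleted, pruned, or absent)\n * ```\n * 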
The legacy `_history` collection still\n * holds full snapshots and `Collection.getVersion()` still reads\n * from there — the two paths coexist until pruneHistory lands in\n * a follow-up and delta becomes the default.\n */\n async reconstruct<T>(\n collection: string,\n id: string,\n current: T,\n atVersion: number,\n ): Promise<T | null> {\n const all = await this.loadAllEntries()\n // Filter to entries for this (collection, id), in ascending index.\n const matching = all.filter(\n (e) => e.collection === collection && e.id === id,\n )\n if (matching.length === 0) {\n // No ledger history at all for this record, so there are no\n // deltas to walk and no way to verify `atVersion` against the\n // chain. Be conservative and report the version as unreachable.\n return null\n }\n\n // Walk entries in descending index order, applying each reverse\n // delta until we reach the target version.\n let state: T | null = current\n for (let i = matching.length - 1; i >= 0; i--) {\n const entry = matching[i]\n if (!entry) continue\n\n // Match check FIRST — before applying this entry's reverse\n // patch. `state` at this point is the record state immediately\n // after this entry's put (or before this entry's delete), so\n // if the caller asked for this exact version, we're done.\n if (entry.version === atVersion && entry.op !== 'delete') {\n return state\n }\n\n if (entry.op === 'delete') {\n // A delete erases the live state. If the caller asks for a\n // version older than the delete we should continue walking\n // (state becomes null and the next put resets it). But we\n // can't reconstruct that pre-delete state from the current\n // in-memory `state` — the delete has no reverse patch. So\n // anything past this point is unreachable; return null.\n return null\n }\n\n if (entry.deltaHash === undefined) {\n // Genesis put — the earliest state for this lifecycle; there\n // is no reverse patch, so we can't walk further back. A\n // matching `atVersion` was already returned by the check\n // above; this guard stays as a defensive backstop.\n if (entry.version === atVersion) return state\n return null\n }\n\n const patch = await this.loadDelta(entry.index)\n if (!patch) {\n // Delta row is missing (probably pruned). Stop walking.\n return null\n }\n\n if (state === null) {\n // We're trying to walk back across a delete range and there's\n // nothing to apply a reverse patch to. Bail.\n return null\n }\n\n state = applyPatch(state, patch)\n }\n\n // Ran off the end of the walk without matching. The target\n // version doesn't exist in this record's chain.\n return null\n }\n\n /**\n * Walk the chain from genesis forward and verify every link.\n *\n * Returns `{ ok: true, head, length }` if every entry's `prevHash`\n * matches the recomputed hash of its predecessor (and the genesis\n * entry's `prevHash` is the empty string).\n *\n * Returns `{ ok: false, divergedAt, expected, actual }` on the first\n * mismatch. 
`divergedAt` is the 0-based index of the BROKEN entry\n * — entries before that index still verify cleanly; entries at and\n * after `divergedAt` are untrustworthy.\n *\n * This method detects:\n * - Mutated entry content (fields changed)\n * - Reordered entries (if any adjacent pair swaps, the prevHash\n * of the second no longer matches)\n * - Inserted entries (the inserted entry's prevHash likely fails,\n * and the following entry's prevHash definitely fails)\n * - Deleted entries (the entry after the deletion sees a wrong\n * prevHash)\n *\n * It does NOT detect:\n * - Tampering with the DATA collections that bypassed the ledger\n * entirely (e.g., an attacker who modifies records without\n * appending matching ledger entries — this is why we also\n * plan a `verifyIntegrity()` helper in a follow-up)\n * - Truncation of the chain at the tail (dropping the last N\n * entries leaves a shorter but still consistent chain). External\n * anchoring of `head.hash` to a trusted service is the defense\n * against this.\n */\n async verify(): Promise<VerifyResult> {\n const entries = await this.loadAllEntries()\n let expectedPrevHash = ''\n for (let i = 0; i < entries.length; i++) {\n const entry = entries[i]\n if (!entry) continue\n if (entry.prevHash !== expectedPrevHash) {\n return {\n ok: false,\n divergedAt: i,\n expected: expectedPrevHash,\n actual: entry.prevHash,\n }\n }\n if (entry.index !== i) {\n // An entry whose stored index doesn't match its position in\n // the sorted list means someone rewrote the adapter keys.\n // Treat as divergence.\n return {\n ok: false,\n divergedAt: i,\n expected: `index=${i}`,\n actual: `index=${entry.index}`,\n }\n }\n expectedPrevHash = await hashEntry(entry)\n }\n return {\n ok: true,\n head: expectedPrevHash,\n length: entries.length,\n }\n }\n\n // ─── Encryption plumbing ─────────────────────────────────────────\n\n /**\n * Serialize + encrypt a ledger entry into an EncryptedEnvelope. The\n * envelope's `_v` field is set to `entry.index + 1` so the usual\n * optimistic-concurrency machinery has a reasonable version number\n * to compare against (the ledger is append-only, so concurrent\n * writes should always bump the index).\n */\n private async encryptEntry(entry: LedgerEntry): Promise<EncryptedEnvelope> {\n const json = canonicalJson(entry)\n if (!this.encrypted) {\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: entry.index + 1,\n _ts: entry.ts,\n _iv: '',\n _data: json,\n _by: entry.actor,\n }\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const { iv, data } = await encrypt(json, dek)\n return {\n _noydb: NOYDB_FORMAT_VERSION,\n _v: entry.index + 1,\n _ts: entry.ts,\n _iv: iv,\n _data: data,\n _by: entry.actor,\n }\n }\n\n /** Decrypt an envelope into a LedgerEntry. Throws on bad key / tamper. */\n private async decryptEntry(envelope: EncryptedEnvelope): Promise<LedgerEntry> {\n if (!this.encrypted) {\n return JSON.parse(envelope._data) as LedgerEntry\n }\n const dek = await this.getDEK(LEDGER_COLLECTION)\n const json = await decrypt(envelope._iv, envelope._data, dek)\n return JSON.parse(json) as LedgerEntry\n }\n}\n\n// `envelopePayloadHash` was moved to `./hash.ts` so it can be\n// imported by core code without dragging this file's `LedgerStore`\n// class into the floor bundle. The re-export at the top of this\n// file keeps the original `import { envelopePayloadHash } from '.../store.js'`\n// path working.\n\n/**\n * Exponential backoff with jitter for the append CAS retry loop.\n * Attempt 0 → ~5–10 ms, attempt 7 → ~640–1280 ms. 
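(In general the\n * delay is base + U(0, base) with base = 5 · 2^attempt, so e.g.\n * attempt 3 sleeps 40–80 ms.)\n * 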
Jitter avoids the\n * thundering-herd problem when multiple writers collide repeatedly.\n */\nfunction sleepBackoff(attempt: number): Promise<void> {\n const base = 5 * Math.pow(2, attempt)\n const jitter = Math.random() * base\n return new Promise((resolve) => setTimeout(resolve, base + jitter))\n}\n","/**\n * Time-machine queries — point-in-time reads reconstructed from the\n * existing history + ledger infrastructure.\n *\n * ## Usage\n *\n * ```ts\n * const vault = await db.openVault('acme', { passphrase })\n * const q1End = vault.at('2026-03-31T23:59:59Z')\n * const invoice = await q1End.collection<Invoice>('invoices').get('inv-001')\n * // → the record as it stood at the close of Q1 2026\n * ```\n *\n * ## How it works\n *\n * Every write path already fans out into two persistence lanes:\n *\n * 1. `saveHistory(...)` persists a **full encrypted envelope snapshot**\n * per version under the `_history` collection (one envelope per\n * version, keyed by `{collection}:{id}:{paddedVersion}`). Each\n * envelope carries its own `_ts` (the write timestamp).\n * 2. `ledger.append(...)` appends a hash-chained audit entry that\n * records the `op` (put / delete), `version`, and `ts`.\n *\n * Reconstruction at a target timestamp T is therefore:\n *\n * - Find the newest history envelope for `(collection, id)` whose\n * `_ts ≤ T` — that's the state the record was in at T.\n * - Check the ledger for any `op: 'delete'` entry for the same\n * `(collection, id)` with `entry.ts` in `(latestEnvelope._ts, T]` —\n * if present, the record was deleted before T, so return `null`.\n * - Decrypt the surviving envelope with the current collection DEK\n * (DEKs are per-collection but stable across versions — the same\n * key encrypts v1 and v15 of a record).\n *\n * No delta replay. The existing `history.ts` module already stores\n * complete snapshots; we just pick the right one.\n *\n * ## Read-only contract\n *\n * Every write method on `CollectionInstant` throws\n * {@link ReadOnlyAtInstantError}. A historical view is a *read*\n * surface — mutating the past would require either a branch/shadow\n * mechanism (tracked under shadow vaults) or a rewrite of\n * history, which breaks the ledger's tamper-evidence guarantee.\n *\n * @module\n */\nimport type { EncryptedEnvelope, NoydbStore } from '../types.js'\nimport type { LedgerStore } from './ledger/store.js'\nimport { getHistory } from './history.js'\nimport { decrypt } from '../crypto.js'\nimport { ReadOnlyAtInstantError } from '../errors.js'\n\n/**\n * Narrow view of a {@link Vault}'s internals that\n * {@link VaultInstant} needs. Passed in by `Vault.at()` rather than\n * constructed here so all crypto + adapter access stays inside the\n * Vault class.\n *\n * Not exported from the public barrel — consumers should get a\n * `VaultInstant` via `vault.at(ts)`, never by constructing one\n * directly.\n */\nexport interface VaultEngine {\n readonly adapter: NoydbStore\n /** Vault name (the compartment). */\n readonly name: string\n /**\n * `true` when the vault was opened with a passphrase (the normal\n * case). `false` in plaintext-mode vaults (`encrypt: false`) — in\n * that case `envelope._data` is raw JSON and we skip the DEK lookup.\n */\n readonly encrypted: boolean\n /**\n * Resolves the DEK used to decrypt a given collection's envelopes.\n * Not called when `encrypted` is false.\n */\n getDEK(collection: string): Promise<CryptoKey>\n /**\n * Lazily-initialised ledger. 
We consult it to detect deletes that\n * happened between the latest history snapshot and the target\n * timestamp. `null` when the ledger is disabled for this vault — in\n * that case time-machine reads fall back to history-only\n * reconstruction (which may miss deletes).\n */\n getLedger(): LedgerStore | null\n}\n\n/**\n * A vault at a fixed instant. Produced by `vault.at(timestamp)`.\n * Carries no session state of its own — every read is a fresh\n * lookup through the vault's adapter.\n *\n * Cheap to construct; safe to throw away. Create one per query.\n */\nexport class VaultInstant {\n constructor(\n private readonly engine: VaultEngine,\n /** Fully-resolved target timestamp (ISO-8601 UTC). */\n public readonly timestamp: string,\n ) {}\n\n /** Get a point-in-time view of a collection. */\n collection<T = unknown>(name: string): CollectionInstant<T> {\n return new CollectionInstant<T>(this.engine, this.timestamp, name)\n }\n}\n\n/**\n * A read-only collection view anchored to a past instant.\n *\n * Every write method throws {@link ReadOnlyAtInstantError} — see the\n * module docstring for why. The read surface is intentionally smaller\n * than the live {@link Collection}: `get` and `list` cover the\n * \"what did the books look like on date X\" use case without pulling\n * in the full query DSL / joins / aggregates at this stage. Follow-up\n * work is tracked separately.\n */\nexport class CollectionInstant<T = unknown> {\n constructor(\n private readonly engine: VaultEngine,\n private readonly targetTs: string,\n public readonly name: string,\n ) {}\n\n /**\n * Return the record as it existed at the target timestamp, or\n * `null` if the record had not been created yet or had already been\n * deleted by then.\n */\n async get(id: string): Promise<T | null> {\n const envelope = await this.resolveEnvelope(id)\n if (!envelope) return null\n const plaintext = this.engine.encrypted\n ? await decrypt(envelope._iv, envelope._data, await this.engine.getDEK(this.name))\n : envelope._data\n return JSON.parse(plaintext) as T\n }\n\n /**\n * IDs of records that existed (had at least one `put` and were not\n * subsequently deleted) at the target timestamp.\n *\n * Implemented as a linear scan over history + ledger. Performance\n * is bounded by total history size (not live-vault size), so the\n * memory-first vault-scale cap (1K–50K records × average history\n * depth) still applies.\n */\n async list(): Promise<string[]> {\n const historyIds = await collectHistoryIds(this.engine.adapter, this.engine.name, this.name)\n const liveIds = await this.engine.adapter.list(this.engine.name, this.name)\n const candidateIds = new Set<string>([...historyIds, ...liveIds])\n const alive: string[] = []\n for (const id of candidateIds) {\n const env = await this.resolveEnvelope(id)\n if (env) alive.push(id)\n }\n return alive.sort()\n }\n\n // ── write guards ───────────────────────────────────────────────────\n\n async put(_id: string, _record: T): Promise<never> {\n throw new ReadOnlyAtInstantError('put', this.targetTs)\n }\n async delete(_id: string): Promise<never> {\n throw new ReadOnlyAtInstantError('delete', this.targetTs)\n }\n async update(_id: string, _patch: Partial<T>): Promise<never> {\n throw new ReadOnlyAtInstantError('update', this.targetTs)\n }\n\n // ── internals ─────────────────────────────────────────────────────\n\n /**\n * Return the envelope that represents the record's state at\n * `targetTs`, accounting for deletes. 
`null` if the record didn't\n * exist at that instant.\n *\n * ## Why we use the ledger as the authoritative timeline\n *\n * The per-version history snapshots saved by `saveHistory()` do\n * carry a `_ts` field, but that timestamp is the moment the\n * snapshot was *captured* (i.e. the instant right before the\n * subsequent overwrite), not the original write time. The ledger,\n * by contrast, records `ts` at the moment of each `put` / `delete`\n * — it's the only source that tracks the real timeline. So:\n *\n * 1. Walk the ledger; find the latest entry for `(collection, id)`\n * with `ts ≤ targetTs`.\n * 2. If that entry is a `delete`, the record was gone at the\n * target instant — return null.\n * 3. Otherwise it's a `put` with a specific `version`. Load the\n * envelope for that version from history, falling back to the\n * live collection for the most recent version.\n *\n * ## Fallback when the ledger is disabled\n *\n * If the ledger is disabled, `getLedger()` returns null and\n * we fall back to comparing envelope `_ts` fields. This is\n * approximate: it gets the *last write* right but may get\n * intermediate versions wrong; adopters needing accurate\n * time-machine reads should leave the ledger enabled.\n */\n private async resolveEnvelope(id: string): Promise<EncryptedEnvelope | null> {\n const ledger = this.engine.getLedger()\n if (ledger) {\n return this.resolveViaLedger(id, ledger)\n }\n return this.resolveViaEnvelopeTs(id)\n }\n\n private async resolveViaLedger(id: string, ledger: LedgerStore): Promise<EncryptedEnvelope | null> {\n const entries = await ledger.entries()\n // Entries are already ordered by index, which is the mutation order.\n let latest: { op: 'put' | 'delete'; version: number } | null = null\n for (const e of entries) {\n if (e.collection !== this.name || e.id !== id) continue\n if (e.ts > this.targetTs) break // entries are time-ordered by index\n latest = { op: e.op, version: e.version }\n }\n if (!latest) return null\n if (latest.op === 'delete') return null\n return this.loadVersion(id, latest.version)\n }\n\n private async resolveViaEnvelopeTs(id: string): Promise<EncryptedEnvelope | null> {\n const history = await getHistory(\n this.engine.adapter, this.engine.name, this.name, id,\n )\n const live = await this.engine.adapter.get(this.engine.name, this.name, id)\n const byVersion = new Map<number, EncryptedEnvelope>()\n for (const e of history) byVersion.set(e._v, e)\n if (live) byVersion.set(live._v, live)\n const sorted = [...byVersion.values()].sort((a, b) =>\n a._ts < b._ts ? 1 : a._ts > b._ts ? -1 : 0,\n )\n return sorted.find((e) => e._ts <= this.targetTs) ?? null\n }\n\n /**\n * Fetch the envelope for a specific version. The live record (most\n * recent put) lives in the main collection; prior versions live in\n * `_history`. 
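For illustration, version 15 of record `inv-001` in collection `invoices` resolves to a single direct key lookup — key shape `<collection>:<recordId>:<paddedVersion>`:\n *\n * ```ts\n * const historyId = 'invoices:inv-001:0000000015' // version zero-padded to 10 digits\n * ```\n *\n * 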
We check live first: when the requested version is still the\n * current one, that single lookup suffices and the `_history` read\n * is skipped; after a delete (or for any older version) the live\n * lookup misses the target version and we fall through to the\n * snapshot in history.\n */\n private async loadVersion(id: string, version: number): Promise<EncryptedEnvelope | null> {\n const live = await this.engine.adapter.get(this.engine.name, this.name, id)\n if (live && live._v === version) return live\n\n // Direct lookup by (collection, id, version) — avoids scanning all history.\n const historyId = `${this.name}:${id}:${String(version).padStart(10, '0')}`\n return await this.engine.adapter.get(this.engine.name, '_history', historyId)\n }\n}\n\n/**\n * Scan the `_history` collection once and collect every distinct\n * `recordId` for the given collection. History keys follow the\n * shape `<collection>:<recordId>:<paddedVersion>`; we take everything\n * between the collection prefix and the last colon (delimiter-safe\n * even when `recordId` contains colons, because `paddedVersion` —\n * exactly 10 digits — never does).\n */\nasync function collectHistoryIds(\n adapter: NoydbStore,\n vault: string,\n collection: string,\n): Promise<string[]> {\n const all = await adapter.list(vault, '_history')\n const prefix = `${collection}:`\n const seen = new Set<string>()\n for (const key of all) {\n if (!key.startsWith(prefix)) continue\n const lastColon = key.lastIndexOf(':')\n if (lastColon <= prefix.length) continue\n const middle = key.slice(prefix.length, lastColon)\n seen.add(middle)\n }\n return [...seen]\n}\n","/**\n * Active history strategy — `withHistory()` returns the real\n * implementation that wires per-record snapshots, the hash-chained\n * audit ledger, JSON-patch deltas, and time-machine reads into the\n * core write/read paths.\n *\n * Consumers opt in by:\n *\n * ```ts\n * import { createNoydb } from '@noy-db/hub'\n * import { withHistory } from '@noy-db/hub/history'\n *\n * const db = await createNoydb({\n * store: ...,\n * user: ...,\n * historyStrategy: withHistory(),\n * })\n * ```\n *\n * The factory is a thin wrapper that delegates to the existing\n * `history.ts`, `diff.ts`, `ledger/store.ts`, `ledger/patch.ts`, and\n * `time-machine.ts` modules. Splitting the import chain through this\n * file is what lets `tsup` tree-shake the heavy modules out of the\n * default `@noy-db/hub` bundle when no `withHistory()` import is\n * present in the consumer.\n *\n * @public\n */\n\nimport type { HistoryStrategy, BuildLedgerOptions } from './strategy.js'\nimport {\n saveHistory,\n getHistory,\n getVersionEnvelope,\n pruneHistory,\n clearHistory,\n} from './history.js'\nimport { diff as computeDiff } from './diff.js'\nimport { LedgerStore, envelopePayloadHash } from './ledger/store.js'\nimport { computePatch } from './ledger/patch.js'\nimport { VaultInstant } from './time-machine.js'\n\n/**\n * Build the active history strategy. 
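Once wired in, the time-machine surface becomes available on vaults (example reused from the `time-machine.ts` module doc above):\n *\n * ```ts\n * const vault = await db.openVault('acme', { passphrase })\n * const q1End = vault.at('2026-03-31T23:59:59Z')\n * const invoice = await q1End.collection<Invoice>('invoices').get('inv-001')\n * ```\n *\n * 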
Today the factory takes no\n * options; per-collection retention tuning still flows through\n * `HistoryConfig` on `Vault.collection()` / `vault.openVault()`.\n *\n * Future option slots (kept off the LTS surface for now):\n * - global maxVersions cap\n * - global beforeDate prune cadence\n * - ledger encryption toggle (today inferred from vault.encrypted)\n */\nexport function withHistory(): HistoryStrategy {\n return {\n saveHistory,\n getHistoryEntries: getHistory,\n getVersionEnvelope,\n pruneHistory,\n clearHistory,\n envelopePayloadHash,\n computePatch,\n diff: computeDiff,\n buildLedger(opts: BuildLedgerOptions): LedgerStore {\n return new LedgerStore({\n adapter: opts.adapter,\n vault: opts.vault,\n encrypted: opts.encrypted,\n getDEK: opts.getDEK,\n actor: opts.actor,\n })\n },\n buildVaultInstant(engine, timestamp): VaultInstant {\n return new VaultInstant(engine, timestamp)\n },\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACSA,IAAM,qBAAqB;AAC3B,IAAM,cAAc;AAEpB,SAAS,UAAU,YAAoB,UAAkB,SAAyB;AAChF,SAAO,GAAG,UAAU,IAAI,QAAQ,IAAI,OAAO,OAAO,EAAE,SAAS,aAAa,GAAG,CAAC;AAChF;AAkBA,SAAS,cAAc,IAAY,YAAoB,UAA4B;AACjF,MAAI,UAAU;AACZ,WAAO,GAAG,WAAW,GAAG,UAAU,IAAI,QAAQ,GAAG;AAAA,EACnD;AACA,SAAO,GAAG,WAAW,GAAG,UAAU,GAAG;AACvC;AAGA,eAAsB,YACpB,SACA,OACA,YACA,UACA,UACe;AACf,QAAM,KAAK,UAAU,YAAY,UAAU,SAAS,EAAE;AACtD,QAAM,QAAQ,IAAI,OAAO,oBAAoB,IAAI,QAAQ;AAC3D;AAGA,eAAsB,WACpB,SACA,OACA,YACA,UACA,SAC8B;AAC9B,QAAM,SAAS,MAAM,QAAQ,KAAK,OAAO,kBAAkB;AAC3D,QAAM,cAAc,OACjB,OAAO,QAAM,cAAc,IAAI,YAAY,QAAQ,CAAC,EACpD,KAAK,EACL,QAAQ;AAEX,QAAM,UAA+B,CAAC;AAEtC,aAAW,MAAM,aAAa;AAC5B,UAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,oBAAoB,EAAE;AAChE,QAAI,CAAC,SAAU;AAGf,QAAI,SAAS,QAAQ,SAAS,MAAM,QAAQ,KAAM;AAClD,QAAI,SAAS,MAAM,SAAS,MAAM,QAAQ,GAAI;AAE9C,YAAQ,KAAK,QAAQ;AAErB,QAAI,SAAS,SAAS,QAAQ,UAAU,QAAQ,MAAO;AAAA,EACzD;AAEA,SAAO;AACT;AAGA,eAAsB,mBACpB,SACA,OACA,YACA,UACA,SACmC;AACnC,QAAM,KAAK,UAAU,YAAY,UAAU,OAAO;AAClD,SAAO,QAAQ,IAAI,OAAO,oBAAoB,EAAE;AAClD;AAGA,eAAsB,aACpB,SACA,OACA,YACA,UACA,SACiB;AACjB,QAAM,SAAS,MAAM,QAAQ,KAAK,OAAO,kBAAkB;AAC3D,QAAM,cAAc,OACjB,OAAO,QAAM,WAAW,cAAc,IAAI,YAAY,QAAQ,IAAI,cAAc,IAAI,UAAU,CAAC,EAC/F,KAAK;AAER,MAAI,WAAqB,CAAC;AAE1B,MAAI,QAAQ,iBAAiB,QAAW;AAEtC,UAAM,OAAO,QAAQ;AACrB,QAAI,YAAY,SAAS,MAAM;AAC7B,iBAAW,YAAY,MAAM,GAAG,YAAY,SAAS,IAAI;AAAA,IAC3D;AAAA,EACF;AAEA,MAAI,QAAQ,YAAY;AAEtB,eAAW,MAAM,aAAa;AAC5B,UAAI,SAAS,SAAS,EAAE,EAAG;AAC3B,YAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,oBAAoB,EAAE;AAChE,UAAI,YAAY,SAAS,MAAM,QAAQ,YAAY;AACjD,iBAAS,KAAK,EAAE;AAAA,MAClB;AAAA,IACF;AAAA,EACF;AAGA,QAAM,gBAAgB,CAAC,GAAG,IAAI,IAAI,QAAQ,CAAC;AAE3C,aAAW,MAAM,eAAe;AAC9B,UAAM,QAAQ,OAAO,OAAO,oBAAoB,EAAE;AAAA,EACpD;AAEA,SAAO,cAAc;AACvB;AAGA,eAAsB,aACpB,SACA,OACA,YACA,UACiB;AACjB,QAAM,SAAS,MAAM,QAAQ,KAAK,OAAO,kBAAkB;AAC3D,MAAI;AAEJ,MAAI,cAAc,UAAU;AAC1B,eAAW,OAAO,OAAO,QAAM,cAAc,IAAI,YAAY,QAAQ,CAAC;AAAA,EACxE,WAAW,YAAY;AACrB,eAAW,OAAO,OAAO,QAAM,cAAc,IAAI,UAAU,CAAC;AAAA,EAC9D,OAAO;AACL,eAAW;AAAA,EACb;AAEA,aAAW,MAAM,UAAU;AACzB,UAAM,QAAQ,OAAO,OAAO,oBAAoB,EAAE;AAAA,EACpD;AAEA,SAAO,SAAS;AAClB;;;AC3IO,SAAS,KAAK,QAAiB,QAAiB,WAAW,IAAiB;AACjF,QAAM,UAAuB,CAAC;AAG9B,MAAI,WAAW,OAAQ,QAAO;AAG9B,MAAI,UAAU,QAAQ,UAAU,MAAM;AACpC,WAAO,CAAC,EAAE,MAAM,YAAY,UAAU,MAAM,SAAS,IAAI,OAAO,CAAC;AAAA,EACnE;AACA,MAAI,UAAU,QAAQ,UAAU,MAAM;AACpC,WAAO,CAAC,EAAE,MAAM,YAAY,UAAU,MAAM,WAAW,MAAM,OAAO,CAAC;AAAA,EACvE;AAGA,MAAI,OAAO,WAAW,OAAO,QAAQ;AACnC,WAAO,CAAC,EAAE,MAAM,YAAY,UAAU,MAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,CAAC;AAAA,EACnF;AAGA,MAAI,OAAO,WAAW,UAAU;AAC9B,WAAO,CAAC,EAAE,
MAAM,YAAY,UAAU,MAAM,WAAW,MAAM,QAAQ,IAAI,OAAO,CAAC;AAAA,EACnF;AAGA,MAAI,MAAM,QAAQ,MAAM,KAAK,MAAM,QAAQ,MAAM,GAAG;AAClD,UAAM,SAAS,KAAK,IAAI,OAAO,QAAQ,OAAO,MAAM;AACpD,aAAS,IAAI,GAAG,IAAI,QAAQ,KAAK;AAC/B,YAAM,IAAI,WAAW,GAAG,QAAQ,IAAI,CAAC,MAAM,IAAI,CAAC;AAChD,UAAI,KAAK,OAAO,QAAQ;AACtB,gBAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,SAAS,IAAI,OAAO,CAAC,EAAE,CAAC;AAAA,MACxD,WAAW,KAAK,OAAO,QAAQ;AAC7B,gBAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,WAAW,MAAM,OAAO,CAAC,EAAE,CAAC;AAAA,MAC5D,OAAO;AACL,gBAAQ,KAAK,GAAG,KAAK,OAAO,CAAC,GAAG,OAAO,CAAC,GAAG,CAAC,CAAC;AAAA,MAC/C;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAGA,QAAM,YAAY;AAClB,QAAM,YAAY;AAClB,QAAM,UAAU,oBAAI,IAAI,CAAC,GAAG,OAAO,KAAK,SAAS,GAAG,GAAG,OAAO,KAAK,SAAS,CAAC,CAAC;AAE9E,aAAW,OAAO,SAAS;AACzB,UAAM,IAAI,WAAW,GAAG,QAAQ,IAAI,GAAG,KAAK;AAC5C,QAAI,EAAE,OAAO,YAAY;AACvB,cAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,SAAS,IAAI,UAAU,GAAG,EAAE,CAAC;AAAA,IAC7D,WAAW,EAAE,OAAO,YAAY;AAC9B,cAAQ,KAAK,EAAE,MAAM,GAAG,MAAM,WAAW,MAAM,UAAU,GAAG,EAAE,CAAC;AAAA,IACjE,OAAO;AACL,cAAQ,KAAK,GAAG,KAAK,UAAU,GAAG,GAAG,UAAU,GAAG,GAAG,CAAC,CAAC;AAAA,IACzD;AAAA,EACF;AAEA,SAAO;AACT;AAGO,SAAS,WAAW,SAA8B;AACvD,MAAI,QAAQ,WAAW,EAAG,QAAO;AACjC,SAAO,QAAQ,IAAI,OAAK;AACtB,YAAQ,EAAE,MAAM;AAAA,MACd,KAAK;AACH,eAAO,KAAK,EAAE,IAAI,KAAK,KAAK,UAAU,EAAE,EAAE,CAAC;AAAA,MAC7C,KAAK;AACH,eAAO,KAAK,EAAE,IAAI,KAAK,KAAK,UAAU,EAAE,IAAI,CAAC;AAAA,MAC/C,KAAK;AACH,eAAO,KAAK,EAAE,IAAI,KAAK,KAAK,UAAU,EAAE,IAAI,CAAC,WAAM,KAAK,UAAU,EAAE,EAAE,CAAC;AAAA,IAC3E;AAAA,EACF,CAAC,EAAE,KAAK,IAAI;AACd;;;ACrDO,IAAM,uBAAuB;;;ACkC7B,IAAM,aAAN,cAAyB,MAAM;AAAA;AAAA,EAE3B;AAAA,EAET,YAAY,MAAc,SAAiB;AACzC,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,OAAO;AAAA,EACd;AACF;AAYO,IAAM,kBAAN,cAA8B,WAAW;AAAA,EAC9C,YAAY,UAAU,qBAAqB;AACzC,UAAM,qBAAqB,OAAO;AAClC,SAAK,OAAO;AAAA,EACd;AACF;AAWO,IAAM,gBAAN,cAA4B,WAAW;AAAA,EAC5C,YAAY,UAAU,yEAAoE;AACxF,UAAM,YAAY,OAAO;AACzB,SAAK,OAAO;AAAA,EACd;AACF;AA6DO,IAAM,yBAAN,cAAqC,WAAW;AAAA,EACrD,YAAY,WAAmB,WAAmB;AAChD;AAAA,MACE;AAAA,MACA,UAAU,SAAS,kCAAkC,SAAS;AAAA,IAChE;AACA,SAAK,OAAO;AAAA,EACd;AACF;AA4WO,IAAM,gBAAN,cAA4B,WAAW;AAAA;AAAA,EAEnC;AAAA,EAET,YAAY,SAAiB,UAAU,oBAAoB;AACzD,UAAM,YAAY,OAAO;AACzB,SAAK,OAAO;AACZ,SAAK,UAAU;AAAA,EACjB;AACF;AAcO,IAAM,wBAAN,cAAoC,WAAW;AAAA,EAC3C;AAAA,EAET,YAAY,UAAkB;AAC5B;AAAA,MACE;AAAA,MACA,0DAA0D,QAAQ;AAAA,IACpE;AACA,SAAK,OAAO;AACZ,SAAK,WAAW;AAAA,EAClB;AACF;;;AC/hBA,IAAM,WAAW;AAGjB,IAAM,SAAS,WAAW,OAAO;AA8EjC,eAAsB,QACpB,WACA,KACwB;AACxB,QAAM,KAAK,WAAW;AACtB,QAAM,UAAU,IAAI,YAAY,EAAE,OAAO,SAAS;AAElD,QAAM,aAAa,MAAM,OAAO;AAAA,IAC9B,EAAE,MAAM,WAAW,GAAuB;AAAA,IAC1C;AAAA,IACA;AAAA,EACF;AAEA,SAAO;AAAA,IACL,IAAI,eAAe,EAAE;AAAA,IACrB,MAAM,eAAe,UAAU;AAAA,EACjC;AACF;AAGA,eAAsB,QACpB,UACA,YACA,KACiB;AACjB,QAAM,KAAK,eAAe,QAAQ;AAClC,QAAM,aAAa,eAAe,UAAU;AAE5C,MAAI;AACF,UAAM,YAAY,MAAM,OAAO;AAAA,MAC7B,EAAE,MAAM,WAAW,GAAuB;AAAA,MAC1C;AAAA,MACA;AAAA,IACF;AACA,WAAO,IAAI,YAAY,EAAE,OAAO,SAAS;AAAA,EAC3C,SAAS,KAAK;AACZ,QAAI,eAAe,SAAS,IAAI,SAAS,kBAAkB;AACzD,YAAM,IAAI,cAAc;AAAA,IAC1B;AACA,UAAM,IAAI;AAAA,MACR,eAAe,QAAQ,IAAI,UAAU;AAAA,IACvC;AAAA,EACF;AACF;AAkTO,SAAS,aAAyB;AACvC,SAAO,WAAW,OAAO,gBAAgB,IAAI,WAAW,QAAQ,CAAC;AACnE;AASO,SAAS,eAAe,QAA0C;AACvE,QAAM,QAAQ,kBAAkB,aAAa,SAAS,IAAI,WAAW,MAAM;AAC3E,MAAI,SAAS;AACb,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AACrC,cAAU,OAAO,aAAa,MAAM,CAAC,CAAE;AAAA,EACzC;AACA,SAAO,KAAK,MAAM;AACpB;AAEO,SAAS,eAAe,QAAyC;AACtE,QAAM,SAAS,KAAK,MAAM;AAC1B,QAAM,QAAQ,IAAI,WAAW,OAAO,MAAM;AAC1C,WAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,UAAM,CAAC,IAAI,OAAO,WAAW,CAAC;AAAA,EAChC;AACA,SAAO;AACT;;;AClVO,SAAS,cAAc,OAAwB;AACpD,MAAI,UAAU,KAAM,QAAO;AAC3B,MAAI,OAAO,UAAU,UAAW,QAAO,QAAQ,SAAS;AACxD,MAAI,OAAO,UAAU,UAAU;AAC7B,QAAI,CAAC,OAAO,SAAS,KAAK,GAAG;AAC3B,YAAM
,IAAI;AAAA,QACR,uDAAuD,OAAO,KAAK,CAAC;AAAA,MACtE;AAAA,IACF;AACA,WAAO,KAAK,UAAU,KAAK;AAAA,EAC7B;AACA,MAAI,OAAO,UAAU,SAAU,QAAO,KAAK,UAAU,KAAK;AAC1D,MAAI,OAAO,UAAU,UAAU;AAC7B,UAAM,IAAI,MAAM,gDAAgD;AAAA,EAClE;AACA,MAAI,OAAO,UAAU,eAAe,OAAO,UAAU,YAAY;AAC/D,UAAM,IAAI;AAAA,MACR,qCAAqC,OAAO,KAAK;AAAA,IACnD;AAAA,EACF;AACA,MAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,WAAO,MAAM,MAAM,IAAI,CAAC,MAAM,cAAc,CAAC,CAAC,EAAE,KAAK,GAAG,IAAI;AAAA,EAC9D;AACA,MAAI,OAAO,UAAU,UAAU;AAC7B,UAAM,MAAM;AACZ,UAAM,OAAO,OAAO,KAAK,GAAG,EAAE,KAAK;AACnC,UAAM,QAAkB,CAAC;AACzB,eAAW,OAAO,MAAM;AACtB,YAAM,KAAK,KAAK,UAAU,GAAG,IAAI,MAAM,cAAc,IAAI,GAAG,CAAC,CAAC;AAAA,IAChE;AACA,WAAO,MAAM,MAAM,KAAK,GAAG,IAAI;AAAA,EACjC;AACA,QAAM,IAAI,MAAM,yCAAyC,OAAO,KAAK,EAAE;AACzE;AAUA,eAAsB,UAAU,OAAgC;AAC9D,QAAM,QAAQ,IAAI,YAAY,EAAE,OAAO,KAAK;AAC5C,QAAM,SAAS,MAAM,WAAW,OAAO,OAAO,OAAO,WAAW,KAAK;AACrE,SAAO,WAAW,IAAI,WAAW,MAAM,CAAC;AAC1C;AAQA,eAAsB,UAAU,OAAqC;AACnE,SAAO,UAAU,cAAc,KAAK,CAAC;AACvC;AAGA,SAAS,WAAW,OAA2B;AAC7C,QAAM,MAAM,IAAI,MAAc,MAAM,MAAM;AAC1C,WAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK;AAIrC,QAAI,CAAC,KAAK,MAAM,CAAC,KAAK,GAAG,SAAS,EAAE,EAAE,SAAS,GAAG,GAAG;AAAA,EACvD;AACA,SAAO,IAAI,KAAK,EAAE;AACpB;AAQO,SAAS,YAAY,OAAuB;AACjD,SAAO,OAAO,KAAK,EAAE,SAAS,IAAI,GAAG;AACvC;AAGO,SAAS,WAAW,KAAqB;AAC9C,SAAO,OAAO,SAAS,KAAK,EAAE;AAChC;;;ACzKO,SAAS,aAAa,MAAe,MAA0B;AACpE,QAAM,MAAqB,CAAC;AAC5B,EAAAA,MAAK,MAAM,MAAM,IAAI,GAAG;AACxB,SAAO;AACT;AAEA,SAASA,MACP,MACA,MACA,MACA,KACM;AAGN,MAAI,SAAS,KAAM;AAGnB,MAAI,SAAS,QAAQ,SAAS,MAAM;AAClC,QAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAC7C;AAAA,EACF;AAEA,QAAM,cAAc,MAAM,QAAQ,IAAI;AACtC,QAAM,cAAc,MAAM,QAAQ,IAAI;AACtC,QAAM,eAAe,OAAO,SAAS,YAAY,CAAC;AAClD,QAAM,eAAe,OAAO,SAAS,YAAY,CAAC;AAGlD,MAAI,gBAAgB,eAAe,iBAAiB,cAAc;AAChE,QAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAC7C;AAAA,EACF;AAKA,MAAI,eAAe,aAAa;AAC9B,QAAI,CAAC,eAAe,MAAmB,IAAiB,GAAG;AACzD,UAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAAA,IAC/C;AACA;AAAA,EACF;AAGA,MAAI,gBAAgB,cAAc;AAChC,UAAM,UAAU;AAChB,UAAM,UAAU;AAChB,UAAM,WAAW,OAAO,KAAK,OAAO;AACpC,UAAM,WAAW,OAAO,KAAK,OAAO;AAGpC,eAAW,OAAO,UAAU;AAC1B,YAAM,YAAY,OAAO,MAAM,kBAAkB,GAAG;AACpD,UAAI,EAAE,OAAO,UAAU;AACrB,YAAI,KAAK,EAAE,IAAI,UAAU,MAAM,UAAU,CAAC;AAAA,MAC5C,OAAO;AACL,QAAAA,MAAK,QAAQ,GAAG,GAAG,QAAQ,GAAG,GAAG,WAAW,GAAG;AAAA,MACjD;AAAA,IACF;AAEA,eAAW,OAAO,UAAU;AAC1B,UAAI,EAAE,OAAO,UAAU;AACrB,YAAI,KAAK;AAAA,UACP,IAAI;AAAA,UACJ,MAAM,OAAO,MAAM,kBAAkB,GAAG;AAAA,UACxC,OAAO,QAAQ,GAAG;AAAA,QACpB,CAAC;AAAA,MACH;AAAA,IACF;AACA;AAAA,EACF;AAGA,MAAI,KAAK,EAAE,IAAI,WAAW,MAAM,OAAO,KAAK,CAAC;AAC/C;AAEA,SAAS,eAAe,GAAc,GAAuB;AAC3D,MAAI,EAAE,WAAW,EAAE,OAAQ,QAAO;AAClC,WAAS,IAAI,GAAG,IAAI,EAAE,QAAQ,KAAK;AACjC,QAAI,CAAC,UAAU,EAAE,CAAC,GAAG,EAAE,CAAC,CAAC,EAAG,QAAO;AAAA,EACrC;AACA,SAAO;AACT;AAEA,SAAS,UAAU,GAAY,GAAqB;AAClD,MAAI,MAAM,EAAG,QAAO;AACpB,MAAI,MAAM,QAAQ,MAAM,KAAM,QAAO;AACrC,MAAI,OAAO,MAAM,OAAO,EAAG,QAAO;AAClC,MAAI,OAAO,MAAM,SAAU,QAAO;AAClC,QAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,QAAM,SAAS,MAAM,QAAQ,CAAC;AAC9B,MAAI,WAAW,OAAQ,QAAO;AAC9B,MAAI,UAAU,OAAQ,QAAO,eAAe,GAAG,CAAc;AAC7D,QAAM,OAAO;AACb,QAAM,OAAO;AACb,QAAM,QAAQ,OAAO,KAAK,IAAI;AAC9B,QAAM,QAAQ,OAAO,KAAK,IAAI;AAC9B,MAAI,MAAM,WAAW,MAAM,OAAQ,QAAO;AAC1C,aAAW,OAAO,OAAO;AACvB,QAAI,EAAE,OAAO,MAAO,QAAO;AAC3B,QAAI,CAAC,UAAU,KAAK,GAAG,GAAG,KAAK,GAAG,CAAC,EAAG,QAAO;AAAA,EAC/C;AACA,SAAO;AACT;AAsBO,SAAS,WAAwB,MAAS,OAAqB;AACpE,MAAI,SAAkB,MAAM,IAAI;AAChC,aAAW,MAAM,OAAO;AACtB,aAAS,QAAQ,QAAQ,EAAE;AAAA,EAC7B;AACA,SAAO;AACT;AAEA,SAAS,QAAQ,KAAc,IAA0B;AAIvD,MAAI,GAAG,SAAS,IAAI;AAClB,QAAI,GAAG,OAAO,SAAU,QAAO;AAC/B,WAAO,MAAM,GAAG,KAAK;AAAA,EACvB;AAEA,QAAM,WAAW,UAAU,GAAG,IAAI;AAClC,SAAO,aAAa,KAAK,UAAU,EAAE;AACvC;AAEA,SAAS,aACP,K
ACA,UACA,IACS;AACT,MAAI,SAAS,WAAW,GAAG;AAEzB,UAAM,IAAI,MAAM,+CAA+C;AAAA,EACjE;AAEA,QAAM,CAAC,MAAM,GAAG,IAAI,IAAI;AACxB,MAAI,SAAS,OAAW,OAAM,IAAI,MAAM,iCAAiC;AAEzE,MAAI,KAAK,WAAW,GAAG;AACrB,WAAO,gBAAgB,KAAK,MAAM,EAAE;AAAA,EACtC;AAIA,MAAI,MAAM,QAAQ,GAAG,GAAG;AACtB,UAAM,MAAM,gBAAgB,MAAM,IAAI,MAAM;AAC5C,UAAM,QAAQ,IAAI,GAAG;AACrB,UAAM,WAAW,aAAa,OAAO,MAAM,EAAE;AAC7C,UAAM,OAAO,IAAI,MAAM;AACvB,SAAK,GAAG,IAAI;AACZ,WAAO;AAAA,EACT;AACA,MAAI,QAAQ,QAAQ,OAAO,QAAQ,UAAU;AAC3C,UAAM,MAAM;AACZ,QAAI,EAAE,QAAQ,MAAM;AAClB,YAAM,IAAI,MAAM,6BAA6B,IAAI,uBAAuB;AAAA,IAC1E;AACA,UAAM,WAAW,aAAa,IAAI,IAAI,GAAG,MAAM,EAAE;AACjD,WAAO,EAAE,GAAG,KAAK,CAAC,IAAI,GAAG,SAAS;AAAA,EACpC;AACA,QAAM,IAAI;AAAA,IACR,gCAAgC,OAAO,GAAG,gBAAgB,IAAI;AAAA,EAChE;AACF;AAEA,SAAS,gBACP,KACA,SACA,IACS;AACT,MAAI,MAAM,QAAQ,GAAG,GAAG;AACtB,UAAM,MACJ,YAAY,MAAM,IAAI,SAAS,gBAAgB,SAAS,IAAI,SAAS,CAAC;AACxE,UAAM,OAAO,IAAI,MAAM;AACvB,QAAI,GAAG,OAAO,UAAU;AACtB,WAAK,OAAO,KAAK,CAAC;AAClB,aAAO;AAAA,IACT;AACA,QAAI,GAAG,OAAO,OAAO;AACnB,WAAK,OAAO,KAAK,GAAG,MAAM,GAAG,KAAK,CAAC;AACnC,aAAO;AAAA,IACT;AACA,QAAI,GAAG,OAAO,WAAW;AACvB,UAAI,OAAO,IAAI,QAAQ;AACrB,cAAM,IAAI;AAAA,UACR,oDAAoD,GAAG;AAAA,QACzD;AAAA,MACF;AACA,WAAK,GAAG,IAAI,MAAM,GAAG,KAAK;AAC1B,aAAO;AAAA,IACT;AAAA,EACF;AACA,MAAI,QAAQ,QAAQ,OAAO,QAAQ,UAAU;AAC3C,UAAM,MAAM;AACZ,QAAI,GAAG,OAAO,UAAU;AACtB,UAAI,EAAE,WAAW,MAAM;AACrB,cAAM,IAAI;AAAA,UACR,sCAAsC,OAAO;AAAA,QAC/C;AAAA,MACF;AACA,YAAM,OAAO,EAAE,GAAG,IAAI;AACtB,aAAO,KAAK,OAAO;AACnB,aAAO;AAAA,IACT;AACA,QAAI,GAAG,OAAO,OAAO;AAEnB,aAAO,EAAE,GAAG,KAAK,CAAC,OAAO,GAAG,MAAM,GAAG,KAAK,EAAE;AAAA,IAC9C;AACA,QAAI,GAAG,OAAO,WAAW;AACvB,UAAI,EAAE,WAAW,MAAM;AACrB,cAAM,IAAI;AAAA,UACR,uCAAuC,OAAO;AAAA,QAChD;AAAA,MACF;AACA,aAAO,EAAE,GAAG,KAAK,CAAC,OAAO,GAAG,MAAM,GAAG,KAAK,EAAE;AAAA,IAC9C;AAAA,EACF;AACA,QAAM,IAAI;AAAA,IACR,4BAA4B,GAAG,EAAE,yBAAyB,OAAO;AAAA,EACnE;AACF;AAYA,SAAS,kBAAkB,SAAyB;AAClD,SAAO,QAAQ,QAAQ,MAAM,IAAI,EAAE,QAAQ,OAAO,IAAI;AACxD;AAEA,SAAS,oBAAoB,SAAyB;AACpD,SAAO,QAAQ,QAAQ,OAAO,GAAG,EAAE,QAAQ,OAAO,GAAG;AACvD;AAEA,SAAS,UAAU,MAAwB;AACzC,MAAI,CAAC,KAAK,WAAW,GAAG,GAAG;AACzB,UAAM,IAAI,MAAM,8CAA8C,IAAI,GAAG;AAAA,EACvE;AACA,SAAO,KACJ,MAAM,CAAC,EACP,MAAM,GAAG,EACT,IAAI,mBAAmB;AAC5B;AAEA,SAAS,gBAAgB,SAAiB,KAAqB;AAC7D,MAAI,CAAC,QAAQ,KAAK,OAAO,GAAG;AAC1B,UAAM,IAAI;AAAA,MACR,gEAAgE,OAAO;AAAA,IACzE;AAAA,EACF;AACA,QAAM,MAAM,OAAO,SAAS,SAAS,EAAE;AACvC,MAAI,MAAM,KAAK,MAAM,KAAK;AACxB,UAAM,IAAI;AAAA,MACR,2BAA2B,GAAG,qBAAqB,GAAG;AAAA,IACxD;AAAA,EACF;AACA,SAAO;AACT;AAeA,SAAS,MAAS,OAAa;AAC7B,MAAI,UAAU,QAAQ,UAAU,OAAW,QAAO;AAClD,MAAI,OAAO,UAAU,SAAU,QAAO;AACtC,SAAO,KAAK,MAAM,KAAK,UAAU,KAAK,CAAC;AACzC;;;AC5WO,IAAM,oBAAoB;AAuB1B,IAAM,2BAA2B;;;ACfxC,eAAsB,oBACpB,UACiB;AACjB,MAAI,CAAC,SAAU,QAAO;AAMtB,SAAO,UAAU,SAAS,KAAK;AACjC;;;ACoCA,IAAM,sBAAsB;AA4DrB,IAAM,cAAN,MAAkB;AAAA,EACN;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBT,YAAqE;AAAA,EAE7E,YAAY,MAMT;AACD,SAAK,UAAU,KAAK;AACpB,SAAK,QAAQ,KAAK;AAClB,SAAK,YAAY,KAAK;AACtB,SAAK,SAAS,KAAK;AACnB,SAAK,QAAQ,KAAK;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAc,gBAAsE;AAClF,QAAI,KAAK,cAAc,OAAW,QAAO,KAAK;AAC9C,UAAM,UAAU,MAAM,KAAK,eAAe;AAC1C,UAAM,OAAO,QAAQ,QAAQ,SAAS,CAAC;AACvC,QAAI,CAAC,MAAM;AACT,WAAK,YAAY;AACjB,aAAO;AAAA,IACT;AACA,SAAK,YAAY,EAAE,OAAO,MAAM,MAAM,MAAM,UAAU,IAAI,EAAE;AAC5D,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CA,MAAM,OAAO,OAA0C;AACrD,QAAI;A
ACJ,aAAS,UAAU,GAAG,UAAU,qBAAqB,WAAW;AAI9D,UAAI,UAAU,GAAG;AACf,aAAK,YAAY;AAAA,MACnB;AACA,UAAI;AACF,eAAO,MAAM,KAAK,WAAW,KAAK;AAAA,MACpC,SAAS,KAAK;AACZ,YAAI,eAAe,eAAe;AAChC,yBAAe;AACf,cAAI,UAAU,sBAAsB,GAAG;AACrC,kBAAM,aAAa,OAAO;AAAA,UAC5B;AACA;AAAA,QACF;AACA,cAAM;AAAA,MACR;AAAA,IACF;AACA,SAAK;AACL,UAAM,IAAI,sBAAsB,mBAAmB;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAc,WAAW,OAA0C;AACjE,UAAM,SAAS,MAAM,KAAK,cAAc;AACxC,UAAM,YAAY,QAAQ;AAC1B,UAAM,WAAW,QAAQ,QAAQ;AACjC,UAAM,YAAY,YAAY,UAAU,QAAQ,IAAI;AAIpD,QAAI;AACJ,QAAI;AACJ,QAAI,MAAM,UAAU,QAAW;AAC7B,sBAAgB,MAAM,KAAK,aAAa,MAAM,KAAK;AACnD,kBAAY,MAAM,UAAU,cAAc,KAAK;AAAA,IACjD;AAKA,UAAM,YAAY;AAAA,MAChB,OAAO;AAAA,MACP;AAAA,MACA,IAAI,MAAM;AAAA,MACV,YAAY,MAAM;AAAA,MAClB,IAAI,MAAM;AAAA,MACV,SAAS,MAAM;AAAA,MACf,KAAI,oBAAI,KAAK,GAAE,YAAY;AAAA,MAC3B,OAAO,MAAM,UAAU,KAAK,KAAK,QAAQ,MAAM;AAAA,MAC/C,aAAa,MAAM;AAAA,IACrB;AACA,UAAM,QACJ,cAAc,SACV,EAAE,GAAG,WAAW,UAAU,IAC1B;AAEN,UAAM,WAAW,MAAM,KAAK,aAAa,KAAK;AAG9C,UAAM,KAAK,QAAQ;AAAA,MACjB,KAAK;AAAA,MACL;AAAA,MACA,YAAY,MAAM,KAAK;AAAA,MACvB;AAAA,MACA;AAAA,IACF;AAGA,QAAI,eAAe;AACjB,YAAM,KAAK,QAAQ;AAAA,QACjB,KAAK;AAAA,QACL;AAAA,QACA,YAAY,MAAM,KAAK;AAAA,QACvB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAIA,SAAK,YAAY,EAAE,OAAO,MAAM,MAAM,UAAU,KAAK,EAAE;AACvD,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,UAAU,OAA0C;AACxD,UAAM,WAAW,MAAM,KAAK,QAAQ;AAAA,MAClC,KAAK;AAAA,MACL;AAAA,MACA,YAAY,KAAK;AAAA,IACnB;AACA,QAAI,CAAC,SAAU,QAAO;AACtB,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO,KAAK,MAAM,SAAS,KAAK;AAAA,IAClC;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,OAAO,MAAM,QAAQ,SAAS,KAAK,SAAS,OAAO,GAAG;AAC5D,WAAO,KAAK,MAAM,IAAI;AAAA,EACxB;AAAA;AAAA,EAGA,MAAc,aAAa,OAA8C;AACvE,UAAM,OAAO,KAAK,UAAU,KAAK;AACjC,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,IAAI;AAAA,QACJ,MAAK,oBAAI,KAAK,GAAE,YAAY;AAAA,QAC5B,KAAK;AAAA,QACL,OAAO;AAAA,QACP,KAAK,KAAK;AAAA,MACZ;AAAA,IACF;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,EAAE,IAAI,KAAK,IAAI,MAAM,QAAQ,MAAM,GAAG;AAC5C,WAAO;AAAA,MACL,QAAQ;AAAA,MACR,IAAI;AAAA,MACJ,MAAK,oBAAI,KAAK,GAAE,YAAY;AAAA,MAC5B,KAAK;AAAA,MACL,OAAO;AAAA,MACP,KAAK,KAAK;AAAA,IACZ;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,iBAAyC;AAC7C,UAAM,OAAO,MAAM,KAAK,QAAQ,KAAK,KAAK,OAAO,iBAAiB;AAGlE,SAAK,KAAK;AACV,UAAM,UAAyB,CAAC;AAChC,eAAW,OAAO,MAAM;AACtB,YAAM,WAAW,MAAM,KAAK,QAAQ;AAAA,QAClC,KAAK;AAAA,QACL;AAAA,QACA;AAAA,MACF;AACA,UAAI,CAAC,SAAU;AACf,cAAQ,KAAK,MAAM,KAAK,aAAa,QAAQ,CAAC;AAAA,IAChD;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,OAGJ;AACA,UAAM,SAAS,MAAM,KAAK,cAAc;AACxC,QAAI,CAAC,OAAQ,QAAO;AAGpB,WAAO;AAAA,MACL,OAAO,OAAO;AAAA,MACd,MAAM,OAAO;AAAA,MACb,QAAQ,OAAO,MAAM,QAAQ;AAAA,IAC/B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,QAAQ,OAAuC,CAAC,GAA2B;AAC/E,UAAM,MAAM,MAAM,KAAK,eAAe;AACtC,UAAM,OAAO,KAAK,IAAI,GAAG,KAAK,QAAQ,CAAC;AACvC,UAAM,KAAK,KAAK,IAAI,IAAI,QAAQ,KAAK,MAAM,IAAI,MAAM;AACrD,WAAO,IAAI,MAAM,MAAM,EAAE;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA+CA,MAAM,YACJ,YACA,IACA,SACA,WACmB;AACnB,UAAM,MAAM,MAAM,KAAK,eAAe;AAEtC,UAAM,WAAW,IAAI;AAAA,MACnB,CAAC,MAAM,EAAE,eAAe,cAAc,EAAE,OAAO;AAAA,IACjD;AACA,QAAI,SAAS,WAAW,GAAG;AAKzB,aAAO;AAAA,IACT;AAIA,QAAI,QAAkB;AACtB,aAAS,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,KAAK;AAC7C,YAAM,QAAQ,SAAS,CAAC;AACxB,UAAI,CAAC,MAAO;AAMZ,UAAI,MAAM,YAAY,aAAa,MAAM,OAAO,UAAU;AACxD,eAAO;AAAA,MACT;AAEA,UAAI,MAAM,OAAO,UAAU;AAOzB,eAAO;AAAA,MACT;AAEA,UAAI,MAAM,cAAc,QAAW;AAOjC,YAAI,MAAM,YAAY
,UAAW,QAAO;AACxC,eAAO;AAAA,MACT;AAEA,YAAM,QAAQ,MAAM,KAAK,UAAU,MAAM,KAAK;AAC9C,UAAI,CAAC,OAAO;AAEV,eAAO;AAAA,MACT;AAEA,UAAI,UAAU,MAAM;AAGlB,eAAO;AAAA,MACT;AAEA,cAAQ,WAAW,OAAO,KAAK;AAAA,IACjC;AAIA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiCA,MAAM,SAAgC;AACpC,UAAM,UAAU,MAAM,KAAK,eAAe;AAC1C,QAAI,mBAAmB;AACvB,aAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,YAAM,QAAQ,QAAQ,CAAC;AACvB,UAAI,CAAC,MAAO;AACZ,UAAI,MAAM,aAAa,kBAAkB;AACvC,eAAO;AAAA,UACL,IAAI;AAAA,UACJ,YAAY;AAAA,UACZ,UAAU;AAAA,UACV,QAAQ,MAAM;AAAA,QAChB;AAAA,MACF;AACA,UAAI,MAAM,UAAU,GAAG;AAIrB,eAAO;AAAA,UACL,IAAI;AAAA,UACJ,YAAY;AAAA,UACZ,UAAU,SAAS,CAAC;AAAA,UACpB,QAAQ,SAAS,MAAM,KAAK;AAAA,QAC9B;AAAA,MACF;AACA,yBAAmB,MAAM,UAAU,KAAK;AAAA,IAC1C;AACA,WAAO;AAAA,MACL,IAAI;AAAA,MACJ,MAAM;AAAA,MACN,QAAQ,QAAQ;AAAA,IAClB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAc,aAAa,OAAgD;AACzE,UAAM,OAAO,cAAc,KAAK;AAChC,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO;AAAA,QACL,QAAQ;AAAA,QACR,IAAI,MAAM,QAAQ;AAAA,QAClB,KAAK,MAAM;AAAA,QACX,KAAK;AAAA,QACL,OAAO;AAAA,QACP,KAAK,MAAM;AAAA,MACb;AAAA,IACF;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,EAAE,IAAI,KAAK,IAAI,MAAM,QAAQ,MAAM,GAAG;AAC5C,WAAO;AAAA,MACL,QAAQ;AAAA,MACR,IAAI,MAAM,QAAQ;AAAA,MAClB,KAAK,MAAM;AAAA,MACX,KAAK;AAAA,MACL,OAAO;AAAA,MACP,KAAK,MAAM;AAAA,IACb;AAAA,EACF;AAAA;AAAA,EAGA,MAAc,aAAa,UAAmD;AAC5E,QAAI,CAAC,KAAK,WAAW;AACnB,aAAO,KAAK,MAAM,SAAS,KAAK;AAAA,IAClC;AACA,UAAM,MAAM,MAAM,KAAK,OAAO,iBAAiB;AAC/C,UAAM,OAAO,MAAM,QAAQ,SAAS,KAAK,SAAS,OAAO,GAAG;AAC5D,WAAO,KAAK,MAAM,IAAI;AAAA,EACxB;AACF;AAaA,SAAS,aAAa,SAAgC;AACpD,QAAM,OAAO,IAAI,KAAK,IAAI,GAAG,OAAO;AACpC,QAAM,SAAS,KAAK,OAAO,IAAI;AAC/B,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,OAAO,MAAM,CAAC;AACpE;;;ACnkBO,IAAM,eAAN,MAAmB;AAAA,EACxB,YACmB,QAED,WAChB;AAHiB;AAED;AAAA,EACf;AAAA,EAHgB;AAAA,EAED;AAAA;AAAA,EAIlB,WAAwB,MAAoC;AAC1D,WAAO,IAAI,kBAAqB,KAAK,QAAQ,KAAK,WAAW,IAAI;AAAA,EACnE;AACF;AAYO,IAAM,oBAAN,MAAqC;AAAA,EAC1C,YACmB,QACA,UACD,MAChB;AAHiB;AACA;AACD;AAAA,EACf;AAAA,EAHgB;AAAA,EACA;AAAA,EACD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQlB,MAAM,IAAI,IAA+B;AACvC,UAAM,WAAW,MAAM,KAAK,gBAAgB,EAAE;AAC9C,QAAI,CAAC,SAAU,QAAO;AACtB,UAAM,YAAY,KAAK,OAAO,YAC1B,MAAM,QAAQ,SAAS,KAAK,SAAS,OAAO,MAAM,KAAK,OAAO,OAAO,KAAK,IAAI,CAAC,IAC/E,SAAS;AACb,WAAO,KAAK,MAAM,SAAS;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,OAA0B;AAC9B,UAAM,aAAa,MAAM,kBAAkB,KAAK,OAAO,SAAS,KAAK,OAAO,MAAM,KAAK,IAAI;AAC3F,UAAM,UAAU,MAAM,KAAK,OAAO,QAAQ,KAAK,KAAK,OAAO,MAAM,KAAK,IAAI;AAC1E,UAAM,eAAe,oBAAI,IAAY,CAAC,GAAG,YAAY,GAAG,OAAO,CAAC;AAChE,UAAM,QAAkB,CAAC;AACzB,eAAW,MAAM,cAAc;AAC7B,YAAM,MAAM,MAAM,KAAK,gBAAgB,EAAE;AACzC,UAAI,IAAK,OAAM,KAAK,EAAE;AAAA,IACxB;AACA,WAAO,MAAM,KAAK;AAAA,EACpB;AAAA;AAAA,EAIA,MAAM,IAAI,KAAa,SAA4B;AACjD,UAAM,IAAI,uBAAuB,OAAO,KAAK,QAAQ;AAAA,EACvD;AAAA,EACA,MAAM,OAAO,KAA6B;AACxC,UAAM,IAAI,uBAAuB,UAAU,KAAK,QAAQ;AAAA,EAC1D;AAAA,EACA,MAAM,OAAO,KAAa,QAAoC;AAC5D,UAAM,IAAI,uBAAuB,UAAU,KAAK,QAAQ;AAAA,EAC1D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkCA,MAAc,gBAAgB,IAA+C;AAC3E,UAAM,SAAS,KAAK,OAAO,UAAU;AACrC,QAAI,QAAQ;AACV,aAAO,KAAK,iBAAiB,IAAI,MAAM;AAAA,IACzC;AACA,WAAO,KAAK,qBAAqB,EAAE;AAAA,EACrC;AAAA,EAEA,MAAc,iBAAiB,IAAY,QAAwD;AACjG,UAAM,UAAU,MAAM,OAAO,QAAQ;AAErC,QAAI,SAA2D;AAC/D,eAAW,KAAK,SAAS;AACvB,UAAI,EAAE,eAAe,KAAK,QAAQ,EAAE,OAAO,GAAI;AAC/C,UAAI,EAAE,KAAK,KAAK,SAAU;AAC1B,eAAS,EAAE,IAAI,EAAE,IAAI,SAAS,EAAE,QAAQ;AAAA,IAC1C;AACA,QAAI,CAA
C,OAAQ,QAAO;AACpB,QAAI,OAAO,OAAO,SAAU,QAAO;AACnC,WAAO,KAAK,YAAY,IAAI,OAAO,OAAO;AAAA,EAC5C;AAAA,EAEA,MAAc,qBAAqB,IAA+C;AAChF,UAAM,UAAU,MAAM;AAAA,MACpB,KAAK,OAAO;AAAA,MAAS,KAAK,OAAO;AAAA,MAAM,KAAK;AAAA,MAAM;AAAA,IACpD;AACA,UAAM,OAAO,MAAM,KAAK,OAAO,QAAQ,IAAI,KAAK,OAAO,MAAM,KAAK,MAAM,EAAE;AAC1E,UAAM,YAAY,oBAAI,IAA+B;AACrD,eAAW,KAAK,QAAS,WAAU,IAAI,EAAE,IAAI,CAAC;AAC9C,QAAI,KAAM,WAAU,IAAI,KAAK,IAAI,IAAI;AACrC,UAAM,SAAS,CAAC,GAAG,UAAU,OAAO,CAAC,EAAE;AAAA,MAAK,CAAC,GAAG,MAC9C,EAAE,MAAM,EAAE,MAAM,IAAI,EAAE,MAAM,EAAE,MAAM,KAAK;AAAA,IAC3C;AACA,WAAO,OAAO,KAAK,CAAC,MAAM,EAAE,OAAO,KAAK,QAAQ,KAAK;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAc,YAAY,IAAY,SAAoD;AACxF,UAAM,OAAO,MAAM,KAAK,OAAO,QAAQ,IAAI,KAAK,OAAO,MAAM,KAAK,MAAM,EAAE;AAC1E,QAAI,QAAQ,KAAK,OAAO,QAAS,QAAO;AAGxC,UAAMC,aAAY,GAAG,KAAK,IAAI,IAAI,EAAE,IAAI,OAAO,OAAO,EAAE,SAAS,IAAI,GAAG,CAAC;AACzE,WAAO,MAAM,KAAK,OAAO,QAAQ,IAAI,KAAK,OAAO,MAAM,YAAYA,UAAS;AAAA,EAC9E;AACF;AASA,eAAe,kBACb,SACA,OACA,YACmB;AACnB,QAAM,MAAM,MAAM,QAAQ,KAAK,OAAO,UAAU;AAChD,QAAM,SAAS,GAAG,UAAU;AAC5B,QAAM,OAAO,oBAAI,IAAY;AAC7B,aAAW,OAAO,KAAK;AACrB,QAAI,CAAC,IAAI,WAAW,MAAM,EAAG;AAC7B,UAAM,YAAY,IAAI,YAAY,GAAG;AACrC,QAAI,aAAa,OAAO,OAAQ;AAChC,UAAM,SAAS,IAAI,MAAM,OAAO,QAAQ,SAAS;AACjD,SAAK,IAAI,MAAM;AAAA,EACjB;AACA,SAAO,CAAC,GAAG,IAAI;AACjB;;;ACtOO,SAAS,cAA+B;AAC7C,SAAO;AAAA,IACL;AAAA,IACA,mBAAmB;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,YAAY,MAAuC;AACjD,aAAO,IAAI,YAAY;AAAA,QACrB,SAAS,KAAK;AAAA,QACd,OAAO,KAAK;AAAA,QACZ,WAAW,KAAK;AAAA,QAChB,QAAQ,KAAK;AAAA,QACb,OAAO,KAAK;AAAA,MACd,CAAC;AAAA,IACH;AAAA,IACA,kBAAkB,QAAQ,WAAyB;AACjD,aAAO,IAAI,aAAa,QAAQ,SAAS;AAAA,IAC3C;AAAA,EACF;AACF;","names":["diff","historyId"]}