@typicalday/firegraph 0.11.2 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/README.md +355 -78
  2. package/dist/backend-DuvHGgK1.d.cts +1897 -0
  3. package/dist/backend-DuvHGgK1.d.ts +1897 -0
  4. package/dist/backend.cjs +365 -5
  5. package/dist/backend.cjs.map +1 -1
  6. package/dist/backend.d.cts +25 -5
  7. package/dist/backend.d.ts +25 -5
  8. package/dist/backend.js +209 -7
  9. package/dist/backend.js.map +1 -1
  10. package/dist/chunk-2DHMNTV6.js +16 -0
  11. package/dist/chunk-2DHMNTV6.js.map +1 -0
  12. package/dist/chunk-4MMQ5W74.js +288 -0
  13. package/dist/chunk-4MMQ5W74.js.map +1 -0
  14. package/dist/{chunk-5753Y42M.js → chunk-C2QMD7RY.js} +6 -10
  15. package/dist/chunk-C2QMD7RY.js.map +1 -0
  16. package/dist/chunk-D4J7Z4FE.js +67 -0
  17. package/dist/chunk-D4J7Z4FE.js.map +1 -0
  18. package/dist/chunk-EQJUUVFG.js +14 -0
  19. package/dist/chunk-EQJUUVFG.js.map +1 -0
  20. package/dist/chunk-N5HFDWQX.js +23 -0
  21. package/dist/chunk-N5HFDWQX.js.map +1 -0
  22. package/dist/chunk-PAD7WFFU.js +573 -0
  23. package/dist/chunk-PAD7WFFU.js.map +1 -0
  24. package/dist/chunk-TK64DNVK.js +256 -0
  25. package/dist/chunk-TK64DNVK.js.map +1 -0
  26. package/dist/{chunk-NJSOD64C.js → chunk-WRTFC5NG.js} +438 -30
  27. package/dist/chunk-WRTFC5NG.js.map +1 -0
  28. package/dist/client-BKi3vk0Q.d.ts +34 -0
  29. package/dist/client-BrsaXtDV.d.cts +34 -0
  30. package/dist/cloudflare/index.cjs +1386 -74
  31. package/dist/cloudflare/index.cjs.map +1 -1
  32. package/dist/cloudflare/index.d.cts +217 -13
  33. package/dist/cloudflare/index.d.ts +217 -13
  34. package/dist/cloudflare/index.js +639 -180
  35. package/dist/cloudflare/index.js.map +1 -1
  36. package/dist/codegen/index.d.cts +1 -1
  37. package/dist/codegen/index.d.ts +1 -1
  38. package/dist/errors-BRc3I_eH.d.cts +73 -0
  39. package/dist/errors-BRc3I_eH.d.ts +73 -0
  40. package/dist/firestore-enterprise/index.cjs +3877 -0
  41. package/dist/firestore-enterprise/index.cjs.map +1 -0
  42. package/dist/firestore-enterprise/index.d.cts +141 -0
  43. package/dist/firestore-enterprise/index.d.ts +141 -0
  44. package/dist/firestore-enterprise/index.js +985 -0
  45. package/dist/firestore-enterprise/index.js.map +1 -0
  46. package/dist/firestore-standard/index.cjs +3117 -0
  47. package/dist/firestore-standard/index.cjs.map +1 -0
  48. package/dist/firestore-standard/index.d.cts +49 -0
  49. package/dist/firestore-standard/index.d.ts +49 -0
  50. package/dist/firestore-standard/index.js +283 -0
  51. package/dist/firestore-standard/index.js.map +1 -0
  52. package/dist/index.cjs +809 -534
  53. package/dist/index.cjs.map +1 -1
  54. package/dist/index.d.cts +24 -100
  55. package/dist/index.d.ts +24 -100
  56. package/dist/index.js +184 -531
  57. package/dist/index.js.map +1 -1
  58. package/dist/registry-Bc7h6WTM.d.cts +64 -0
  59. package/dist/registry-C2KUPVZj.d.ts +64 -0
  60. package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.cts} +1 -56
  61. package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.ts} +1 -56
  62. package/dist/{serialization-ZZ7RSDRX.js → serialization-OE2PFZMY.js} +6 -4
  63. package/dist/sqlite/index.cjs +3631 -0
  64. package/dist/sqlite/index.cjs.map +1 -0
  65. package/dist/sqlite/index.d.cts +111 -0
  66. package/dist/sqlite/index.d.ts +111 -0
  67. package/dist/sqlite/index.js +1164 -0
  68. package/dist/sqlite/index.js.map +1 -0
  69. package/package.json +33 -3
  70. package/dist/backend-U-MLShlg.d.ts +0 -97
  71. package/dist/backend-np4gEVhB.d.cts +0 -97
  72. package/dist/chunk-5753Y42M.js.map +0 -1
  73. package/dist/chunk-NJSOD64C.js.map +0 -1
  74. package/dist/chunk-R7CRGYY4.js +0 -94
  75. package/dist/chunk-R7CRGYY4.js.map +0 -1
  76. package/dist/types-BGWxcpI_.d.cts +0 -736
  77. package/dist/types-BGWxcpI_.d.ts +0 -736
  78. package/dist/{serialization-ZZ7RSDRX.js.map → serialization-OE2PFZMY.js.map} +0 -0
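
Beyond the new backend entry points visible in the file list (firestore-standard, firestore-enterprise, sqlite, cloudflare), the bundled sources in the hunk below describe a revised write contract: `putNode`/`putEdge` and `updateNode`/`updateEdge` now deep-merge, `replaceNode`/`replaceEdge` are the explicit full-overwrite path, and a `deleteField()` sentinel removes individual fields. The sketch below is orientation only; it assumes `deleteField` is exported from the package root and that the client methods follow the shapes quoted in the bundled doc comments.

```ts
// Sketch only: the import path for deleteField and the exact GraphClient
// method signatures are assumptions inferred from the bundled doc comments,
// not from the package README.
import { deleteField } from '@typicalday/firegraph';

// Hypothetical structural type standing in for the real GraphClient surface.
declare const client: {
  putNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void>;
  replaceNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void>;
  updateNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void>;
};
declare const uid: string;

async function demo(): Promise<void> {
  // put* now deep-merges: sibling keys at any depth survive the write.
  await client.putNode('tour', uid, { attrs: { color: 'red' } });

  // replace* remains the explicit full-overwrite escape hatch.
  await client.replaceNode('tour', uid, { attrs: { theme: 'dark' } });

  // deleteField() removes a single field inside updateNode/updateEdge
  // (call shape quoted from the bundled deleteField doc comment).
  await client.updateNode('tour', uid, {
    attrs: { obsoleteFlag: deleteField() },
  });
}
```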
@@ -0,0 +1 @@
+ {"version":3,"sources":["../../src/internal/serialization-tag.ts","../../src/serialization.ts","../../src/sqlite/index.ts","../../src/docid.ts","../../src/internal/constants.ts","../../src/internal/write-plan.ts","../../src/batch.ts","../../src/dynamic-registry.ts","../../src/errors.ts","../../src/json-schema.ts","../../src/migration.ts","../../src/scope.ts","../../src/registry.ts","../../src/sandbox.ts","../../src/query.ts","../../src/query-safety.ts","../../src/transaction.ts","../../src/client.ts","../../src/id.ts","../../src/internal/backend.ts","../../src/internal/sqlite-data-ops.ts","../../src/internal/sqlite-payload-guard.ts","../../src/default-indexes.ts","../../src/internal/sqlite-schema.ts","../../src/timestamp.ts","../../src/sqlite/sql.ts","../../src/sqlite/backend.ts"],"sourcesContent":["/**\n * Firegraph serialization tag — split from `src/serialization.ts` so it can\n * be imported from Workers-facing code without dragging in\n * `@google-cloud/firestore`.\n *\n * The full serialization module (with Timestamp/GeoPoint round-tripping)\n * lives one folder up because the sandbox migration pipeline needs it; the\n * write-plan helper only needs to recognise tagged objects to keep them\n * terminal during patch flattening, so it imports just the tag from here.\n */\n\n/** Sentinel key used to tag serialized Firestore types. */\nexport const SERIALIZATION_TAG = '__firegraph_ser__' as const;\n\nconst KNOWN_TYPES = new Set(['Timestamp', 'GeoPoint', 'VectorValue', 'DocumentReference']);\n\n/** Check if a value is a tagged serialized Firestore type. */\nexport function isTaggedValue(value: unknown): boolean {\n if (value === null || typeof value !== 'object') return false;\n const tag = (value as Record<string, unknown>)[SERIALIZATION_TAG];\n return typeof tag === 'string' && KNOWN_TYPES.has(tag);\n}\n","/**\n * Firestore-aware serialization for the sandbox migration pipeline.\n *\n * Firestore documents can contain special types (Timestamp, GeoPoint,\n * VectorValue, DocumentReference) that don't survive plain JSON\n * round-tripping. This module provides tagged serialization: Firestore\n * types are wrapped in tagged plain objects before JSON marshaling and\n * reconstructed after.\n *\n * Only used by the `defaultExecutor` sandbox path. Static migrations\n * (in-memory functions) receive raw Firestore objects directly.\n */\n\nimport type { DocumentReference, Firestore } from '@google-cloud/firestore';\nimport { FieldValue, GeoPoint, Timestamp } from '@google-cloud/firestore';\n\n// ---------------------------------------------------------------------------\n// Constants\n// ---------------------------------------------------------------------------\n\n// SERIALIZATION_TAG and isTaggedValue live in `internal/serialization-tag.ts`\n// so Workers-facing code (e.g. `src/internal/write-plan.ts` and the\n// `firegraph/cloudflare` bundle) can recognise tagged values without\n// pulling in `@google-cloud/firestore`. 
Re-exported here so callers that\n// already import from `src/serialization.ts` keep working.\nexport { isTaggedValue, SERIALIZATION_TAG } from './internal/serialization-tag.js';\nimport { isTaggedValue, SERIALIZATION_TAG } from './internal/serialization-tag.js';\n\n// One-time warning for DocumentReference deserialization without db\nlet _docRefWarned = false;\n\n// ---------------------------------------------------------------------------\n// Detection helpers\n// ---------------------------------------------------------------------------\n\nfunction isTimestamp(value: unknown): value is Timestamp {\n return value instanceof Timestamp;\n}\n\nfunction isGeoPoint(value: unknown): value is GeoPoint {\n return value instanceof GeoPoint;\n}\n\nfunction isDocumentReference(value: unknown): value is DocumentReference {\n // Duck-type check: DocumentReference has path (string) and firestore properties\n if (value === null || typeof value !== 'object') return false;\n const v = value as Record<string, unknown>;\n return (\n typeof v.path === 'string' &&\n v.firestore !== undefined &&\n typeof v.id === 'string' &&\n v.constructor?.name === 'DocumentReference'\n );\n}\n\nfunction isVectorValue(value: unknown): boolean {\n if (value === null || typeof value !== 'object') return false;\n const v = value as Record<string, unknown>;\n return (\n v.constructor?.name === 'VectorValue' && Array.isArray((v as Record<string, unknown>)._values)\n );\n}\n\n// ---------------------------------------------------------------------------\n// Serialize\n// ---------------------------------------------------------------------------\n\n/**\n * Recursively walk a data object and replace Firestore types with tagged\n * plain objects suitable for JSON serialization.\n *\n * Returns a new object tree — the input is never mutated.\n */\nexport function serializeFirestoreTypes(data: Record<string, unknown>): Record<string, unknown> {\n return serializeValue(data) as Record<string, unknown>;\n}\n\nfunction serializeValue(value: unknown): unknown {\n // Primitives\n if (value === null || value === undefined) return value;\n if (typeof value !== 'object') return value;\n\n // Firestore types (check before generic object/array)\n if (isTimestamp(value)) {\n return {\n [SERIALIZATION_TAG]: 'Timestamp',\n seconds: value.seconds,\n nanoseconds: value.nanoseconds,\n };\n }\n if (isGeoPoint(value)) {\n return {\n [SERIALIZATION_TAG]: 'GeoPoint',\n latitude: value.latitude,\n longitude: value.longitude,\n };\n }\n if (isDocumentReference(value)) {\n return { [SERIALIZATION_TAG]: 'DocumentReference', path: (value as DocumentReference).path };\n }\n if (isVectorValue(value)) {\n // Prefer toArray() (public API) over _values (private internal property)\n const v = value as Record<string, unknown>;\n const values =\n typeof v.toArray === 'function' ? 
(v.toArray as () => number[])() : (v._values as number[]);\n return { [SERIALIZATION_TAG]: 'VectorValue', values: [...values] };\n }\n\n // Arrays\n if (Array.isArray(value)) {\n return value.map(serializeValue);\n }\n\n // Plain objects — recurse\n const result: Record<string, unknown> = {};\n for (const key of Object.keys(value as Record<string, unknown>)) {\n result[key] = serializeValue((value as Record<string, unknown>)[key]);\n }\n return result;\n}\n\n// ---------------------------------------------------------------------------\n// Deserialize\n// ---------------------------------------------------------------------------\n\n/**\n * Recursively walk a data object and reconstruct Firestore types from\n * tagged plain objects.\n *\n * @param data - The data to deserialize (typically from JSON.parse)\n * @param db - Optional Firestore instance for DocumentReference reconstruction.\n * If not provided, tagged DocumentReferences are left as-is with a one-time warning.\n *\n * Returns a new object tree — the input is never mutated.\n */\nexport function deserializeFirestoreTypes(\n data: Record<string, unknown>,\n db?: Firestore,\n): Record<string, unknown> {\n return deserializeValue(data, db) as Record<string, unknown>;\n}\n\nfunction deserializeValue(value: unknown, db?: Firestore): unknown {\n if (value === null || value === undefined) return value;\n if (typeof value !== 'object') return value;\n\n // Short-circuit for values that are already real Firestore types.\n // This makes deserializeFirestoreTypes idempotent — safe to call on data\n // that has already been deserialized (e.g., write-back after defaultExecutor\n // already reconstructed types, or static migrations that return raw types).\n if (\n isTimestamp(value) ||\n isGeoPoint(value) ||\n isDocumentReference(value) ||\n isVectorValue(value)\n ) {\n return value;\n }\n\n // Arrays\n if (Array.isArray(value)) {\n return value.map((v) => deserializeValue(v, db));\n }\n\n const obj = value as Record<string, unknown>;\n\n // Check for tagged Firestore type\n if (isTaggedValue(obj)) {\n const tag = obj[SERIALIZATION_TAG] as string;\n\n switch (tag) {\n case 'Timestamp':\n // Validate expected fields before reconstruction\n if (typeof obj.seconds !== 'number' || typeof obj.nanoseconds !== 'number') return obj;\n return new Timestamp(obj.seconds, obj.nanoseconds);\n\n case 'GeoPoint':\n if (typeof obj.latitude !== 'number' || typeof obj.longitude !== 'number') return obj;\n return new GeoPoint(obj.latitude, obj.longitude);\n\n case 'VectorValue':\n if (!Array.isArray(obj.values)) return obj;\n return FieldValue.vector(obj.values as number[]);\n\n case 'DocumentReference':\n if (typeof obj.path !== 'string') return obj;\n if (db) {\n return db.doc(obj.path);\n }\n // No db available — leave as tagged object with one-time warning\n if (!_docRefWarned) {\n _docRefWarned = true;\n console.warn(\n '[firegraph] DocumentReference encountered during migration deserialization ' +\n 'but no Firestore instance available. The reference will remain as a tagged ' +\n 'object with its path. 
Enable write-back for full reconstruction.',\n );\n }\n return obj;\n\n default:\n // Unknown tag — leave as-is (forward compatibility)\n return obj;\n }\n }\n\n // Plain object — recurse\n const result: Record<string, unknown> = {};\n for (const key of Object.keys(obj)) {\n result[key] = deserializeValue(obj[key], db);\n }\n return result;\n}\n","/**\n * Public entry point for the shared-table SQLite backend.\n *\n * Use this subpath when targeting better-sqlite3, libSQL, D1, or any other\n * SQLite-compatible driver via the `SqliteExecutor` interface. The backend\n * encodes subgraphs as a materialized `scope` column and shares its write\n * pipeline with the Cloudflare Durable Object backend.\n */\n\nexport { createGraphClient } from '../client.js';\nexport { META_EDGE_TYPE, META_NODE_TYPE } from '../dynamic-registry.js';\nexport { generateId } from '../id.js';\nexport { createMergedRegistry, createRegistry } from '../registry.js';\nexport type { SqliteBackendOptions, SqliteCapability } from './backend.js';\nexport { createSqliteBackend } from './backend.js';\n","import { createHash } from 'node:crypto';\n\nimport { SHARD_SEPARATOR } from './internal/constants.js';\n\nexport function computeNodeDocId(uid: string): string {\n return uid;\n}\n\nexport function computeEdgeDocId(aUid: string, axbType: string, bUid: string): string {\n const composite = `${aUid}${SHARD_SEPARATOR}${axbType}${SHARD_SEPARATOR}${bUid}`;\n const hash = createHash('sha256').update(composite).digest('hex');\n const shard = hash[0];\n return `${shard}${SHARD_SEPARATOR}${aUid}${SHARD_SEPARATOR}${axbType}${SHARD_SEPARATOR}${bUid}`;\n}\n","export const NODE_RELATION = 'is';\n\n/**\n * Default result limit applied to findEdges/findNodes queries\n * when no explicit limit is provided. Prevents unbounded result sets\n * that could be expensive on Enterprise Firestore.\n */\nexport const DEFAULT_QUERY_LIMIT = 500;\n\n/**\n * Fields that are part of the firegraph record structure (not user data).\n * Used by the query planner and safety analysis to distinguish builtin\n * fields from data.* fields.\n */\nexport const BUILTIN_FIELDS = new Set([\n 'aType',\n 'aUid',\n 'axbType',\n 'bType',\n 'bUid',\n 'createdAt',\n 'updatedAt',\n]);\n\nexport const SHARD_ALGORITHM = 'sha256';\nexport const SHARD_SEPARATOR = ':';\nexport const SHARD_BUCKETS = 16;\n","/**\n * Write-plan helper — flattens partial-update payloads into a list of\n * deep-path operations every backend can execute identically.\n *\n * Background: firegraph used to ship two write semantics that quietly\n * disagreed about depth.\n * - `putNode`/`putEdge` did a full document replace.\n * - `updateNode`/`updateEdge` did a one-level shallow merge: top-level\n * keys were preserved, but nested objects were replaced wholesale.\n *\n * Both behaviours dropped sibling keys silently. The 0.12 contract is that\n * `put*` and `update*` deep-merge by default (sibling keys at any depth\n * survive); `replace*` is the explicit escape hatch.\n *\n * `flattenPatch` walks a partial-update payload and emits one\n * {@link DataPathOp} per terminal value. Plain objects recurse; arrays,\n * primitives, Firestore special types, and tagged firegraph-serialization\n * objects are terminal (replaced as a unit). `undefined` values are\n * skipped; `null` is preserved as a real `null` write; the\n * {@link DELETE_FIELD} sentinel marks a field for removal.\n *\n * The output is deliberately backend-agnostic. 
Each backend translates ops\n * into its native dialect:\n * - Firestore: dotted field path → `data.a.b.c` for `update()`.\n * - SQLite / DO SQLite: `json_set(data, '$.a.b.c', ?)` /\n * `json_remove(data, '$.a.b.c')`.\n */\n\nimport { isTaggedValue, SERIALIZATION_TAG } from './serialization-tag.js';\n\n// ---------------------------------------------------------------------------\n// Public sentinel\n// ---------------------------------------------------------------------------\n\n/**\n * Sentinel returned by {@link deleteField}. Treated by all backends as\n * \"remove this field from the stored document\".\n *\n * Equivalent to Firestore's `FieldValue.delete()`, but works for SQLite\n * backends too. Use inside `updateNode`/`updateEdge` payloads.\n */\nexport const DELETE_FIELD: unique symbol = Symbol.for('firegraph.deleteField');\nexport type DeleteSentinel = typeof DELETE_FIELD;\n\n/**\n * Returns the firegraph delete sentinel. Place this anywhere in an\n * `updateNode`/`updateEdge` payload to remove the corresponding field.\n *\n * ```ts\n * await client.updateNode('tour', uid, {\n * attrs: { obsoleteFlag: deleteField() },\n * });\n * ```\n */\nexport function deleteField(): DeleteSentinel {\n return DELETE_FIELD;\n}\n\n/** Type guard for the delete sentinel. */\nexport function isDeleteSentinel(value: unknown): value is DeleteSentinel {\n return value === DELETE_FIELD;\n}\n\n// ---------------------------------------------------------------------------\n// Terminal-detection helpers\n// ---------------------------------------------------------------------------\n\nconst FIRESTORE_TERMINAL_CTOR = new Set([\n 'Timestamp',\n 'GeoPoint',\n 'VectorValue',\n 'DocumentReference',\n 'FieldValue',\n 'NumericIncrementTransform',\n 'ArrayUnionTransform',\n 'ArrayRemoveTransform',\n 'ServerTimestampTransform',\n 'DeleteTransform',\n]);\n\n/**\n * Should this value be written as a single terminal op (no recursion)?\n *\n * Plain JS objects (constructor === Object, or no prototype) are recursed.\n * Everything else — arrays, primitives, class instances, Firestore special\n * types, tagged serialization payloads — is terminal.\n */\nexport function isTerminalValue(value: unknown): boolean {\n if (value === null) return true;\n const t = typeof value;\n if (t !== 'object') return true;\n if (Array.isArray(value)) return true;\n // Tagged serialization payloads carry the SERIALIZATION_TAG sentinel and\n // should be persisted whole — never split into per-field ops.\n if (isTaggedValue(value)) return true;\n const proto = Object.getPrototypeOf(value);\n if (proto === null || proto === Object.prototype) return false;\n // Class instances — Firestore types or anything else exotic.\n const ctor = (value as { constructor?: { name?: string } }).constructor;\n if (ctor && typeof ctor.name === 'string' && FIRESTORE_TERMINAL_CTOR.has(ctor.name)) return true;\n // Unknown class instance: treat as terminal. Recursing into a class\n // instance is almost always wrong (Map, Set, Date, Buffer...).\n return true;\n}\n\n// ---------------------------------------------------------------------------\n// Core type\n// ---------------------------------------------------------------------------\n\n/**\n * Single terminal write operation produced by {@link flattenPatch}.\n *\n * `path` is a non-empty array of plain object keys. `value` is the value to\n * write; ignored when `delete` is `true`. 
Arrays / primitives / Firestore\n * special types appear here as whole terminal values.\n */\nexport interface DataPathOp {\n path: readonly string[];\n value: unknown;\n delete: boolean;\n}\n\n// ---------------------------------------------------------------------------\n// Path-segment validation\n// ---------------------------------------------------------------------------\n\n/**\n * Object keys that are safe to embed in SQLite `json_set`/`json_remove`\n * paths. The SQLite backend uses an allowlist regex too — keep these in\n * sync (see `JSON_PATH_KEY_RE` in `internal/sqlite-sql.ts` and\n * `cloudflare/sql.ts`).\n *\n * Allows: ASCII letters, digits, `_`, `-`. Must start with a letter or\n * underscore. This rejects keys containing dots, brackets, quotes, or\n * non-ASCII characters that could break path parsing or be used to\n * inject into the path expression.\n */\nconst SAFE_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;\n\n/**\n * Mutual-exclusion guard for {@link UpdatePayload}. The two branches of the\n * shape — `dataOps` (deep-merge) and `replaceData` (full replace) — are\n * structurally incompatible: combining them would tell the backend to\n * simultaneously merge AND wipe, and the three backends disagree on which\n * wins. This helper centralises the runtime check so all three backends\n * trip the same error.\n *\n * Imported as a runtime check from `firestore-backend`, `sqlite-sql`, and\n * `cloudflare/sql`. Backend authors implementing the public `StorageBackend`\n * contract should call it too.\n */\nexport function assertUpdatePayloadExclusive(update: {\n dataOps?: unknown;\n replaceData?: unknown;\n}): void {\n if (update.replaceData !== undefined && update.dataOps !== undefined) {\n throw new Error(\n 'firegraph: UpdatePayload cannot specify both `replaceData` and `dataOps`. ' +\n 'Use one or the other — `replaceData` is the migration-write-back form, ' +\n '`dataOps` is the standard partial-update form.',\n );\n }\n}\n\n/**\n * Reject `DELETE_FIELD` sentinels in payloads where field deletion isn't a\n * meaningful operation: full-document replace (`replaceNode`/`replaceEdge`)\n * and the merge-default put surface (`putNode`/`putEdge`).\n *\n * Why both:\n * - In **replace**, the entire `data` field is overwritten. A delete\n * sentinel in that payload either silently disappears (Firestore drops\n * the Symbol during `.set()` serialization) or produces an empty SQLite\n * `json_remove` no-op, depending on backend. Either way the caller's\n * intent — \"remove field X\" — is lost. Use `updateNode` instead.\n * - In **put** (merge mode), behaviour diverges across backends today:\n * SQLite's flattenPatch emits a real delete op, but Firestore's\n * `.set(..., {merge: true})` silently drops the Symbol. Until that's\n * fixed end-to-end, the safest contract is to reject sentinels at the\n * entry point and steer callers to `updateNode`.\n *\n * The walk mirrors `flattenPatch`: plain objects recurse, everything else\n * is terminal. Tagged serialization payloads short-circuit so we don't\n * recurse into the `__firegraph_ser__` envelope.\n */\nexport function assertNoDeleteSentinels(data: unknown, callerLabel: string): void {\n walkForDeleteSentinels(data, [], { kind: 'root' }, ({ path }) => {\n const where = path.length === 0 ? '<root>' : path.map((p) => JSON.stringify(p)).join(' > ');\n throw new Error(\n `firegraph: ${callerLabel} payload contains a deleteField() sentinel at ${where}. 
` +\n `deleteField() is only valid inside updateNode/updateEdge — full-data ` +\n `writes (put*, replace*) cannot delete individual fields. Use updateNode ` +\n `with a deleteField() value, or omit the field from the replace payload.`,\n );\n });\n}\n\ntype SentinelParent = { kind: 'root' } | { kind: 'object' } | { kind: 'array'; index: number };\n\nfunction walkForDeleteSentinels(\n node: unknown,\n path: readonly string[],\n parent: SentinelParent,\n visit: (ctx: { path: readonly string[]; parent: SentinelParent }) => void,\n): void {\n if (node === null || node === undefined) return;\n if (isDeleteSentinel(node)) {\n visit({ path, parent });\n return;\n }\n if (typeof node !== 'object') return;\n if (isTaggedValue(node)) return;\n if (Array.isArray(node)) {\n for (let i = 0; i < node.length; i++) {\n walkForDeleteSentinels(node[i], [...path, String(i)], { kind: 'array', index: i }, visit);\n }\n return;\n }\n const proto = Object.getPrototypeOf(node);\n if (proto !== null && proto !== Object.prototype) return;\n const obj = node as Record<string, unknown>;\n for (const key of Object.keys(obj)) {\n walkForDeleteSentinels(obj[key], [...path, key], { kind: 'object' }, visit);\n }\n}\n\n/** Throws if any path segment in the patch is unsafe for SQLite paths. */\nexport function assertSafePath(path: readonly string[]): void {\n for (const seg of path) {\n if (!SAFE_KEY_RE.test(seg)) {\n throw new Error(\n `firegraph: unsafe object key ${JSON.stringify(seg)} at path ${path\n .map((p) => JSON.stringify(p))\n .join(' > ')}. Keys used inside update payloads must match ` +\n `/^[A-Za-z_][A-Za-z0-9_-]*$/ so they can be embedded safely in ` +\n `SQLite JSON paths.`,\n );\n }\n }\n}\n\n// ---------------------------------------------------------------------------\n// flattenPatch\n// ---------------------------------------------------------------------------\n\n/**\n * Flatten a partial-update payload into a list of terminal {@link DataPathOp}s.\n *\n * Rules:\n * - Plain objects (no prototype or `Object.prototype`) recurse — each\n * key becomes another path segment.\n * - Arrays are terminal: writing `{tags: ['a']}` overwrites the whole\n * `tags` array. Element-wise array merging is intentionally NOT\n * supported — it's almost never what callers actually want, and\n * Firestore `arrayUnion`/`arrayRemove` give precise semantics when\n * they are.\n * - `undefined` values are skipped (no op generated). Use\n * {@link deleteField} if you actually want to remove a field.\n * - `null` is preserved verbatim — emits a terminal op with `value: null`.\n * - {@link DELETE_FIELD} produces an op with `delete: true`.\n * - Firestore special types and tagged serialization payloads are terminal.\n * - Class instances are terminal.\n *\n * Throws if any object key on the recursion path is unsafe (see\n * {@link assertSafePath}).\n */\nexport function flattenPatch(data: Record<string, unknown>): DataPathOp[] {\n const ops: DataPathOp[] = [];\n walk(data, [], ops);\n return ops;\n}\n\nfunction assertNoDeleteSentinelsInArrayValue(\n arr: readonly unknown[],\n arrayPath: readonly string[],\n): void {\n walkForDeleteSentinels(arr, arrayPath, { kind: 'root' }, ({ parent }) => {\n const arrayPathStr =\n arrayPath.length === 0 ? '<root>' : arrayPath.map((p) => JSON.stringify(p)).join(' > ');\n if (parent.kind === 'array') {\n throw new Error(\n `firegraph: deleteField() sentinel at index ${parent.index} inside an array at ` +\n `path ${arrayPathStr}. 
Arrays are ` +\n `terminal in update payloads (replaced as a unit), so the sentinel ` +\n `would be silently dropped by JSON serialization. To remove the ` +\n `field entirely, pass deleteField() in place of the whole array.`,\n );\n }\n throw new Error(\n `firegraph: deleteField() sentinel inside an array element at ` +\n `path ${arrayPathStr}. ` +\n `Arrays are terminal in update payloads — the sentinel would ` +\n `be silently dropped by JSON serialization.`,\n );\n });\n}\n\nfunction walk(node: unknown, path: string[], out: DataPathOp[]): void {\n // Caller guarantees the root is a plain object; this branch only\n // matters for recursion.\n if (node === undefined) return;\n if (isDeleteSentinel(node)) {\n if (path.length === 0) {\n throw new Error('firegraph: deleteField() cannot be the entire update payload.');\n }\n assertSafePath(path);\n out.push({ path: [...path], value: undefined, delete: true });\n return;\n }\n if (isTerminalValue(node)) {\n if (path.length === 0) {\n // `null` / array / primitive at the root is illegal — patches must\n // describe per-key changes.\n throw new Error(\n 'firegraph: update payload must be a plain object. Got ' +\n (node === null ? 'null' : Array.isArray(node) ? 'array' : typeof node) +\n '.',\n );\n }\n // A DELETE_FIELD sentinel embedded inside an array (which is terminal\n // and replaced as a unit) would silently disappear: JSON.stringify drops\n // Symbols, and Firestore's serializer does likewise. Reject loudly so\n // the divergence between \"user wrote a delete\" and \"field stayed put\"\n // can't happen.\n if (Array.isArray(node)) {\n assertNoDeleteSentinelsInArrayValue(node, path);\n }\n assertSafePath(path);\n out.push({ path: [...path], value: node, delete: false });\n return;\n }\n // Plain object: recurse into its own enumerable keys.\n const obj = node as Record<string, unknown>;\n const keys = Object.keys(obj);\n if (keys.length === 0) {\n // Empty object at non-root: emit terminal op so an empty object can\n // be written explicitly when the caller really wants one. Skip at\n // the root — no-op patches should produce no ops.\n if (path.length > 0) {\n assertSafePath(path);\n out.push({ path: [...path], value: {}, delete: false });\n }\n return;\n }\n for (const key of keys) {\n if (key === SERIALIZATION_TAG) {\n const where = path.length === 0 ? '<root>' : path.map((p) => JSON.stringify(p)).join(' > ');\n throw new Error(\n `firegraph: update payload contains a literal \\`${SERIALIZATION_TAG}\\` key at ` +\n `${where}. That key is reserved for firegraph's serialization envelope and ` +\n `cannot appear on a plain object in user data. 
Use a different field name, ` +\n `or pass a recognized tagged value through replaceNode/replaceEdge instead.`,\n );\n }\n walk(obj[key], [...path, key], out);\n }\n}\n","import { computeEdgeDocId, computeNodeDocId } from './docid.js';\nimport type { BatchBackend, WritableRecord } from './internal/backend.js';\nimport { NODE_RELATION } from './internal/constants.js';\nimport { assertNoDeleteSentinels, flattenPatch } from './internal/write-plan.js';\nimport type { GraphBatch, GraphRegistry } from './types.js';\n\nfunction buildWritableNodeRecord(\n aType: string,\n uid: string,\n data: Record<string, unknown>,\n): WritableRecord {\n return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };\n}\n\nfunction buildWritableEdgeRecord(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n): WritableRecord {\n return { aType, aUid, axbType, bType, bUid, data };\n}\n\nexport class GraphBatchImpl implements GraphBatch {\n constructor(\n private readonly backend: BatchBackend,\n private readonly registry?: GraphRegistry,\n private readonly scopePath: string = '',\n ) {}\n\n async putNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void> {\n this.writeNode(aType, uid, data, 'merge');\n }\n\n async putEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n this.writeEdge(aType, aUid, axbType, bType, bUid, data, 'merge');\n }\n\n async replaceNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void> {\n this.writeNode(aType, uid, data, 'replace');\n }\n\n async replaceEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n this.writeEdge(aType, aUid, axbType, bType, bUid, data, 'replace');\n }\n\n private writeNode(\n aType: string,\n uid: string,\n data: Record<string, unknown>,\n mode: 'merge' | 'replace',\n ): void {\n assertNoDeleteSentinels(data, mode === 'replace' ? 'replaceNode' : 'putNode');\n if (this.registry) {\n this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);\n }\n const docId = computeNodeDocId(uid);\n const record = buildWritableNodeRecord(aType, uid, data);\n if (this.registry) {\n const entry = this.registry.lookup(aType, NODE_RELATION, aType);\n if (entry?.schemaVersion && entry.schemaVersion > 0) {\n record.v = entry.schemaVersion;\n }\n }\n this.backend.setDoc(docId, record, mode);\n }\n\n private writeEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n mode: 'merge' | 'replace',\n ): void {\n assertNoDeleteSentinels(data, mode === 'replace' ? 
'replaceEdge' : 'putEdge');\n if (this.registry) {\n this.registry.validate(aType, axbType, bType, data, this.scopePath);\n }\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n const record = buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data);\n if (this.registry) {\n const entry = this.registry.lookup(aType, axbType, bType);\n if (entry?.schemaVersion && entry.schemaVersion > 0) {\n record.v = entry.schemaVersion;\n }\n }\n this.backend.setDoc(docId, record, mode);\n }\n\n async updateNode(uid: string, data: Record<string, unknown>): Promise<void> {\n const docId = computeNodeDocId(uid);\n this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });\n }\n\n async updateEdge(\n aUid: string,\n axbType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });\n }\n\n async removeNode(uid: string): Promise<void> {\n const docId = computeNodeDocId(uid);\n this.backend.deleteDoc(docId);\n }\n\n async removeEdge(aUid: string, axbType: string, bUid: string): Promise<void> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n this.backend.deleteDoc(docId);\n }\n\n async commit(): Promise<void> {\n await this.backend.commit();\n }\n}\n","import { createHash } from 'node:crypto';\n\nimport { NODE_RELATION } from './internal/constants.js';\nimport { createRegistry } from './registry.js';\nimport { compileMigrations, precompileSource } from './sandbox.js';\nimport type {\n EdgeTypeData,\n GraphReader,\n GraphRegistry,\n MigrationExecutor,\n NodeTypeData,\n RegistryEntry,\n} from './types.js';\n\n// ---------------------------------------------------------------------------\n// Meta-type constants\n// ---------------------------------------------------------------------------\n\n/** The aType used for node type definition meta-nodes. */\nexport const META_NODE_TYPE = 'nodeType';\n\n/** The aType used for edge type definition meta-nodes. */\nexport const META_EDGE_TYPE = 'edgeType';\n\n// ---------------------------------------------------------------------------\n// JSON Schemas for meta-type data payloads\n// ---------------------------------------------------------------------------\n\n/** JSON Schema for a single stored migration step. */\nconst STORED_MIGRATION_STEP_SCHEMA = {\n type: 'object',\n required: ['fromVersion', 'toVersion', 'up'],\n properties: {\n fromVersion: { type: 'integer', minimum: 0 },\n toVersion: { type: 'integer', minimum: 1 },\n up: { type: 'string', minLength: 1 },\n },\n additionalProperties: false,\n};\n\n/** JSON Schema for the `data` payload of a `nodeType` meta-node. */\nexport const NODE_TYPE_SCHEMA: object = {\n type: 'object',\n required: ['name', 'jsonSchema'],\n properties: {\n name: { type: 'string', minLength: 1 },\n jsonSchema: { type: 'object' },\n description: { type: 'string' },\n titleField: { type: 'string' },\n subtitleField: { type: 'string' },\n viewTemplate: { type: 'string' },\n viewCss: { type: 'string' },\n allowedIn: { type: 'array', items: { type: 'string', minLength: 1 } },\n schemaVersion: { type: 'integer', minimum: 0 },\n migrations: { type: 'array', items: STORED_MIGRATION_STEP_SCHEMA },\n migrationWriteBack: { type: 'string', enum: ['off', 'eager', 'background'] },\n },\n additionalProperties: false,\n};\n\n/** JSON Schema for the `data` payload of an `edgeType` meta-node. 
*/\nexport const EDGE_TYPE_SCHEMA: object = {\n type: 'object',\n required: ['name', 'from', 'to'],\n properties: {\n name: { type: 'string', minLength: 1 },\n from: {\n oneOf: [\n { type: 'string', minLength: 1 },\n { type: 'array', items: { type: 'string', minLength: 1 }, minItems: 1 },\n ],\n },\n to: {\n oneOf: [\n { type: 'string', minLength: 1 },\n { type: 'array', items: { type: 'string', minLength: 1 }, minItems: 1 },\n ],\n },\n jsonSchema: { type: 'object' },\n inverseLabel: { type: 'string' },\n description: { type: 'string' },\n titleField: { type: 'string' },\n subtitleField: { type: 'string' },\n viewTemplate: { type: 'string' },\n viewCss: { type: 'string' },\n allowedIn: { type: 'array', items: { type: 'string', minLength: 1 } },\n targetGraph: { type: 'string', minLength: 1, pattern: '^[^/]+$' },\n schemaVersion: { type: 'integer', minimum: 0 },\n migrations: { type: 'array', items: STORED_MIGRATION_STEP_SCHEMA },\n migrationWriteBack: { type: 'string', enum: ['off', 'eager', 'background'] },\n },\n additionalProperties: false,\n};\n\n// ---------------------------------------------------------------------------\n// Bootstrap registry\n// ---------------------------------------------------------------------------\n\n/** Registry entries for the two meta-types (always present). */\nexport const BOOTSTRAP_ENTRIES: readonly RegistryEntry[] = [\n {\n aType: META_NODE_TYPE,\n axbType: NODE_RELATION,\n bType: META_NODE_TYPE,\n jsonSchema: NODE_TYPE_SCHEMA,\n description: 'Meta-type: defines a node type',\n },\n {\n aType: META_EDGE_TYPE,\n axbType: NODE_RELATION,\n bType: META_EDGE_TYPE,\n jsonSchema: EDGE_TYPE_SCHEMA,\n description: 'Meta-type: defines an edge type',\n },\n];\n\n/**\n * Build the bootstrap registry that validates meta-type writes.\n * This is always available, even before any dynamic types are loaded.\n *\n * Memoized at module scope: `BOOTSTRAP_ENTRIES` is a `readonly` array\n * of module-level constants and `createRegistry` is pure over them, so\n * the resulting registry — including its compiled cfworker\n * `Validator`s — can be reused across every `GraphClientImpl`\n * constructor. 
This matters on Cloudflare Workers, where the dynamic\n * client constructor runs on every request that touches the\n * meta-registry path; without memoization we'd re-walk +\n * re-dereference these schemas per request.\n */\nlet _bootstrapRegistry: GraphRegistry | null = null;\nexport function createBootstrapRegistry(): GraphRegistry {\n if (_bootstrapRegistry) return _bootstrapRegistry;\n _bootstrapRegistry = createRegistry([...BOOTSTRAP_ENTRIES]);\n return _bootstrapRegistry;\n}\n\n// ---------------------------------------------------------------------------\n// Deterministic UID generation\n// ---------------------------------------------------------------------------\n\n/**\n * Generate a deterministic UID for a meta-type definition.\n * This ensures that defining the same type name always targets the same\n * Firestore document, enabling upsert semantics.\n *\n * Format: 21-char base64url substring of SHA-256(`metaType:name`).\n */\nexport function generateDeterministicUid(metaType: string, name: string): string {\n const hash = createHash('sha256').update(`${metaType}:${name}`).digest('base64url');\n return hash.slice(0, 21);\n}\n\n// ---------------------------------------------------------------------------\n// createRegistryFromGraph\n// ---------------------------------------------------------------------------\n\n/**\n * Read meta-type nodes from the graph and compile them into a GraphRegistry.\n *\n * The returned registry includes both the dynamic entries AND the bootstrap\n * meta-type entries, so meta-type writes remain validateable after a reload.\n *\n * @param reader - A GraphReader pointed at the collection containing meta-nodes.\n * @param executor - Optional custom executor for compiling stored migration source strings.\n */\nexport async function createRegistryFromGraph(\n reader: GraphReader,\n executor?: MigrationExecutor,\n): Promise<GraphRegistry> {\n const [nodeTypes, edgeTypes] = await Promise.all([\n reader.findNodes({ aType: META_NODE_TYPE }),\n reader.findNodes({ aType: META_EDGE_TYPE }),\n ]);\n\n const entries: RegistryEntry[] = [...BOOTSTRAP_ENTRIES];\n\n // Eagerly pre-validate all migration sources in the sandbox before building\n // the registry. This ensures reloadRegistry() fails fast on invalid sources.\n const prevalidations: Promise<void>[] = [];\n for (const record of nodeTypes) {\n const data = record.data as unknown as NodeTypeData;\n if (data.migrations) {\n for (const m of data.migrations) {\n prevalidations.push(precompileSource(m.up, executor));\n }\n }\n }\n for (const record of edgeTypes) {\n const data = record.data as unknown as EdgeTypeData;\n if (data.migrations) {\n for (const m of data.migrations) {\n prevalidations.push(precompileSource(m.up, executor));\n }\n }\n }\n await Promise.all(prevalidations);\n\n // Convert nodeType records → self-loop RegistryEntries\n for (const record of nodeTypes) {\n const data = record.data as unknown as NodeTypeData;\n entries.push({\n aType: data.name,\n axbType: NODE_RELATION,\n bType: data.name,\n jsonSchema: data.jsonSchema,\n description: data.description,\n titleField: data.titleField,\n subtitleField: data.subtitleField,\n allowedIn: data.allowedIn,\n migrations: data.migrations ? 
compileMigrations(data.migrations, executor) : undefined,\n migrationWriteBack: data.migrationWriteBack,\n });\n }\n\n // Convert edgeType records → RegistryEntries (expand from/to arrays)\n for (const record of edgeTypes) {\n const data = record.data as unknown as EdgeTypeData;\n const fromTypes = Array.isArray(data.from) ? data.from : [data.from];\n const toTypes = Array.isArray(data.to) ? data.to : [data.to];\n\n const compiledMigrations = data.migrations\n ? compileMigrations(data.migrations, executor)\n : undefined;\n\n for (const aType of fromTypes) {\n for (const bType of toTypes) {\n entries.push({\n aType,\n axbType: data.name,\n bType,\n jsonSchema: data.jsonSchema,\n description: data.description,\n inverseLabel: data.inverseLabel,\n titleField: data.titleField,\n subtitleField: data.subtitleField,\n allowedIn: data.allowedIn,\n targetGraph: data.targetGraph,\n migrations: compiledMigrations,\n migrationWriteBack: data.migrationWriteBack,\n });\n }\n }\n }\n\n return createRegistry(entries);\n}\n","export class FiregraphError extends Error {\n constructor(\n message: string,\n public readonly code: string,\n ) {\n super(message);\n this.name = 'FiregraphError';\n }\n}\n\nexport class NodeNotFoundError extends FiregraphError {\n constructor(uid: string) {\n super(`Node not found: ${uid}`, 'NODE_NOT_FOUND');\n this.name = 'NodeNotFoundError';\n }\n}\n\nexport class EdgeNotFoundError extends FiregraphError {\n constructor(aUid: string, axbType: string, bUid: string) {\n super(`Edge not found: ${aUid} -[${axbType}]-> ${bUid}`, 'EDGE_NOT_FOUND');\n this.name = 'EdgeNotFoundError';\n }\n}\n\nexport class ValidationError extends FiregraphError {\n constructor(\n message: string,\n public readonly details?: unknown,\n ) {\n super(message, 'VALIDATION_ERROR');\n this.name = 'ValidationError';\n }\n}\n\nexport class RegistryViolationError extends FiregraphError {\n constructor(aType: string, axbType: string, bType: string) {\n super(`Unregistered triple: (${aType}) -[${axbType}]-> (${bType})`, 'REGISTRY_VIOLATION');\n this.name = 'RegistryViolationError';\n }\n}\n\nexport class InvalidQueryError extends FiregraphError {\n constructor(message: string) {\n super(message, 'INVALID_QUERY');\n this.name = 'InvalidQueryError';\n }\n}\n\nexport class TraversalError extends FiregraphError {\n constructor(message: string) {\n super(message, 'TRAVERSAL_ERROR');\n this.name = 'TraversalError';\n }\n}\n\nexport class DynamicRegistryError extends FiregraphError {\n constructor(message: string) {\n super(message, 'DYNAMIC_REGISTRY_ERROR');\n this.name = 'DynamicRegistryError';\n }\n}\n\nexport class QuerySafetyError extends FiregraphError {\n constructor(message: string) {\n super(message, 'QUERY_SAFETY');\n this.name = 'QuerySafetyError';\n }\n}\n\nexport class RegistryScopeError extends FiregraphError {\n constructor(\n aType: string,\n axbType: string,\n bType: string,\n scopePath: string,\n allowedIn: string[],\n ) {\n super(\n `Type (${aType}) -[${axbType}]-> (${bType}) is not allowed at scope \"${scopePath || 'root'}\". ` +\n `Allowed in: [${allowedIn.join(', ')}]`,\n 'REGISTRY_SCOPE',\n );\n this.name = 'RegistryScopeError';\n }\n}\n\nexport class MigrationError extends FiregraphError {\n constructor(message: string) {\n super(message, 'MIGRATION_ERROR');\n this.name = 'MigrationError';\n }\n}\n\n/**\n * Thrown when a caller tries to perform an operation that would require\n * atomicity across two physical storage backends — e.g. 
opening a routed\n * subgraph client from inside a transaction callback. Cross-backend\n * atomicity cannot be honoured by real-world storage engines (Firestore,\n * SQLite drivers over D1/DO/better-sqlite3, etc.), so firegraph surfaces\n * this as a typed error instead of silently confining the write to the\n * base backend.\n *\n * Normally `TransactionBackend` and `BatchBackend` don't expose `subgraph()`\n * at the type level, so this error is unreachable through well-typed code.\n * It exists as a public catchable type for app code that needs to tolerate\n * this case deliberately (e.g. dynamic code paths that bypass the type\n * system) and as future-proofing if the interface ever grows a way to\n * request a sub-scope inside a transaction.\n */\nexport class CrossBackendTransactionError extends FiregraphError {\n constructor(message: string) {\n super(message, 'CROSS_BACKEND_TRANSACTION');\n this.name = 'CrossBackendTransactionError';\n }\n}\n\n/**\n * Thrown when a caller invokes a capability-gated operation on a backend\n * that does not declare the required capability. Capability gating is\n * primarily a compile-time concern (see `BackendCapabilities` and the\n * type-level extension surfaces in `GraphClient<C>`), but this runtime\n * error covers the cases where the type system is bypassed — dynamic\n * registries, `as any` casts, or callers explicitly downcasting through\n * the generic-erased `StorageBackend` shape.\n *\n * The error code is `CAPABILITY_NOT_SUPPORTED`. The message names the\n * missing capability and the backend that was asked, so app code can\n * diagnose without inspecting the cap set itself.\n */\nexport class CapabilityNotSupportedError extends FiregraphError {\n constructor(\n public readonly capability: string,\n backendDescription: string,\n ) {\n super(\n `Capability \"${capability}\" is not supported by ${backendDescription}.`,\n 'CAPABILITY_NOT_SUPPORTED',\n );\n this.name = 'CapabilityNotSupportedError';\n }\n}\n","/**\n * JSON Schema validation and introspection utilities.\n *\n * Uses `@cfworker/json-schema` for validation — a runtime-interpreter\n * JSON Schema validator that does not rely on `new Function()` and is\n * therefore compatible with Cloudflare Workers (which run V8 with\n * `--disallow-code-generation-from-strings`). 
Ajv was used here\n * previously, but its `ajv.compile(schema)` generates a validator via\n * the Function constructor and fails with \"Code generation from strings\n * disallowed for this context\" whenever firegraph's dynamic-registry\n * bootstrap or `reloadRegistry` runs inside a Worker.\n *\n * The introspection half (`jsonSchemaToFieldMeta`) is pure string/object\n * manipulation with no validator dependency.\n */\n\nimport { type OutputUnit, type Schema, Validator } from '@cfworker/json-schema';\n\nimport { ValidationError } from './errors.js';\n\n// ---------------------------------------------------------------------------\n// FieldMeta types (previously in editor/server/schema-introspect.ts)\n// ---------------------------------------------------------------------------\n\nexport interface FieldMeta {\n name: string;\n type: 'string' | 'number' | 'boolean' | 'enum' | 'array' | 'object' | 'unknown';\n required: boolean;\n description?: string;\n enumValues?: string[];\n minLength?: number;\n maxLength?: number;\n pattern?: string;\n min?: number;\n max?: number;\n isInt?: boolean;\n itemMeta?: FieldMeta;\n fields?: FieldMeta[];\n}\n\n// ---------------------------------------------------------------------------\n// Validation\n// ---------------------------------------------------------------------------\n\n/** Cap on how many errors get joined into the human-readable message. */\nconst MAX_RENDERED_ERRORS = 20;\n\n/**\n * Compile a JSON Schema into a validation function.\n *\n * The returned function throws `ValidationError` if data is invalid. The\n * error's `details` is the `OutputUnit[]` array produced by\n * `@cfworker/json-schema` — consumers that previously inspected Ajv's\n * `ErrorObject[]` need to map to the cfworker shape\n * (`{ keyword, keywordLocation, instanceLocation, error }`).\n *\n * Draft 2020-12 is requested by default to match the library's richest\n * feature set; schemas that omit `$schema` still validate under it\n * since keyword semantics back-compat to draft-07 for the fields\n * firegraph actually uses.\n *\n * `shortCircuit` is explicitly disabled so `result.errors` contains\n * every violation, not just the first one — humans rely on the joined\n * error message to debug bad writes from the editor / chat UI. The\n * full array is preserved on `ValidationError.details`; only the\n * rendered message is capped at `MAX_RENDERED_ERRORS` lines so\n * pathological `oneOf`/`anyOf` schemas can't blow up log lines.\n *\n * Format keywords supported by `@cfworker/json-schema` (anything else\n * is silently passed through — see node_modules/@cfworker/json-schema/\n * src/format.ts):\n * `date`, `time`, `date-time`, `duration`,\n * `email`, `hostname`, `ipv4`, `ipv6`,\n * `uri`, `uri-reference`, `uri-template`, `url`,\n * `uuid`, `regex`,\n * `json-pointer`, `relative-json-pointer`, `json-pointer-uri-fragment`.\n */\nexport function compileSchema(schema: object, label?: string): (data: unknown) => void {\n // `object` is the public type used throughout `RegistryEntry.jsonSchema`\n // and the dynamic-client API; cfworker's `Schema` is structurally\n // `{ [k: string]: any }`, which a JSON Schema document always\n // satisfies at runtime. 
The cast is therefore safe in practice —\n // pass anything other than a plain JSON-Schema-shaped object and\n // `dereference()` inside the validator will throw at construction.\n const validator = new Validator(schema as Schema, '2020-12', false);\n return (data: unknown) => {\n const result = validator.validate(data);\n if (!result.valid) {\n const total = result.errors.length;\n const head = result.errors.slice(0, MAX_RENDERED_ERRORS).map(formatError).join('; ');\n const overflow = total > MAX_RENDERED_ERRORS ? ` (+${total - MAX_RENDERED_ERRORS} more)` : '';\n throw new ValidationError(\n `Data validation failed${label ? ' for ' + label : ''}: ${head}${overflow}`,\n result.errors,\n );\n }\n };\n}\n\n/**\n * Format a single cfworker `OutputUnit` into a human-readable line.\n *\n * cfworker's `instanceLocation` is a JSON-Pointer-as-URI-fragment\n * (`#`, `#/foo`, `#/foo/0/bar`); strip the leading `#` so the rendered\n * path looks like Ajv's `instancePath` (`/foo/0/bar`) and root errors\n * read as `/` rather than `#`. The `[keyword]` prefix is included so\n * messages stay actionable when `error` is terse (e.g. `not`, `enum`).\n */\nfunction formatError(err: OutputUnit): string {\n const path = err.instanceLocation.replace(/^#/, '') || '/';\n const keyword = err.keyword ? `[${err.keyword}] ` : '';\n const detail = err.error ? `: ${keyword}${err.error}` : '';\n return `${path}${detail}`;\n}\n\n// ---------------------------------------------------------------------------\n// JSON Schema → FieldMeta introspection\n// ---------------------------------------------------------------------------\n\n/**\n * Convert a JSON Schema (expected to be `type: \"object\"`) into `FieldMeta[]`\n * suitable for the editor's SchemaForm component.\n */\nexport function jsonSchemaToFieldMeta(schema: any): FieldMeta[] {\n if (!schema || schema.type !== 'object' || !schema.properties) return [];\n\n const requiredSet = new Set<string>(Array.isArray(schema.required) ? schema.required : []);\n\n return Object.entries(schema.properties).map(([name, prop]) =>\n propertyToFieldMeta(name, prop as any, requiredSet.has(name)),\n );\n}\n\n/**\n * Convert a single JSON Schema property into a `FieldMeta`.\n */\nfunction propertyToFieldMeta(name: string, prop: any, required: boolean): FieldMeta {\n if (!prop) return { name, type: 'unknown', required };\n\n // Handle enum (can appear with or without type)\n if (Array.isArray(prop.enum)) {\n return {\n name,\n type: 'enum',\n required,\n enumValues: prop.enum as string[],\n description: prop.description,\n };\n }\n\n // Handle oneOf/anyOf for nullable patterns like { oneOf: [{type:'string'}, {type:'null'}] }\n if (Array.isArray(prop.oneOf) || Array.isArray(prop.anyOf)) {\n const variants = (prop.oneOf ?? prop.anyOf) as any[];\n const nonNull = variants.filter((v: any) => v.type !== 'null');\n if (nonNull.length === 1) {\n // Nullable wrapper — unwrap and mark as optional\n return propertyToFieldMeta(name, nonNull[0], false);\n }\n return { name, type: 'unknown', required, description: prop.description };\n }\n\n const type = prop.type;\n\n if (type === 'string') {\n return {\n name,\n type: 'string',\n required,\n minLength: prop.minLength,\n maxLength: prop.maxLength,\n pattern: prop.pattern,\n description: prop.description,\n };\n }\n\n if (type === 'number' || type === 'integer') {\n return {\n name,\n type: 'number',\n required,\n min: prop.minimum,\n max: prop.maximum,\n isInt: type === 'integer' ? 
true : undefined,\n description: prop.description,\n };\n }\n\n if (type === 'boolean') {\n return { name, type: 'boolean', required, description: prop.description };\n }\n\n if (type === 'array') {\n const itemMeta = prop.items ? propertyToFieldMeta('item', prop.items, true) : undefined;\n return {\n name,\n type: 'array',\n required,\n itemMeta,\n description: prop.description,\n };\n }\n\n if (type === 'object') {\n return {\n name,\n type: 'object',\n required,\n fields: jsonSchemaToFieldMeta(prop),\n description: prop.description,\n };\n }\n\n return { name, type: 'unknown', required, description: prop.description };\n}\n","/**\n * Migration pipeline for auto-migrating records on read.\n *\n * When a record's `v` is behind the version derived from the registry\n * entry's migrations, the pipeline applies migration steps sequentially\n * to bring the data up to the current version.\n */\n\nimport { MigrationError } from './errors.js';\nimport type {\n GraphRegistry,\n MigrationStep,\n MigrationWriteBack,\n StoredGraphRecord,\n} from './types.js';\n\n/** Result of attempting to migrate a single record. */\nexport interface MigrationResult {\n record: StoredGraphRecord;\n migrated: boolean;\n /** Resolved write-back mode for this record (entry-level > global > 'off'). */\n writeBack: MigrationWriteBack;\n}\n\n/**\n * Apply a chain of migration steps to transform data from `currentVersion`\n * to `targetVersion`. Throws `MigrationError` if the chain is incomplete\n * or a migration function fails.\n *\n * Returns the migrated data payload only — the caller is responsible for\n * stamping `v` on the record envelope.\n */\nexport async function applyMigrationChain(\n data: Record<string, unknown>,\n currentVersion: number,\n targetVersion: number,\n migrations: MigrationStep[],\n): Promise<Record<string, unknown>> {\n const sorted = [...migrations].sort((a, b) => a.fromVersion - b.fromVersion);\n let result = { ...data };\n let version = currentVersion;\n\n for (const step of sorted) {\n if (step.fromVersion === version) {\n try {\n result = await step.up(result);\n } catch (err: unknown) {\n if (err instanceof MigrationError) throw err;\n throw new MigrationError(\n `Migration from v${step.fromVersion} to v${step.toVersion} failed: ${(err as Error).message}`,\n );\n }\n if (!result || typeof result !== 'object') {\n throw new MigrationError(\n `Migration from v${step.fromVersion} to v${step.toVersion} returned invalid data (expected object)`,\n );\n }\n version = step.toVersion;\n }\n }\n\n if (version !== targetVersion) {\n throw new MigrationError(\n `Incomplete migration chain: reached v${version} but target is v${targetVersion}`,\n );\n }\n\n return result;\n}\n\n/**\n * Validate that a migration chain forms a contiguous path from version 0\n * to the highest `toVersion`. 
Throws `MigrationError` if the chain has\n * gaps or duplicate `fromVersion` values.\n *\n * Called at registry construction time to catch incomplete chains early,\n * rather than at read time when a record is migrated.\n */\nexport function validateMigrationChain(migrations: MigrationStep[], label: string): void {\n if (migrations.length === 0) return;\n\n // Validate individual steps\n const seen = new Set<number>();\n for (const step of migrations) {\n if (step.toVersion <= step.fromVersion) {\n throw new MigrationError(\n `${label}: migration step has toVersion (${step.toVersion}) <= fromVersion (${step.fromVersion})`,\n );\n }\n if (seen.has(step.fromVersion)) {\n throw new MigrationError(\n `${label}: duplicate migration step for fromVersion ${step.fromVersion}`,\n );\n }\n seen.add(step.fromVersion);\n }\n\n const sorted = [...migrations].sort((a, b) => a.fromVersion - b.fromVersion);\n const targetVersion = Math.max(...migrations.map((m) => m.toVersion));\n let version = 0;\n\n for (const step of sorted) {\n if (step.fromVersion === version) {\n version = step.toVersion;\n } else if (step.fromVersion > version) {\n throw new MigrationError(\n `${label}: migration chain has a gap — no step covers v${version} → v${step.fromVersion}`,\n );\n }\n }\n\n if (version !== targetVersion) {\n throw new MigrationError(\n `${label}: migration chain does not reach v${targetVersion} (stuck at v${version})`,\n );\n }\n}\n\n/**\n * Attempt to migrate a single record based on its registry entry.\n *\n * Returns the original record unchanged if no migration is needed\n * (no schema version, already at current version, or no migrations defined).\n */\nexport async function migrateRecord(\n record: StoredGraphRecord,\n registry: GraphRegistry,\n globalWriteBack: MigrationWriteBack = 'off',\n): Promise<MigrationResult> {\n const entry = registry.lookup(record.aType, record.axbType, record.bType);\n\n if (!entry?.migrations?.length || !entry.schemaVersion) {\n return { record, migrated: false, writeBack: 'off' };\n }\n\n const currentVersion = record.v ?? 0;\n\n if (currentVersion >= entry.schemaVersion) {\n return { record, migrated: false, writeBack: 'off' };\n }\n\n const migratedData = await applyMigrationChain(\n record.data,\n currentVersion,\n entry.schemaVersion,\n entry.migrations,\n );\n\n // Two-tier resolution: entry-level > global > 'off'\n const writeBack = entry.migrationWriteBack ?? globalWriteBack ?? 
'off';\n\n return {\n record: { ...record, data: migratedData, v: entry.schemaVersion },\n migrated: true,\n writeBack,\n };\n}\n\n/**\n * Migrate an array of records, returning all results.\n * If any single migration fails, the entire call rejects — a broken\n * migration function is a bug that should surface immediately.\n */\nexport async function migrateRecords(\n records: StoredGraphRecord[],\n registry: GraphRegistry,\n globalWriteBack: MigrationWriteBack = 'off',\n): Promise<MigrationResult[]> {\n return Promise.all(records.map((r) => migrateRecord(r, registry, globalWriteBack)));\n}\n","/**\n * Scope path matching for subgraph-level registry constraints.\n *\n * Scope paths are slash-separated names derived from the chain of\n * `subgraph()` calls (e.g., `'agents'`, `'agents/memories'`).\n * The root graph has an empty scope path (`''`).\n *\n * Patterns:\n * - `'root'` — matches only the root graph (empty scope path)\n * - `'agents'` — matches exactly `'agents'`\n * - `'agents/memories'` — matches exactly `'agents/memories'`\n * - `'*​/agents'` — `*` matches one segment: `'foo/agents'` but not `'a/b/agents'`\n * - `'**​/memories'` — `**` matches zero or more segments\n * - `'**'` — matches everything including root\n */\n\n/**\n * Test whether a scope path matches a single pattern.\n *\n * @param scopePath - The current scope path (empty string for root)\n * @param pattern - The pattern to match against\n */\nexport function matchScope(scopePath: string, pattern: string): boolean {\n // Special case: 'root' matches only the root graph\n if (pattern === 'root') return scopePath === '';\n\n // Special case: '**' matches everything\n if (pattern === '**') return true;\n\n const pathSegments = scopePath === '' ? [] : scopePath.split('/');\n const patternSegments = pattern.split('/');\n\n return matchSegments(pathSegments, 0, patternSegments, 0);\n}\n\n/**\n * Test whether a scope path matches any pattern in a list.\n * Returns `true` if the list is empty or undefined (allowed everywhere).\n *\n * @param scopePath - The current scope path (empty string for root)\n * @param patterns - Array of patterns to match against\n */\nexport function matchScopeAny(scopePath: string, patterns: string[]): boolean {\n if (!patterns || patterns.length === 0) return true;\n return patterns.some((p) => matchScope(scopePath, p));\n}\n\n/**\n * Recursive segment matcher with support for `*` (one segment) and\n * `**` (zero or more segments).\n */\nfunction matchSegments(path: string[], pi: number, pattern: string[], qi: number): boolean {\n // Both exhausted — match\n if (pi === path.length && qi === pattern.length) return true;\n\n // Pattern exhausted but path remains — no match\n if (qi === pattern.length) return false;\n\n const seg = pattern[qi];\n\n if (seg === '**') {\n // '**' at the end of pattern — matches everything remaining\n if (qi === pattern.length - 1) return true;\n\n // Try consuming 0, 1, 2, ... 
path segments\n for (let skip = 0; skip <= path.length - pi; skip++) {\n if (matchSegments(path, pi + skip, pattern, qi + 1)) return true;\n }\n return false;\n }\n\n // Path exhausted but pattern has non-** segments remaining — no match\n if (pi === path.length) return false;\n\n if (seg === '*') {\n // '*' matches exactly one segment\n return matchSegments(path, pi + 1, pattern, qi + 1);\n }\n\n // Literal match\n if (path[pi] === seg) {\n return matchSegments(path, pi + 1, pattern, qi + 1);\n }\n\n return false;\n}\n","import { RegistryScopeError, RegistryViolationError, ValidationError } from './errors.js';\nimport { NODE_RELATION } from './internal/constants.js';\nimport { compileSchema } from './json-schema.js';\nimport { validateMigrationChain } from './migration.js';\nimport { matchScopeAny } from './scope.js';\nimport type { DiscoveryResult, GraphRegistry, RegistryEntry } from './types.js';\n\nfunction tripleKey(aType: string, axbType: string, bType: string): string {\n return `${aType}:${axbType}:${bType}`;\n}\n\nfunction tripleKeyFor(e: RegistryEntry): string {\n return tripleKey(e.aType, e.axbType, e.bType);\n}\n\n/**\n * Build a registry from either explicit entries or a DiscoveryResult.\n *\n * @example\n * ```ts\n * // From explicit entries (programmatic)\n * const registry = createRegistry([\n * { aType: 'user', axbType: 'is', bType: 'user', jsonSchema: userSchema },\n * { aType: 'user', axbType: 'follows', bType: 'user', jsonSchema: followsSchema },\n * ]);\n *\n * // From discovery result (folder convention)\n * const discovered = await discoverEntities('./entities');\n * const registry = createRegistry(discovered);\n * ```\n */\nexport function createRegistry(input: RegistryEntry[] | DiscoveryResult): GraphRegistry {\n const map = new Map<string, { entry: RegistryEntry; validate?: (data: unknown) => void }>();\n\n let entries: RegistryEntry[];\n\n if (Array.isArray(input)) {\n entries = input;\n } else {\n entries = discoveryToEntries(input);\n }\n\n const entryList: ReadonlyArray<RegistryEntry> = Object.freeze([...entries]);\n\n for (const entry of entries) {\n if (entry.targetGraph && entry.targetGraph.includes('/')) {\n throw new ValidationError(\n `Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType}) has invalid targetGraph \"${entry.targetGraph}\" — must be a single segment (no \"/\")`,\n );\n }\n if (entry.migrations?.length) {\n const label = `Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`;\n validateMigrationChain(entry.migrations, label);\n // Derive schemaVersion from migrations — single source of truth\n entry.schemaVersion = Math.max(...entry.migrations.map((m) => m.toVersion));\n } else {\n // No migrations → no versioning (ignore any user-supplied schemaVersion)\n entry.schemaVersion = undefined;\n }\n const key = tripleKey(entry.aType, entry.axbType, entry.bType);\n const validator = entry.jsonSchema\n ? 
compileSchema(entry.jsonSchema, `(${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`)\n : undefined;\n map.set(key, { entry, validate: validator });\n }\n\n // Build axbType index for lookupByAxbType\n const axbIndex = new Map<string, ReadonlyArray<RegistryEntry>>();\n const axbBuild = new Map<string, RegistryEntry[]>();\n for (const entry of entries) {\n const existing = axbBuild.get(entry.axbType);\n if (existing) {\n existing.push(entry);\n } else {\n axbBuild.set(entry.axbType, [entry]);\n }\n }\n for (const [key, arr] of axbBuild) {\n axbIndex.set(key, Object.freeze(arr));\n }\n\n // Build aType → subgraph-topology index.\n //\n // For each source aType, collect edge entries whose `targetGraph` is set —\n // these are the aType's direct subgraph children. Dedupe by `targetGraph`\n // alone (not by axbType): the physical subgraph store is addressed by\n // (parentUid, targetGraph) and the cascade caller only cares about which\n // child subgraphs to tear down. Two distinct edge relations pointing into\n // the same `targetGraph` would otherwise produce duplicate destroy calls\n // on the same physical backend.\n const topologyIndex = new Map<string, ReadonlyArray<RegistryEntry>>();\n const topologyBuild = new Map<string, RegistryEntry[]>();\n const topologySeen = new Map<string, Set<string>>();\n for (const entry of entries) {\n if (!entry.targetGraph) continue;\n let seen = topologySeen.get(entry.aType);\n if (!seen) {\n seen = new Set();\n topologySeen.set(entry.aType, seen);\n }\n if (seen.has(entry.targetGraph)) continue;\n seen.add(entry.targetGraph);\n const existing = topologyBuild.get(entry.aType);\n if (existing) {\n existing.push(entry);\n } else {\n topologyBuild.set(entry.aType, [entry]);\n }\n }\n for (const [key, arr] of topologyBuild) {\n topologyIndex.set(key, Object.freeze(arr));\n }\n\n return {\n lookup(aType: string, axbType: string, bType: string): RegistryEntry | undefined {\n return map.get(tripleKey(aType, axbType, bType))?.entry;\n },\n\n lookupByAxbType(axbType: string): ReadonlyArray<RegistryEntry> {\n return axbIndex.get(axbType) ?? [];\n },\n\n getSubgraphTopology(aType: string): ReadonlyArray<RegistryEntry> {\n return topologyIndex.get(aType) ?? [];\n },\n\n validate(\n aType: string,\n axbType: string,\n bType: string,\n data: unknown,\n scopePath?: string,\n ): void {\n const rec = map.get(tripleKey(aType, axbType, bType));\n\n if (!rec) {\n throw new RegistryViolationError(aType, axbType, bType);\n }\n\n // Scope validation: check allowedIn patterns when a scope context is provided\n if (scopePath !== undefined && rec.entry.allowedIn && rec.entry.allowedIn.length > 0) {\n if (!matchScopeAny(scopePath, rec.entry.allowedIn)) {\n throw new RegistryScopeError(aType, axbType, bType, scopePath, rec.entry.allowedIn);\n }\n }\n\n if (rec.validate) {\n try {\n rec.validate(data);\n } catch (err: unknown) {\n if (err instanceof ValidationError) throw err;\n throw new ValidationError(\n `Data validation failed for (${aType}) -[${axbType}]-> (${bType})`,\n err,\n );\n }\n }\n },\n\n entries(): ReadonlyArray<RegistryEntry> {\n return entryList;\n },\n };\n}\n\n/**\n * Create a merged registry where `base` entries take priority and `extension`\n * entries fill in gaps. 
Lookups and validation check `base` first; only if the\n * triple is not found there does the merged registry fall through to\n * `extension`.\n *\n * The `entries()` method returns a deduplicated list (base wins on collision).\n * The `lookupByAxbType()` method merges results from both registries,\n * deduplicating by triple key with base entries winning.\n */\nexport function createMergedRegistry(base: GraphRegistry, extension: GraphRegistry): GraphRegistry {\n // Build a set of triple keys from the base registry for fast collision checks.\n const baseKeys = new Set(base.entries().map(tripleKeyFor));\n\n return {\n lookup(aType: string, axbType: string, bType: string): RegistryEntry | undefined {\n return base.lookup(aType, axbType, bType) ?? extension.lookup(aType, axbType, bType);\n },\n\n lookupByAxbType(axbType: string): ReadonlyArray<RegistryEntry> {\n const baseResults = base.lookupByAxbType(axbType);\n const extResults = extension.lookupByAxbType(axbType);\n if (extResults.length === 0) return baseResults;\n if (baseResults.length === 0) return extResults;\n\n // Merge, base wins on triple-key collision\n const seen = new Set(baseResults.map(tripleKeyFor));\n const merged = [...baseResults];\n for (const entry of extResults) {\n if (!seen.has(tripleKeyFor(entry))) {\n merged.push(entry);\n }\n }\n return Object.freeze(merged);\n },\n\n getSubgraphTopology(aType: string): ReadonlyArray<RegistryEntry> {\n const baseResults = base.getSubgraphTopology(aType);\n const extResults = extension.getSubgraphTopology(aType);\n if (extResults.length === 0) return baseResults;\n if (baseResults.length === 0) return extResults;\n\n // Merge, base wins on `targetGraph` collision. Extension entries only\n // contribute new subgraph segments the base doesn't cover. 
Dedupe key\n // matches the physical DO address — (parentUid, targetGraph) — so two\n // different axbTypes pointing into the same segment collapse to one.\n const seen = new Set(baseResults.map((e) => e.targetGraph));\n const merged = [...baseResults];\n for (const entry of extResults) {\n if (!seen.has(entry.targetGraph)) {\n seen.add(entry.targetGraph);\n merged.push(entry);\n }\n }\n return Object.freeze(merged);\n },\n\n validate(\n aType: string,\n axbType: string,\n bType: string,\n data: unknown,\n scopePath?: string,\n ): void {\n if (baseKeys.has(tripleKey(aType, axbType, bType))) {\n return base.validate(aType, axbType, bType, data, scopePath);\n }\n // Falls through to extension (which throws RegistryViolationError if not found)\n return extension.validate(aType, axbType, bType, data, scopePath);\n },\n\n entries(): ReadonlyArray<RegistryEntry> {\n const extEntries = extension.entries();\n if (extEntries.length === 0) return base.entries();\n\n const merged = [...base.entries()];\n for (const entry of extEntries) {\n if (!baseKeys.has(tripleKeyFor(entry))) {\n merged.push(entry);\n }\n }\n return Object.freeze(merged);\n },\n };\n}\n\n/**\n * Convert a DiscoveryResult into flat RegistryEntry[].\n * Nodes become self-loop triples `(name, 'is', name)`.\n * Edges expand `from`/`to` arrays into one triple per combination.\n */\nfunction discoveryToEntries(discovery: DiscoveryResult): RegistryEntry[] {\n const entries: RegistryEntry[] = [];\n\n // Nodes → self-loop triples\n for (const [name, entity] of discovery.nodes) {\n entries.push({\n aType: name,\n axbType: NODE_RELATION,\n bType: name,\n jsonSchema: entity.schema,\n description: entity.description,\n titleField: entity.titleField,\n subtitleField: entity.subtitleField,\n allowedIn: entity.allowedIn,\n migrations: entity.migrations,\n migrationWriteBack: entity.migrationWriteBack,\n indexes: entity.indexes,\n });\n }\n\n // Edges → expand from/to into one triple per combination\n for (const [axbType, entity] of discovery.edges) {\n const topology = entity.topology;\n if (!topology) continue;\n\n const fromTypes = Array.isArray(topology.from) ? topology.from : [topology.from];\n const toTypes = Array.isArray(topology.to) ? topology.to : [topology.to];\n\n const resolvedTargetGraph = entity.targetGraph ?? topology.targetGraph;\n if (resolvedTargetGraph && resolvedTargetGraph.includes('/')) {\n throw new ValidationError(\n `Edge \"${axbType}\" has invalid targetGraph \"${resolvedTargetGraph}\" — must be a single segment (no \"/\")`,\n );\n }\n\n for (const aType of fromTypes) {\n for (const bType of toTypes) {\n entries.push({\n aType,\n axbType,\n bType,\n jsonSchema: entity.schema,\n description: entity.description,\n inverseLabel: topology.inverseLabel,\n titleField: entity.titleField,\n subtitleField: entity.subtitleField,\n allowedIn: entity.allowedIn,\n targetGraph: resolvedTargetGraph,\n migrations: entity.migrations,\n migrationWriteBack: entity.migrationWriteBack,\n indexes: entity.indexes,\n });\n }\n }\n }\n\n return entries;\n}\n","/**\n * Sandbox module for compiling dynamic registry migration source strings\n * into executable functions.\n *\n * Uses a dedicated worker thread with SES (Secure ECMAScript) Compartments\n * for isolation. 
SES `lockdown()` and `Compartment` evaluation run in the\n * worker thread so that the host process's intrinsics remain unaffected.\n *\n * Each migration function runs in a hardened compartment with no ambient\n * authority — no access to `process`, `require`, `fetch`, `setTimeout`,\n * or any other host-provided globals. Data crosses the compartment boundary\n * as JSON strings to prevent prototype chain escapes.\n *\n * Static registry migrations are already in-memory functions and never\n * go through this module.\n */\n\nimport { createHash } from 'node:crypto';\nimport type { Worker } from 'node:worker_threads';\n\nimport { MigrationError } from './errors.js';\nimport type * as SerializationModule from './serialization.js';\nimport type {\n MigrationExecutor,\n MigrationFn,\n MigrationStep,\n StoredMigrationStep,\n} from './types.js';\n\n// ---------------------------------------------------------------------------\n// Sandbox worker — SES lockdown and Compartment evaluation run in a\n// dedicated worker thread so that lockdown() does not affect the host\n// process's intrinsics. The worker is spawned lazily on first use.\n// ---------------------------------------------------------------------------\n\nlet _worker: Worker | null = null;\nlet _requestId = 0;\nconst _pending = new Map<\n number,\n {\n resolve: (value: unknown) => void;\n reject: (reason: Error) => void;\n }\n>();\n\n/**\n * Inline worker source evaluated as CJS in a dedicated worker thread.\n * Contains all SES setup, compilation, and execution logic.\n *\n * **Why inline?** Using `new Worker(code, { eval: true })` avoids\n * ESM/CJS file resolution issues when the library is consumed from\n * different module formats or bundlers.\n */\nconst WORKER_SOURCE = [\n `'use strict';`,\n `var _wt = require('node:worker_threads');`,\n `var _mod = require('node:module');`,\n `var _crypto = require('node:crypto');`,\n `var parentPort = _wt.parentPort;`,\n `var workerData = _wt.workerData;`,\n ``,\n `// Load SES using the parent module's resolution context`,\n `var esmRequire = _mod.createRequire(workerData.parentUrl);`,\n `esmRequire('ses');`,\n ``,\n `lockdown({`,\n ` errorTaming: 'unsafe',`,\n ` consoleTaming: 'unsafe',`,\n ` evalTaming: 'safe-eval',`,\n ` overrideTaming: 'moderate',`,\n ` stackFiltering: 'verbose'`,\n `});`,\n ``,\n `// Defense-in-depth: verify lockdown() actually hardened JSON.`,\n `if (!Object.isFrozen(JSON)) {`,\n ` throw new Error('SES lockdown failed: JSON is not frozen');`,\n `}`,\n ``,\n `var cache = new Map();`,\n ``,\n `function hashSource(s) {`,\n ` return _crypto.createHash('sha256').update(s).digest('hex');`,\n `}`,\n ``,\n `function buildWrapper(source) {`,\n ` return '(function() {' +`,\n ` ' var fn = (' + source + ');\\\\n' +`,\n ` ' if (typeof fn !== \"function\") return null;\\\\n' +`,\n ` ' return function(jsonIn) {\\\\n' +`,\n ` ' var data = JSON.parse(jsonIn);\\\\n' +`,\n ` ' var result = fn(data);\\\\n' +`,\n ` ' if (result !== null && typeof result === \"object\" && typeof result.then === \"function\") {\\\\n' +`,\n ` ' return result.then(function(r) { return JSON.stringify(r); });\\\\n' +`,\n ` ' }\\\\n' +`,\n ` ' return JSON.stringify(result);\\\\n' +`,\n ` ' };\\\\n' +`,\n ` '})()';`,\n `}`,\n ``,\n `function compileSource(source) {`,\n ` var key = hashSource(source);`,\n ` var cached = cache.get(key);`,\n ` if (cached) return cached;`,\n ``,\n ` var compartmentFn;`,\n ` try {`,\n ` var c = new Compartment({ JSON: JSON });`,\n ` compartmentFn = c.evaluate(buildWrapper(source));`,\n ` 
} catch (err) {`,\n ` throw new Error('Failed to compile migration source: ' + (err.message || String(err)));`,\n ` }`,\n ``,\n ` if (typeof compartmentFn !== 'function') {`,\n ` throw new Error('Migration source did not produce a function: ' + source.slice(0, 80));`,\n ` }`,\n ``,\n ` cache.set(key, compartmentFn);`,\n ` return compartmentFn;`,\n `}`,\n ``,\n `parentPort.on('message', function(msg) {`,\n ` var id = msg.id;`,\n ` try {`,\n ` if (msg.type === 'compile') {`,\n ` compileSource(msg.source);`,\n ` parentPort.postMessage({ id: id, type: 'compiled' });`,\n ` return;`,\n ` }`,\n ` if (msg.type === 'execute') {`,\n ` var fn = compileSource(msg.source);`,\n ` var raw;`,\n ` try {`,\n ` raw = fn(msg.jsonData);`,\n ` } catch (err) {`,\n ` parentPort.postMessage({ id: id, type: 'error', message: 'Migration function threw: ' + (err.message || String(err)) });`,\n ` return;`,\n ` }`,\n ` if (raw !== null && typeof raw === 'object' && typeof raw.then === 'function') {`,\n ` raw.then(`,\n ` function(jsonResult) {`,\n ` if (jsonResult === undefined || jsonResult === null) {`,\n ` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,\n ` } else {`,\n ` parentPort.postMessage({ id: id, type: 'result', jsonResult: jsonResult });`,\n ` }`,\n ` },`,\n ` function(err) {`,\n ` parentPort.postMessage({ id: id, type: 'error', message: 'Async migration function threw: ' + (err.message || String(err)) });`,\n ` }`,\n ` );`,\n ` return;`,\n ` }`,\n ` if (raw === undefined || raw === null) {`,\n ` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,\n ` } else {`,\n ` parentPort.postMessage({ id: id, type: 'result', jsonResult: raw });`,\n ` }`,\n ` }`,\n ` } catch (err) {`,\n ` parentPort.postMessage({ id: id, type: 'error', message: err.message || String(err) });`,\n ` }`,\n `});`,\n].join('\\n');\n\n// ---------------------------------------------------------------------------\n// Worker lifecycle management\n// ---------------------------------------------------------------------------\n\ninterface WorkerResponse {\n id: number;\n type: string;\n message?: string;\n jsonResult?: string;\n}\n\n// `node:worker_threads` is loaded lazily so this module can be imported in\n// runtimes without it (Cloudflare Workers, browsers). Only callers that\n// actually exercise the default migration sandbox will trigger the import.\nlet _WorkerCtor: (new (source: string, opts: Record<string, unknown>) => Worker) | null = null;\n\nasync function loadWorkerCtor(): Promise<NonNullable<typeof _WorkerCtor>> {\n if (_WorkerCtor) return _WorkerCtor;\n const wt = await import('node:worker_threads');\n _WorkerCtor = wt.Worker as unknown as NonNullable<typeof _WorkerCtor>;\n return _WorkerCtor;\n}\n\nasync function ensureWorker(): Promise<Worker> {\n if (_worker) return _worker;\n\n const Ctor = await loadWorkerCtor();\n _worker = new Ctor(WORKER_SOURCE, {\n eval: true,\n workerData: { parentUrl: import.meta.url },\n });\n\n // Don't let the worker prevent process exit\n _worker.unref();\n\n _worker.on('message', (msg: WorkerResponse) => {\n if (msg.id === undefined) return;\n const pending = _pending.get(msg.id);\n if (!pending) return;\n _pending.delete(msg.id);\n\n if (msg.type === 'error') {\n pending.reject(new MigrationError(msg.message ?? 
'Unknown sandbox error'));\n } else {\n pending.resolve(msg);\n }\n });\n\n _worker.on('error', (err: Error) => {\n // Worker crashed — reject all pending requests and allow respawn\n for (const [, p] of _pending) {\n p.reject(new MigrationError(`Sandbox worker error: ${err.message}`));\n }\n _pending.clear();\n _worker = null;\n });\n\n _worker.on('exit', (code: number) => {\n // Always reject pending requests — a worker exiting while requests\n // are in-flight is always an error from the caller's perspective,\n // even if the exit code is 0 (e.g., graceful termination).\n if (_pending.size > 0) {\n for (const [, p] of _pending) {\n p.reject(new MigrationError(`Sandbox worker exited with code ${code}`));\n }\n _pending.clear();\n }\n _worker = null;\n });\n\n return _worker;\n}\n\nasync function sendToWorker(msg: Record<string, unknown>): Promise<WorkerResponse> {\n const worker = await ensureWorker();\n if (_requestId >= Number.MAX_SAFE_INTEGER) _requestId = 0;\n const id = ++_requestId;\n return new Promise<WorkerResponse>((resolve, reject) => {\n _pending.set(id, { resolve: resolve as (v: unknown) => void, reject });\n worker.postMessage({ ...msg, id });\n });\n}\n\n// ---------------------------------------------------------------------------\n// Compiled function cache (keyed by executor → SHA-256 hash of source string)\n// ---------------------------------------------------------------------------\n\n// Two-level cache: outer key is the executor reference (WeakMap so that\n// short-lived executors and their caches can be garbage collected), inner\n// key is the SHA-256 hash of the source string. This prevents cache\n// poisoning when different clients use different sandbox executors in\n// the same process.\nconst compiledCache = new WeakMap<MigrationExecutor, Map<string, MigrationFn>>();\n\nfunction getExecutorCache(executor: MigrationExecutor): Map<string, MigrationFn> {\n let cache = compiledCache.get(executor);\n if (!cache) {\n cache = new Map();\n compiledCache.set(executor, cache);\n }\n return cache;\n}\n\nfunction hashSource(source: string): string {\n return createHash('sha256').update(source).digest('hex');\n}\n\n// ---------------------------------------------------------------------------\n// Lazy serialization loader. Pulls `@google-cloud/firestore` only when the\n// default executor actually runs a migration — keeps Firestore out of\n// non-Firestore bundles (e.g. the Cloudflare DO backend).\n// ---------------------------------------------------------------------------\n\nlet _serializationModule: typeof SerializationModule | null = null;\n\nasync function loadSerialization(): Promise<typeof SerializationModule> {\n if (_serializationModule) return _serializationModule;\n _serializationModule = await import('./serialization.js');\n return _serializationModule;\n}\n\n// ---------------------------------------------------------------------------\n// Default executor\n// ---------------------------------------------------------------------------\n\n/**\n * Default executor using a worker-thread SES Compartment with JSON marshaling.\n *\n * Migration source is compiled and executed inside an isolated SES\n * Compartment running in a dedicated worker thread. The worker calls\n * `lockdown()` in its own V8 isolate, leaving the host process's\n * intrinsics completely unaffected.\n *\n * Data crosses the compartment boundary as JSON strings, preventing\n * prototype chain escapes. 
The compartment receives only `JSON` as an\n * endowment for parsing/stringifying data.\n *\n * The returned `MigrationFn` always returns a `Promise` (communication\n * with the worker is inherently async via `postMessage`).\n */\nexport function defaultExecutor(source: string): MigrationFn {\n // Worker is spawned lazily on first execution via `sendToWorker`.\n // Eager spawning here would force a top-level `node:worker_threads`\n // load and break Cloudflare Workers / browser callers that never\n // exercise the default sandbox.\n\n // Return a MigrationFn that delegates to the worker thread.\n // Compilation + execution happen in the worker's SES Compartment.\n return (async (data: Record<string, unknown>) => {\n const { serializeFirestoreTypes, deserializeFirestoreTypes } = await loadSerialization();\n const jsonData = JSON.stringify(serializeFirestoreTypes(data));\n const response = await sendToWorker({ type: 'execute', source, jsonData });\n if (response.jsonResult === undefined || response.jsonResult === null) {\n throw new MigrationError('Migration returned a non-JSON-serializable value');\n }\n try {\n return deserializeFirestoreTypes(JSON.parse(response.jsonResult));\n } catch {\n throw new MigrationError('Migration returned a non-JSON-serializable value');\n }\n }) as MigrationFn;\n}\n\n// ---------------------------------------------------------------------------\n// Public API\n// ---------------------------------------------------------------------------\n\n/**\n * Eagerly validate a migration source string by compiling it in the\n * sandbox worker (or via a custom executor) without executing it.\n *\n * Use this to catch syntax errors at define-time or reload-time rather\n * than at first migration execution.\n *\n * @throws {MigrationError} If the source is syntactically invalid or\n * does not produce a function.\n */\nexport async function precompileSource(\n source: string,\n executor?: MigrationExecutor,\n): Promise<void> {\n if (executor && executor !== defaultExecutor) {\n // Custom executors validate synchronously the old way\n try {\n executor(source);\n } catch (err: unknown) {\n if (err instanceof MigrationError) throw err;\n throw new MigrationError(`Failed to compile migration source: ${(err as Error).message}`);\n }\n return;\n }\n\n // Default executor: send a compile-only message to the worker\n await sendToWorker({ type: 'compile', source });\n}\n\n/**\n * Compile a stored migration source string into an executable function.\n * Results are cached by SHA-256 hash of the source string so repeated\n * reads never re-parse the same migration.\n *\n * **Important:** When using the default executor, this function does NOT\n * validate the source synchronously — validation is deferred to the\n * worker thread at execution time. 
Callers that need eager validation\n * (e.g., `defineNodeType`, `reloadRegistry`) should call\n * `precompileSource()` before or alongside `compileMigrationFn()`.\n */\nexport function compileMigrationFn(\n source: string,\n executor: MigrationExecutor = defaultExecutor,\n): MigrationFn {\n const cache = getExecutorCache(executor);\n const key = hashSource(source);\n const cached = cache.get(key);\n if (cached) return cached;\n\n try {\n const fn = executor(source);\n cache.set(key, fn);\n return fn;\n } catch (err: unknown) {\n if (err instanceof MigrationError) throw err;\n throw new MigrationError(`Failed to compile migration source: ${(err as Error).message}`);\n }\n}\n\n/**\n * Batch compile stored migration steps into executable MigrationStep[].\n *\n * With the default executor, source validation is deferred to execution\n * time. Use `precompileSource()` to validate eagerly — see\n * `createRegistryFromGraph()` for the recommended pattern.\n */\nexport function compileMigrations(\n stored: StoredMigrationStep[],\n executor?: MigrationExecutor,\n): MigrationStep[] {\n return stored.map((step) => ({\n fromVersion: step.fromVersion,\n toVersion: step.toVersion,\n up: compileMigrationFn(step.up, executor),\n }));\n}\n\n/**\n * Terminate the sandbox worker thread. The worker will be respawned\n * on the next `defaultExecutor` call.\n *\n * Primarily useful for test cleanup to avoid vitest hanging on\n * unfinished worker threads.\n */\nexport async function destroySandboxWorker(): Promise<void> {\n if (!_worker) return;\n const w = _worker;\n _worker = null;\n // Reject any remaining pending requests\n for (const [, p] of _pending) {\n p.reject(new MigrationError('Sandbox worker terminated'));\n }\n _pending.clear();\n await w.terminate();\n}\n","import { computeEdgeDocId } from './docid.js';\nimport { InvalidQueryError } from './errors.js';\nimport { BUILTIN_FIELDS, DEFAULT_QUERY_LIMIT, NODE_RELATION } from './internal/constants.js';\nimport type { FindEdgesParams, FindNodesParams, QueryFilter, QueryPlan } from './types.js';\n\nexport function buildEdgeQueryPlan(params: FindEdgesParams): QueryPlan {\n const { aType, aUid, axbType, bType, bUid, limit, orderBy } = params;\n\n if (aUid && axbType && bUid && !params.where?.length) {\n return { strategy: 'get', docId: computeEdgeDocId(aUid, axbType, bUid) };\n }\n\n const filters: QueryFilter[] = [];\n\n if (aType) filters.push({ field: 'aType', op: '==', value: aType });\n if (aUid) filters.push({ field: 'aUid', op: '==', value: aUid });\n if (axbType) filters.push({ field: 'axbType', op: '==', value: axbType });\n if (bType) filters.push({ field: 'bType', op: '==', value: bType });\n if (bUid) filters.push({ field: 'bUid', op: '==', value: bUid });\n\n if (params.where) {\n for (const clause of params.where) {\n const field = BUILTIN_FIELDS.has(clause.field)\n ? clause.field\n : clause.field.startsWith('data.')\n ? clause.field\n : `data.${clause.field}`;\n filters.push({ field, op: clause.op, value: clause.value });\n }\n }\n\n if (filters.length === 0) {\n throw new InvalidQueryError('findEdges requires at least one filter parameter');\n }\n\n // limit: undefined → apply DEFAULT_QUERY_LIMIT\n // limit: 0 → no limit (unlimited, used by internal bulk operations)\n // limit: N → use N\n const effectiveLimit = limit === undefined ? 
DEFAULT_QUERY_LIMIT : limit || undefined;\n return { strategy: 'query', filters, options: { limit: effectiveLimit, orderBy } };\n}\n\nexport function buildNodeQueryPlan(params: FindNodesParams): QueryPlan {\n const { aType, limit, orderBy } = params;\n\n const filters: QueryFilter[] = [\n { field: 'aType', op: '==', value: aType },\n { field: 'axbType', op: '==', value: NODE_RELATION },\n ];\n\n if (params.where) {\n for (const clause of params.where) {\n const field = BUILTIN_FIELDS.has(clause.field)\n ? clause.field\n : clause.field.startsWith('data.')\n ? clause.field\n : `data.${clause.field}`;\n filters.push({ field, op: clause.op, value: clause.value });\n }\n }\n\n const effectiveLimit = limit === undefined ? DEFAULT_QUERY_LIMIT : limit || undefined;\n return { strategy: 'query', filters, options: { limit: effectiveLimit, orderBy } };\n}\n","import { BUILTIN_FIELDS } from './internal/constants.js';\nimport type { QueryFilter } from './types.js';\n\n/**\n * Result of analyzing a query for collection scan risk.\n */\nexport interface QuerySafetyResult {\n /** Whether the query matches a known indexed pattern. */\n safe: boolean;\n /** Human-readable explanation when the query is unsafe. */\n reason?: string;\n}\n\n/**\n * Known composite index patterns that prevent full collection scans.\n * Each pattern is a set of field names that must ALL be present in the\n * query filters. Order within the set doesn't matter — what matters is\n * that the Firestore composite index covers the combination.\n *\n * These correspond to the indexes in firestore.indexes.json:\n * (aUid, axbType) — forward edge lookup\n * (axbType, bUid) — reverse edge lookup\n * (aType, axbType) — type-scoped queries + findNodes\n * (axbType, bType) — edge type + target type\n */\nconst SAFE_INDEX_PATTERNS: ReadonlyArray<ReadonlySet<string>> = [\n new Set(['aUid', 'axbType']),\n new Set(['axbType', 'bUid']),\n new Set(['aType', 'axbType']),\n new Set(['axbType', 'bType']),\n];\n\n/**\n * Analyzes a set of query filters to determine whether the query would\n * likely cause a full collection scan on Firestore Enterprise.\n *\n * A query is considered \"safe\" if the builtin fields present in the filters\n * match at least one known composite index pattern. 
Queries that only use\n * `data.*` fields without a safe base pattern are flagged as unsafe.\n */\nexport function analyzeQuerySafety(filters: QueryFilter[]): QuerySafetyResult {\n // Extract the set of builtin fields being filtered on (equality checks are\n // the primary index-usable operations, but we're generous here and count\n // any filter on a builtin field as potentially index-backed).\n const builtinFieldsPresent = new Set<string>();\n let hasDataFilters = false;\n\n for (const f of filters) {\n if (BUILTIN_FIELDS.has(f.field)) {\n builtinFieldsPresent.add(f.field);\n } else {\n // data.* or other non-builtin fields\n hasDataFilters = true;\n }\n }\n\n // Check if the builtin fields match any known safe index pattern.\n // A pattern is \"matched\" if all fields in the pattern are present in the query.\n for (const pattern of SAFE_INDEX_PATTERNS) {\n let matched = true;\n for (const field of pattern) {\n if (!builtinFieldsPresent.has(field)) {\n matched = false;\n break;\n }\n }\n if (matched) {\n // Even with data.* filters, the base index narrows the scan significantly.\n // The data.* filters are applied as post-filters on the index results.\n return { safe: true };\n }\n }\n\n // No safe pattern matched — build an explanation.\n const presentFields = [...builtinFieldsPresent];\n if (presentFields.length === 0 && hasDataFilters) {\n return {\n safe: false,\n reason:\n 'Query filters only use data.* fields with no builtin field constraints. ' +\n 'This requires a full collection scan. Add aType, aUid, axbType, bType, or bUid filters, ' +\n 'or set allowCollectionScan: true.',\n };\n }\n\n if (hasDataFilters) {\n return {\n safe: false,\n reason:\n `Query filters on [${presentFields.join(', ')}] do not match any indexed pattern. ` +\n 'data.* filters without an indexed base require a full collection scan. ' +\n `Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). ` +\n 'Set allowCollectionScan: true to override.',\n };\n }\n\n return {\n safe: false,\n reason:\n `Query filters on [${presentFields.join(', ')}] do not match any indexed pattern. ` +\n 'This may cause a full collection scan on Firestore Enterprise. ' +\n `Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). 
` +\n 'Set allowCollectionScan: true to override.',\n };\n}\n","import { computeEdgeDocId, computeNodeDocId } from './docid.js';\nimport { QuerySafetyError } from './errors.js';\nimport type { TransactionBackend, WritableRecord } from './internal/backend.js';\nimport { NODE_RELATION } from './internal/constants.js';\nimport { assertNoDeleteSentinels, flattenPatch } from './internal/write-plan.js';\nimport { migrateRecord, migrateRecords } from './migration.js';\nimport { buildEdgeQueryPlan, buildNodeQueryPlan } from './query.js';\nimport { analyzeQuerySafety } from './query-safety.js';\nimport type {\n FindEdgesParams,\n FindNodesParams,\n GraphRegistry,\n GraphTransaction,\n MigrationWriteBack,\n QueryFilter,\n ScanProtection,\n StoredGraphRecord,\n} from './types.js';\n\nfunction buildWritableNodeRecord(\n aType: string,\n uid: string,\n data: Record<string, unknown>,\n): WritableRecord {\n return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };\n}\n\nfunction buildWritableEdgeRecord(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n): WritableRecord {\n return { aType, aUid, axbType, bType, bUid, data };\n}\n\nexport class GraphTransactionImpl implements GraphTransaction {\n constructor(\n private readonly backend: TransactionBackend,\n private readonly registry?: GraphRegistry,\n private readonly scanProtection: ScanProtection = 'error',\n private readonly scopePath: string = '',\n private readonly globalWriteBack: MigrationWriteBack = 'off',\n ) {}\n\n async getNode(uid: string): Promise<StoredGraphRecord | null> {\n const docId = computeNodeDocId(uid);\n const record = await this.backend.getDoc(docId);\n if (!record || !this.registry) return record;\n const result = await migrateRecord(record, this.registry, this.globalWriteBack);\n if (result.migrated && result.writeBack !== 'off') {\n await this.backend.updateDoc(docId, {\n replaceData: result.record.data as Record<string, unknown>,\n v: result.record.v,\n });\n }\n return result.record;\n }\n\n async getEdge(aUid: string, axbType: string, bUid: string): Promise<StoredGraphRecord | null> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n const record = await this.backend.getDoc(docId);\n if (!record || !this.registry) return record;\n const result = await migrateRecord(record, this.registry, this.globalWriteBack);\n if (result.migrated && result.writeBack !== 'off') {\n await this.backend.updateDoc(docId, {\n replaceData: result.record.data as Record<string, unknown>,\n v: result.record.v,\n });\n }\n return result.record;\n }\n\n async edgeExists(aUid: string, axbType: string, bUid: string): Promise<boolean> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n const record = await this.backend.getDoc(docId);\n return record !== null;\n }\n\n private checkQuerySafety(filters: QueryFilter[], allowCollectionScan?: boolean): void {\n if (allowCollectionScan || this.scanProtection === 'off') return;\n\n const result = analyzeQuerySafety(filters);\n if (result.safe) return;\n\n if (this.scanProtection === 'error') {\n throw new QuerySafetyError(result.reason!);\n }\n\n console.warn(`[firegraph] Query safety warning: ${result.reason}`);\n }\n\n async findEdges(params: FindEdgesParams): Promise<StoredGraphRecord[]> {\n const plan = buildEdgeQueryPlan(params);\n let records: StoredGraphRecord[];\n if (plan.strategy === 'get') {\n const record = await this.backend.getDoc(plan.docId);\n records = record ? 
[record] : [];\n } else {\n this.checkQuerySafety(plan.filters, params.allowCollectionScan);\n records = await this.backend.query(plan.filters, plan.options);\n }\n return this.applyMigrations(records);\n }\n\n async findNodes(params: FindNodesParams): Promise<StoredGraphRecord[]> {\n const plan = buildNodeQueryPlan(params);\n let records: StoredGraphRecord[];\n if (plan.strategy === 'get') {\n const record = await this.backend.getDoc(plan.docId);\n records = record ? [record] : [];\n } else {\n this.checkQuerySafety(plan.filters, params.allowCollectionScan);\n records = await this.backend.query(plan.filters, plan.options);\n }\n return this.applyMigrations(records);\n }\n\n private async applyMigrations(records: StoredGraphRecord[]): Promise<StoredGraphRecord[]> {\n if (!this.registry || records.length === 0) return records;\n const results = await migrateRecords(records, this.registry, this.globalWriteBack);\n for (const result of results) {\n if (result.migrated && result.writeBack !== 'off') {\n const docId =\n result.record.axbType === NODE_RELATION\n ? computeNodeDocId(result.record.aUid)\n : computeEdgeDocId(result.record.aUid, result.record.axbType, result.record.bUid);\n await this.backend.updateDoc(docId, {\n replaceData: result.record.data as Record<string, unknown>,\n v: result.record.v,\n });\n }\n }\n return results.map((r) => r.record);\n }\n\n async putNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void> {\n await this.writeNode(aType, uid, data, 'merge');\n }\n\n async putEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n await this.writeEdge(aType, aUid, axbType, bType, bUid, data, 'merge');\n }\n\n async replaceNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void> {\n await this.writeNode(aType, uid, data, 'replace');\n }\n\n async replaceEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n await this.writeEdge(aType, aUid, axbType, bType, bUid, data, 'replace');\n }\n\n private async writeNode(\n aType: string,\n uid: string,\n data: Record<string, unknown>,\n mode: 'merge' | 'replace',\n ): Promise<void> {\n assertNoDeleteSentinels(data, mode === 'replace' ? 'replaceNode' : 'putNode');\n if (this.registry) {\n this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);\n }\n const docId = computeNodeDocId(uid);\n const record = buildWritableNodeRecord(aType, uid, data);\n if (this.registry) {\n const entry = this.registry.lookup(aType, NODE_RELATION, aType);\n if (entry?.schemaVersion && entry.schemaVersion > 0) {\n record.v = entry.schemaVersion;\n }\n }\n await this.backend.setDoc(docId, record, mode);\n }\n\n private async writeEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n mode: 'merge' | 'replace',\n ): Promise<void> {\n assertNoDeleteSentinels(data, mode === 'replace' ? 
'replaceEdge' : 'putEdge');\n if (this.registry) {\n this.registry.validate(aType, axbType, bType, data, this.scopePath);\n }\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n const record = buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data);\n if (this.registry) {\n const entry = this.registry.lookup(aType, axbType, bType);\n if (entry?.schemaVersion && entry.schemaVersion > 0) {\n record.v = entry.schemaVersion;\n }\n }\n await this.backend.setDoc(docId, record, mode);\n }\n\n async updateNode(uid: string, data: Record<string, unknown>): Promise<void> {\n const docId = computeNodeDocId(uid);\n await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });\n }\n\n async updateEdge(\n aUid: string,\n axbType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });\n }\n\n async removeNode(uid: string): Promise<void> {\n const docId = computeNodeDocId(uid);\n await this.backend.deleteDoc(docId);\n }\n\n async removeEdge(aUid: string, axbType: string, bUid: string): Promise<void> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n await this.backend.deleteDoc(docId);\n }\n}\n","import { GraphBatchImpl } from './batch.js';\nimport { computeEdgeDocId, computeNodeDocId } from './docid.js';\nimport {\n createBootstrapRegistry,\n createRegistryFromGraph,\n generateDeterministicUid,\n META_EDGE_TYPE,\n META_NODE_TYPE,\n} from './dynamic-registry.js';\nimport { DynamicRegistryError, FiregraphError, QuerySafetyError } from './errors.js';\nimport type { BackendCapabilities, StorageBackend, WritableRecord } from './internal/backend.js';\nimport { NODE_RELATION } from './internal/constants.js';\nimport { assertNoDeleteSentinels, flattenPatch } from './internal/write-plan.js';\nimport type { MigrationResult } from './migration.js';\nimport { migrateRecord, migrateRecords } from './migration.js';\nimport { buildEdgeQueryPlan, buildNodeQueryPlan } from './query.js';\nimport { analyzeQuerySafety } from './query-safety.js';\nimport { createMergedRegistry } from './registry.js';\nimport { precompileSource } from './sandbox.js';\nimport { GraphTransactionImpl } from './transaction.js';\nimport type {\n AggregateResult,\n AggregateSpec,\n BulkOptions,\n BulkResult,\n BulkUpdatePatch,\n Capability,\n CascadeResult,\n CoreGraphClient,\n DefineTypeOptions,\n DynamicGraphClient,\n DynamicGraphMethods,\n DynamicRegistryConfig,\n EdgeTopology,\n EngineTraversalParams,\n EngineTraversalResult,\n ExpandParams,\n ExpandResult,\n FindEdgesParams,\n FindEdgesProjectedParams,\n FindNearestParams,\n FindNodesParams,\n FullTextSearchParams,\n GeoSearchParams,\n GraphBatch,\n GraphClient,\n GraphClientOptions,\n GraphReader,\n GraphRegistry,\n GraphTransaction,\n MigrationExecutor,\n MigrationFn,\n MigrationWriteBack,\n ProjectedRow,\n QueryFilter,\n QueryOptions,\n ScanProtection,\n StoredGraphRecord,\n} from './types.js';\n\nconst RESERVED_TYPE_NAMES = new Set([META_NODE_TYPE, META_EDGE_TYPE]);\n\nfunction buildWritableNodeRecord(\n aType: string,\n uid: string,\n data: Record<string, unknown>,\n): WritableRecord {\n return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };\n}\n\nfunction buildWritableEdgeRecord(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n): WritableRecord {\n return { aType, aUid, axbType, bType, bUid, data };\n}\n\nexport 
class GraphClientImpl implements CoreGraphClient, DynamicGraphMethods {\n readonly scanProtection: ScanProtection;\n\n /**\n * Capability set of the underlying backend. Mirrors `backend.capabilities`\n * verbatim so callers can portability-check (`client.capabilities.has(\n * 'query.join')`) without reaching for the backend handle. Static for the\n * lifetime of the client.\n */\n get capabilities(): BackendCapabilities {\n return this.backend.capabilities;\n }\n\n // Static mode\n private readonly staticRegistry?: GraphRegistry;\n\n // Dynamic mode\n private readonly dynamicConfig?: DynamicRegistryConfig;\n private readonly bootstrapRegistry?: GraphRegistry;\n private dynamicRegistry?: GraphRegistry;\n private readonly metaBackend?: StorageBackend;\n\n // Migration settings\n private readonly globalWriteBack: MigrationWriteBack;\n private readonly migrationSandbox?: MigrationExecutor;\n\n constructor(\n private readonly backend: StorageBackend,\n options?: GraphClientOptions,\n /** @internal Optional pre-built meta-backend (used by subgraph clones). */\n metaBackend?: StorageBackend,\n ) {\n this.globalWriteBack = options?.migrationWriteBack ?? 'off';\n this.migrationSandbox = options?.migrationSandbox;\n\n if (options?.registryMode) {\n this.dynamicConfig = options.registryMode;\n this.bootstrapRegistry = createBootstrapRegistry();\n if (options.registry) {\n this.staticRegistry = options.registry;\n }\n this.metaBackend = metaBackend;\n } else {\n this.staticRegistry = options?.registry;\n }\n\n this.scanProtection = options?.scanProtection ?? 'error';\n }\n\n // ---------------------------------------------------------------------------\n // Backend access (exposed for traversal helpers and subgraph cloning)\n // ---------------------------------------------------------------------------\n\n /** @internal */\n getBackend(): StorageBackend {\n return this.backend;\n }\n\n /**\n * Snapshot of the currently-effective registry. Returns the merged view\n * used for domain-type validation and migration — in dynamic mode this is\n * `dynamicRegistry ?? staticRegistry ?? bootstrapRegistry`, so callers see\n * updates after `reloadRegistry()` without having to re-resolve anything.\n *\n * Exposed for backends that need topology access during bulk operations\n * (e.g. the Cloudflare DO backend's cross-DO cascade). Not part of the\n * public `GraphClient` surface.\n *\n * @internal\n */\n getRegistrySnapshot(): GraphRegistry | undefined {\n return this.getCombinedRegistry();\n }\n\n // ---------------------------------------------------------------------------\n // Registry routing\n // ---------------------------------------------------------------------------\n\n private getRegistryForType(aType: string): GraphRegistry | undefined {\n if (!this.dynamicConfig) return this.staticRegistry;\n\n if (aType === META_NODE_TYPE || aType === META_EDGE_TYPE) {\n return this.bootstrapRegistry;\n }\n\n return this.dynamicRegistry ?? this.staticRegistry ?? this.bootstrapRegistry;\n }\n\n private getBackendForType(aType: string): StorageBackend {\n if (this.metaBackend && (aType === META_NODE_TYPE || aType === META_EDGE_TYPE)) {\n return this.metaBackend;\n }\n return this.backend;\n }\n\n private getCombinedRegistry(): GraphRegistry | undefined {\n if (!this.dynamicConfig) return this.staticRegistry;\n return this.dynamicRegistry ?? this.staticRegistry ?? 
this.bootstrapRegistry;\n }\n\n // ---------------------------------------------------------------------------\n // Query safety\n // ---------------------------------------------------------------------------\n\n private checkQuerySafety(filters: QueryFilter[], allowCollectionScan?: boolean): void {\n if (allowCollectionScan || this.scanProtection === 'off') return;\n\n const result = analyzeQuerySafety(filters);\n if (result.safe) return;\n\n if (this.scanProtection === 'error') {\n throw new QuerySafetyError(result.reason!);\n }\n\n console.warn(`[firegraph] Query safety warning: ${result.reason}`);\n }\n\n // ---------------------------------------------------------------------------\n // Migration helpers\n // ---------------------------------------------------------------------------\n\n private async applyMigration(\n record: StoredGraphRecord,\n docId: string,\n ): Promise<StoredGraphRecord> {\n const registry = this.getCombinedRegistry();\n if (!registry) return record;\n\n const result = await migrateRecord(record, registry, this.globalWriteBack);\n if (result.migrated) {\n this.handleWriteBack(result, docId);\n }\n return result.record;\n }\n\n private async applyMigrations(records: StoredGraphRecord[]): Promise<StoredGraphRecord[]> {\n const registry = this.getCombinedRegistry();\n if (!registry || records.length === 0) return records;\n\n const results = await migrateRecords(records, registry, this.globalWriteBack);\n for (const result of results) {\n if (result.migrated) {\n const docId =\n result.record.axbType === NODE_RELATION\n ? computeNodeDocId(result.record.aUid)\n : computeEdgeDocId(result.record.aUid, result.record.axbType, result.record.bUid);\n this.handleWriteBack(result, docId);\n }\n }\n return results.map((r) => r.record);\n }\n\n /**\n * Fire-and-forget write-back for a migrated record. Both `'eager'` and\n * `'background'` are non-blocking; the difference is the log level on\n * failure. 
For synchronous write-back, use a transaction — see\n * `GraphTransactionImpl`.\n */\n private handleWriteBack(result: MigrationResult, docId: string): void {\n if (result.writeBack === 'off') return;\n\n const doWriteBack = async () => {\n try {\n await this.backend.updateDoc(docId, {\n replaceData: result.record.data as Record<string, unknown>,\n v: result.record.v,\n });\n } catch (err: unknown) {\n const msg = `[firegraph] Migration write-back failed for ${docId}: ${(err as Error).message}`;\n if (result.writeBack === 'eager') {\n console.error(msg);\n } else {\n console.warn(msg);\n }\n }\n };\n\n void doWriteBack();\n }\n\n // ---------------------------------------------------------------------------\n // GraphReader\n // ---------------------------------------------------------------------------\n\n async getNode(uid: string): Promise<StoredGraphRecord | null> {\n const docId = computeNodeDocId(uid);\n const record = await this.backend.getDoc(docId);\n if (!record) return null;\n return this.applyMigration(record, docId);\n }\n\n async getEdge(aUid: string, axbType: string, bUid: string): Promise<StoredGraphRecord | null> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n const record = await this.backend.getDoc(docId);\n if (!record) return null;\n return this.applyMigration(record, docId);\n }\n\n async edgeExists(aUid: string, axbType: string, bUid: string): Promise<boolean> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n const record = await this.backend.getDoc(docId);\n return record !== null;\n }\n\n async findEdges(params: FindEdgesParams): Promise<StoredGraphRecord[]> {\n const plan = buildEdgeQueryPlan(params);\n let records: StoredGraphRecord[];\n if (plan.strategy === 'get') {\n const record = await this.backend.getDoc(plan.docId);\n records = record ? [record] : [];\n } else {\n this.checkQuerySafety(plan.filters, params.allowCollectionScan);\n records = await this.backend.query(plan.filters, plan.options);\n }\n return this.applyMigrations(records);\n }\n\n async findNodes(params: FindNodesParams): Promise<StoredGraphRecord[]> {\n const plan = buildNodeQueryPlan(params);\n let records: StoredGraphRecord[];\n if (plan.strategy === 'get') {\n const record = await this.backend.getDoc(plan.docId);\n records = record ? 
[record] : [];\n } else {\n this.checkQuerySafety(plan.filters, params.allowCollectionScan);\n records = await this.backend.query(plan.filters, plan.options);\n }\n return this.applyMigrations(records);\n }\n\n // ---------------------------------------------------------------------------\n // GraphWriter\n // ---------------------------------------------------------------------------\n\n async putNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void> {\n await this.writeNode(aType, uid, data, 'merge');\n }\n\n async putEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n await this.writeEdge(aType, aUid, axbType, bType, bUid, data, 'merge');\n }\n\n async replaceNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void> {\n await this.writeNode(aType, uid, data, 'replace');\n }\n\n async replaceEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n await this.writeEdge(aType, aUid, axbType, bType, bUid, data, 'replace');\n }\n\n private async writeNode(\n aType: string,\n uid: string,\n data: Record<string, unknown>,\n mode: 'merge' | 'replace',\n ): Promise<void> {\n assertNoDeleteSentinels(data, mode === 'replace' ? 'replaceNode' : 'putNode');\n const registry = this.getRegistryForType(aType);\n if (registry) {\n registry.validate(aType, NODE_RELATION, aType, data, this.backend.scopePath);\n }\n const backend = this.getBackendForType(aType);\n const docId = computeNodeDocId(uid);\n const record = buildWritableNodeRecord(aType, uid, data);\n if (registry) {\n const entry = registry.lookup(aType, NODE_RELATION, aType);\n if (entry?.schemaVersion && entry.schemaVersion > 0) {\n record.v = entry.schemaVersion;\n }\n }\n await backend.setDoc(docId, record, mode);\n }\n\n private async writeEdge(\n aType: string,\n aUid: string,\n axbType: string,\n bType: string,\n bUid: string,\n data: Record<string, unknown>,\n mode: 'merge' | 'replace',\n ): Promise<void> {\n assertNoDeleteSentinels(data, mode === 'replace' ? 
'replaceEdge' : 'putEdge');\n const registry = this.getRegistryForType(aType);\n if (registry) {\n registry.validate(aType, axbType, bType, data, this.backend.scopePath);\n }\n const backend = this.getBackendForType(aType);\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n const record = buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data);\n if (registry) {\n const entry = registry.lookup(aType, axbType, bType);\n if (entry?.schemaVersion && entry.schemaVersion > 0) {\n record.v = entry.schemaVersion;\n }\n }\n await backend.setDoc(docId, record, mode);\n }\n\n async updateNode(uid: string, data: Record<string, unknown>): Promise<void> {\n const docId = computeNodeDocId(uid);\n await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });\n }\n\n async updateEdge(\n aUid: string,\n axbType: string,\n bUid: string,\n data: Record<string, unknown>,\n ): Promise<void> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });\n }\n\n async removeNode(uid: string): Promise<void> {\n const docId = computeNodeDocId(uid);\n await this.backend.deleteDoc(docId);\n }\n\n async removeEdge(aUid: string, axbType: string, bUid: string): Promise<void> {\n const docId = computeEdgeDocId(aUid, axbType, bUid);\n await this.backend.deleteDoc(docId);\n }\n\n // ---------------------------------------------------------------------------\n // Transactions & Batches\n // ---------------------------------------------------------------------------\n\n async runTransaction<T>(fn: (tx: GraphTransaction) => Promise<T>): Promise<T> {\n return this.backend.runTransaction(async (txBackend) => {\n const graphTx = new GraphTransactionImpl(\n txBackend,\n this.getCombinedRegistry(),\n this.scanProtection,\n this.backend.scopePath,\n this.globalWriteBack,\n );\n return fn(graphTx);\n });\n }\n\n batch(): GraphBatch {\n return new GraphBatchImpl(\n this.backend.createBatch(),\n this.getCombinedRegistry(),\n this.backend.scopePath,\n );\n }\n\n // ---------------------------------------------------------------------------\n // Subgraph\n // ---------------------------------------------------------------------------\n\n subgraph(parentNodeUid: string, name: string = 'graph'): GraphClient {\n if (!parentNodeUid || parentNodeUid.includes('/')) {\n throw new FiregraphError(\n `Invalid parentNodeUid for subgraph: \"${parentNodeUid}\". ` +\n 'Must be a non-empty string without \"/\".',\n 'INVALID_SUBGRAPH',\n );\n }\n if (name.includes('/')) {\n throw new FiregraphError(\n `Subgraph name must not contain \"/\": got \"${name}\". 
` +\n 'Use chained .subgraph() calls for nested subgraphs.',\n 'INVALID_SUBGRAPH',\n );\n }\n\n const childBackend = this.backend.subgraph(parentNodeUid, name);\n\n return new GraphClientImpl(\n childBackend,\n {\n registry: this.getCombinedRegistry(),\n scanProtection: this.scanProtection,\n migrationWriteBack: this.globalWriteBack,\n migrationSandbox: this.migrationSandbox,\n },\n // Subgraphs do not have meta-backends; meta lives only at the root.\n );\n }\n\n // ---------------------------------------------------------------------------\n // Collection group query\n // ---------------------------------------------------------------------------\n\n async findEdgesGlobal(\n params: FindEdgesParams,\n collectionName?: string,\n ): Promise<StoredGraphRecord[]> {\n if (!this.backend.findEdgesGlobal) {\n throw new FiregraphError(\n 'findEdgesGlobal() is not supported by the current storage backend.',\n 'UNSUPPORTED_OPERATION',\n );\n }\n const plan = buildEdgeQueryPlan(params);\n if (plan.strategy === 'get') {\n throw new FiregraphError(\n 'findEdgesGlobal() requires a query, not a direct document lookup. ' +\n 'Omit one of aUid/axbType/bUid to force a query strategy.',\n 'INVALID_QUERY',\n );\n }\n this.checkQuerySafety(plan.filters, params.allowCollectionScan);\n const records = await this.backend.findEdgesGlobal(params, collectionName);\n return this.applyMigrations(records);\n }\n\n // ---------------------------------------------------------------------------\n // Aggregate query (capability: query.aggregate)\n // ---------------------------------------------------------------------------\n\n async aggregate<A extends AggregateSpec>(\n params: FindEdgesParams & { aggregates: A },\n ): Promise<AggregateResult<A>> {\n if (!this.backend.aggregate) {\n throw new FiregraphError(\n 'aggregate() is not supported by the current storage backend.',\n 'UNSUPPORTED_OPERATION',\n );\n }\n\n // Allow zero-filter aggregates (e.g. count(*) over the whole collection).\n // findEdges-style buildEdgeQueryPlan rejects empty filter sets because a\n // bare findEdges with no identifying fields would be a full collection\n // scan; aggregate() is the legitimate use case for that shape.\n const hasAnyFilter =\n params.aType ||\n params.aUid ||\n params.axbType ||\n params.bType ||\n params.bUid ||\n (params.where && params.where.length > 0);\n\n if (!hasAnyFilter) {\n this.checkQuerySafety([], params.allowCollectionScan);\n const result = await this.backend.aggregate(params.aggregates, []);\n return result as AggregateResult<A>;\n }\n\n const plan = buildEdgeQueryPlan(params);\n if (plan.strategy === 'get') {\n throw new FiregraphError(\n 'aggregate() requires a query, not a direct document lookup. 
' +\n 'Omit one of aUid/axbType/bUid to force a query strategy.',\n 'INVALID_QUERY',\n );\n }\n this.checkQuerySafety(plan.filters, params.allowCollectionScan);\n const result = await this.backend.aggregate(params.aggregates, plan.filters);\n return result as AggregateResult<A>;\n }\n\n // ---------------------------------------------------------------------------\n // Bulk operations\n // ---------------------------------------------------------------------------\n\n async removeNodeCascade(uid: string, options?: BulkOptions): Promise<CascadeResult> {\n return this.backend.removeNodeCascade(uid, this, options);\n }\n\n async bulkRemoveEdges(params: FindEdgesParams, options?: BulkOptions): Promise<BulkResult> {\n return this.backend.bulkRemoveEdges(params, this, options);\n }\n\n // ---------------------------------------------------------------------------\n // Server-side DML (capability: query.dml)\n // ---------------------------------------------------------------------------\n\n /**\n * Single-statement bulk DELETE. Translates `params` to a filter list via\n * `buildEdgeQueryPlan` (the same plan `findEdges` uses) and dispatches to\n * `backend.bulkDelete`. The fetch-then-delete loop in `bulkRemoveEdges`\n * is the cap-less fallback; this method is the fast path on backends\n * declaring `query.dml`.\n *\n * Scan-protection rules match `findEdges`: a query with no identifying\n * fields requires `allowCollectionScan: true` to pass. A bare-empty\n * filter set (no `aType`, `aUid`, etc., no `where`) is allowed at this\n * layer — shared SQLite bounds the blast radius via its leading `scope`\n * predicate — but the DO RPC backend rejects empty filters at the wire\n * boundary as defense-in-depth. To wipe a routed subgraph DO, use\n * `removeNodeCascade` on the parent node instead.\n */\n async bulkDelete(params: FindEdgesParams, options?: BulkOptions): Promise<BulkResult> {\n if (!this.backend.bulkDelete) {\n throw new FiregraphError(\n 'bulkDelete() is not supported by the current storage backend. ' +\n 'Fall back to bulkRemoveEdges() for backends without query.dml ' +\n '(e.g. Firestore Standard).',\n 'UNSUPPORTED_OPERATION',\n );\n }\n const filters = this.buildDmlFilters(params);\n return this.backend.bulkDelete(filters, options);\n }\n\n /**\n * Single-statement bulk UPDATE. Same translation path as `bulkDelete`,\n * but the patch is deep-merged into each matching row's `data` via the\n * shared `flattenPatch` pipeline. 
Identifying columns are immutable\n * through this path (see `BulkUpdatePatch` JSDoc).\n *\n * Empty-patch rejection happens inside the backend (`compileBulkUpdate`)\n * — a `data: {}` payload would only rewrite `updated_at`, which is\n * almost certainly a bug.\n */\n async bulkUpdate(\n params: FindEdgesParams,\n patch: BulkUpdatePatch,\n options?: BulkOptions,\n ): Promise<BulkResult> {\n if (!this.backend.bulkUpdate) {\n throw new FiregraphError(\n 'bulkUpdate() is not supported by the current storage backend.',\n 'UNSUPPORTED_OPERATION',\n );\n }\n const filters = this.buildDmlFilters(params);\n return this.backend.bulkUpdate(filters, patch, options);\n }\n\n // ---------------------------------------------------------------------------\n // Multi-source fan-out (capability: query.join)\n // ---------------------------------------------------------------------------\n\n /**\n * Fan out from `params.sources` over a single edge type in one round trip.\n * On backends without `query.join`, throws `UNSUPPORTED_OPERATION` — the\n * cap-less fallback is the per-source `findEdges` loop, which lives in\n * `traverse.ts` (the higher-level traversal walker) rather than here.\n *\n * `expand()` is intentionally edge-type-only — the source set is a flat\n * UID list and the hop matches one `axbType`. Multi-axbType expansions\n * become multiple `expand()` calls, one per relation.\n *\n * `params.sources.length === 0` short-circuits to an empty result. The\n * backend never sees the call. (`compileExpand` itself rejects empty\n * because `IN ()` is not valid SQL.)\n */\n async expand(params: ExpandParams): Promise<ExpandResult> {\n if (!this.backend.expand) {\n throw new FiregraphError(\n 'expand() is not supported by the current storage backend. ' +\n 'Backends without `query.join` can use createTraversal() instead — ' +\n 'the per-hop loop is functionally equivalent (just slower).',\n 'UNSUPPORTED_OPERATION',\n );\n }\n if (params.sources.length === 0) {\n return params.hydrate ? { edges: [], targets: [] } : { edges: [] };\n }\n return this.backend.expand(params);\n }\n\n // ---------------------------------------------------------------------------\n // Engine-level multi-hop traversal (capability: traversal.serverSide)\n // ---------------------------------------------------------------------------\n\n /**\n * Compile a multi-hop traversal spec into one server-side nested\n * Pipeline and dispatch a single round trip.\n *\n * Backends declaring `traversal.serverSide` (Firestore Enterprise\n * today) install this method; everywhere else, it throws\n * `UNSUPPORTED_OPERATION`. The capability gate matches the type-level\n * surface — `GraphClient<C>` only exposes `runEngineTraversal` when\n * `'traversal.serverSide' extends C`.\n *\n * Most callers should not invoke this method directly; the\n * `createTraversal(...).run()` builder routes through it\n * automatically when `engineTraversal: 'auto'` (the default) and\n * the spec is eligible per `firestore-traverse-compiler.ts`. Calling\n * directly is appropriate for benchmarking or for callers that have\n * already shaped their hop chain into the strict\n * `EngineTraversalParams` shape.\n *\n * `params.sources.length === 0` short-circuits to empty per-hop\n * arrays. The backend never sees the call.\n */\n async runEngineTraversal(params: EngineTraversalParams): Promise<EngineTraversalResult> {\n if (!this.backend.runEngineTraversal) {\n throw new FiregraphError(\n 'runEngineTraversal() is not supported by the current storage backend. 
' +\n 'Backends without `traversal.serverSide` can use createTraversal() instead — ' +\n 'the per-hop loop is functionally equivalent for in-graph specs (different ' +\n 'round-trip profile).',\n 'UNSUPPORTED_OPERATION',\n );\n }\n if (params.sources.length === 0) {\n return {\n hops: params.hops.map(() => ({ edges: [], sourceCount: 0 })),\n totalReads: 0,\n };\n }\n return this.backend.runEngineTraversal(params);\n }\n\n // ---------------------------------------------------------------------------\n // Server-side projection (capability: query.select)\n // ---------------------------------------------------------------------------\n\n /**\n * Server-side projection — fetch only the requested fields from each\n * matching edge. The backend translates the call into a projecting query\n * (`SELECT json_extract(...)` on SQLite/DO, `Query.select(...)` on\n * Firestore Standard, classic projection on Enterprise) so the wire\n * payload is reduced to just the requested fields.\n *\n * Resolution rules for `select` (mirrored across all backends):\n *\n * - Built-in envelope fields (`aType`, `aUid`, `axbType`, `bType`,\n * `bUid`, `createdAt`, `updatedAt`, `v`) → resolve to the typed\n * column / Firestore field directly.\n * - `'data'` literal → returns the whole user payload.\n * - `'data.<x>'` → explicit nested path, returned at the same shape.\n * - bare name → rewritten to `data.<name>` (the canonical \"give me a\n * few keys out of the JSON payload\" shape).\n *\n * Empty `select: []` is rejected with `INVALID_QUERY`. Duplicate entries\n * are de-duped (first-occurrence order preserved); the result row carries\n * one slot per unique field.\n *\n * Migrations are *not* applied to the result. The caller asked for a\n * partial shape, and rehydrating it through the migration pipeline would\n * require synthesising every absent field — see\n * `StorageBackend.findEdgesProjected` for the rationale.\n *\n * Scan protection follows the `findEdges` rules: a query with no\n * identifying fields requires `allowCollectionScan: true` to pass. The\n * cap-less fallback would be `findEdges` + JS-side projection, but that\n * defeats the wire-payload reduction; backends without `query.select`\n * throw `UNSUPPORTED_OPERATION` rather than silently materialising full\n * rows.\n */\n async findEdgesProjected<F extends ReadonlyArray<string>>(\n params: FindEdgesProjectedParams<F>,\n ): Promise<Array<ProjectedRow<F>>> {\n if (!this.backend.findEdgesProjected) {\n throw new FiregraphError(\n 'findEdgesProjected() is not supported by the current storage backend. ' +\n 'There is no client-side fallback because the wire-payload reduction ' +\n 'is the entire point of the API — use findEdges() and project in JS ' +\n 'if the backend does not declare `query.select`.',\n 'UNSUPPORTED_OPERATION',\n );\n }\n if (params.select.length === 0) {\n throw new FiregraphError(\n 'findEdgesProjected() requires a non-empty `select` list.',\n 'INVALID_QUERY',\n );\n }\n\n // Reuse the same plan + scan-safety pipeline as `findEdges` so the\n // identifier-vs-where rules and `allowCollectionScan` semantics behave\n // identically. 
A GET-shape (all three identifiers, no `where`) is also\n // allowed here — projection over a single edge is a meaningful shape.\n // We translate it to the equivalent equality filter list because the\n // backend `findEdgesProjected` contract takes filters, not a docId.\n const plan = buildEdgeQueryPlan(params);\n let filters: QueryFilter[];\n let options: QueryOptions | undefined;\n if (plan.strategy === 'get') {\n // GET means `aUid`, `axbType`, `bUid` are all set and there are no\n // `where` clauses. Synthesize the equivalent equality filters so the\n // backend can issue a single projecting query whose WHERE clause\n // resolves to the same row the docId would have looked up.\n filters = [\n { field: 'aUid', op: '==', value: params.aUid! },\n { field: 'axbType', op: '==', value: params.axbType! },\n { field: 'bUid', op: '==', value: params.bUid! },\n ];\n if (params.aType) filters.push({ field: 'aType', op: '==', value: params.aType });\n if (params.bType) filters.push({ field: 'bType', op: '==', value: params.bType });\n options = undefined;\n } else {\n filters = plan.filters;\n options = plan.options;\n }\n this.checkQuerySafety(filters, params.allowCollectionScan);\n const rows = await this.backend.findEdgesProjected(params.select, filters, options);\n return rows as Array<ProjectedRow<F>>;\n }\n\n /**\n * Native vector / nearest-neighbour search (capability `search.vector`).\n *\n * Resolves to the top-K records by similarity, sorted nearest-first\n * (`EUCLIDEAN` / `COSINE`) or highest-first (`DOT_PRODUCT`). The wrapper\n * is intentionally thin: capability check, scan-protection, then forward\n * `params` verbatim to the backend. All field-path normalisation and\n * SDK-shape validation lives in the shared\n * `runFirestoreFindNearest` helper that both Firestore editions call —\n * keeping it there means the validation surface stays in lockstep with\n * the SDK call site, regardless of which backend is plugged in.\n *\n * Migrations are NOT applied. The vector index walked the raw stored\n * shape; rehydrating each row through the migration pipeline would\n * change the candidate set the index already chose. If you need\n * migrated shape, follow up with `getNode` / `findEdges` on the\n * returned UIDs — those paths apply migrations normally.\n *\n * Scan-protection mirrors `findEdges`: if no identifying filters\n * (`aType` / `axbType` / `bType`) and no `where` clauses are supplied,\n * the request must opt in via `allowCollectionScan: true`. The ANN\n * query still walks the candidate set the WHERE clause produces, so\n * an unfiltered nearest-neighbour search over a million-row collection\n * is the same scan trap as an unfiltered `findEdges`.\n *\n * Backends without `search.vector` throw `UNSUPPORTED_OPERATION` —\n * there is no client-side fallback because emulating ANN over the\n * generic backend surface (`findEdges` + JS-side cosine) doesn't scale\n * past trivial datasets and would give callers the wrong mental model\n * about cost.\n */\n async findNearest(params: FindNearestParams): Promise<StoredGraphRecord[]> {\n if (!this.backend.findNearest) {\n throw new FiregraphError(\n 'findNearest() is not supported by the current storage backend. ' +\n 'Vector search requires a backend that declares `search.vector` ' +\n '(currently Firestore Standard and Enterprise). 
There is no ' +\n 'client-side fallback because emulating ANN on top of the generic ' +\n 'backend surface does not scale beyond toy datasets.',\n 'UNSUPPORTED_OPERATION',\n );\n }\n\n // Build the same filter list the helper passes to `applyFiltersToQuery`\n // so scan-protection sees exactly what the index will narrow on.\n // Identifiers come first (matching `buildVectorFilters` in the helper),\n // user-supplied `where` follows. We do NOT use `buildEdgeQueryPlan`\n // here — there is no GET-strategy notion for vector search; the\n // identifying-field filters are pure narrowing for the ANN walk.\n const filters: QueryFilter[] = [];\n if (params.aType) filters.push({ field: 'aType', op: '==', value: params.aType });\n if (params.axbType) filters.push({ field: 'axbType', op: '==', value: params.axbType });\n if (params.bType) filters.push({ field: 'bType', op: '==', value: params.bType });\n if (params.where) filters.push(...params.where);\n this.checkQuerySafety(filters, params.allowCollectionScan);\n\n return this.backend.findNearest(params);\n }\n\n /**\n * Native full-text search (capability `search.fullText`).\n *\n * Returns the top-N records by relevance, ordered by the search\n * index's score. Only Firestore Enterprise declares this capability\n * today — the underlying Pipelines `search({ query: documentMatches(...) })`\n * stage requires Enterprise's FTS index. Standard does not declare\n * the cap (FTS is an Enterprise-only product feature, not a\n * typed-API gap), and the SQLite-shaped backends have no native\n * FTS index. Backends without `search.fullText` throw\n * `UNSUPPORTED_OPERATION` from this wrapper.\n *\n * Scan-protection mirrors `findNearest`: a search with no\n * identifying filters (`aType` / `axbType` / `bType`) walks every\n * row the index scored, so the request must opt in via\n * `allowCollectionScan: true`.\n *\n * Migrations are NOT applied. The FTS index walked the raw stored\n * shape; rehydrating each row through the migration pipeline would\n * change the candidate set the index already scored. If you need\n * migrated shape, follow up with `getNode` / `findEdges` on the\n * returned UIDs.\n */\n async fullTextSearch(params: FullTextSearchParams): Promise<StoredGraphRecord[]> {\n if (!this.backend.fullTextSearch) {\n throw new FiregraphError(\n 'fullTextSearch() is not supported by the current storage backend. ' +\n 'Full-text search requires a backend that declares `search.fullText` ' +\n '(currently Firestore Enterprise only — FTS is an Enterprise product ' +\n 'feature). There is no client-side fallback because emulating FTS over ' +\n 'the generic backend surface would not scale beyond toy datasets.',\n 'UNSUPPORTED_OPERATION',\n );\n }\n const filters: QueryFilter[] = [];\n if (params.aType) filters.push({ field: 'aType', op: '==', value: params.aType });\n if (params.axbType) filters.push({ field: 'axbType', op: '==', value: params.axbType });\n if (params.bType) filters.push({ field: 'bType', op: '==', value: params.bType });\n this.checkQuerySafety(filters, params.allowCollectionScan);\n return this.backend.fullTextSearch(params);\n }\n\n /**\n * Native geospatial distance search (capability `search.geo`).\n *\n * Returns rows whose `geoField` lies within `radiusMeters` of\n * `point`, ordered nearest-first by default. Only Firestore\n * Enterprise declares this capability — same Enterprise-only\n * gating as `fullTextSearch`. 
Backends without `search.geo` throw\n * `UNSUPPORTED_OPERATION` from this wrapper.\n *\n * Scan-protection mirrors `findNearest` and `fullTextSearch`.\n *\n * Migrations are NOT applied — same rationale as the other search\n * extensions.\n */\n async geoSearch(params: GeoSearchParams): Promise<StoredGraphRecord[]> {\n if (!this.backend.geoSearch) {\n throw new FiregraphError(\n 'geoSearch() is not supported by the current storage backend. ' +\n 'Geospatial search requires a backend that declares `search.geo` ' +\n '(currently Firestore Enterprise only — geo queries are an ' +\n 'Enterprise product feature). There is no client-side fallback ' +\n 'because emulating geo over the generic backend surface (haversine ' +\n 'over `findEdges`) would not scale beyond trivial datasets.',\n 'UNSUPPORTED_OPERATION',\n );\n }\n const filters: QueryFilter[] = [];\n if (params.aType) filters.push({ field: 'aType', op: '==', value: params.aType });\n if (params.axbType) filters.push({ field: 'axbType', op: '==', value: params.axbType });\n if (params.bType) filters.push({ field: 'bType', op: '==', value: params.bType });\n this.checkQuerySafety(filters, params.allowCollectionScan);\n return this.backend.geoSearch(params);\n }\n\n /**\n * Translate a `FindEdgesParams` into the `QueryFilter[]` shape the\n * backend `bulkDelete` / `bulkUpdate` methods expect. Mirrors the\n * `aggregate()` plan: a bare-empty params object becomes an empty\n * filter list (after a scan-protection check); a GET-shape (all three\n * identifiers) is rejected so we never silently turn a single-row\n * lookup into a server-side DML; otherwise we run `buildEdgeQueryPlan`\n * and surface its filters.\n */\n private buildDmlFilters(params: FindEdgesParams): QueryFilter[] {\n const hasAnyFilter =\n params.aType ||\n params.aUid ||\n params.axbType ||\n params.bType ||\n params.bUid ||\n (params.where && params.where.length > 0);\n\n if (!hasAnyFilter) {\n this.checkQuerySafety([], params.allowCollectionScan);\n return [];\n }\n\n const plan = buildEdgeQueryPlan(params);\n if (plan.strategy === 'get') {\n throw new FiregraphError(\n 'bulkDelete() / bulkUpdate() require a query, not a direct document lookup. ' +\n 'Use removeEdge() / updateEdge() for single-row operations, or omit one of ' +\n 'aUid/axbType/bUid to force a query strategy.',\n 'INVALID_QUERY',\n );\n }\n this.checkQuerySafety(plan.filters, params.allowCollectionScan);\n return plan.filters;\n }\n\n // ---------------------------------------------------------------------------\n // Dynamic registry methods\n // ---------------------------------------------------------------------------\n\n async defineNodeType(\n name: string,\n jsonSchema: object,\n description?: string,\n options?: DefineTypeOptions,\n ): Promise<void> {\n if (!this.dynamicConfig) {\n throw new DynamicRegistryError(\n 'defineNodeType() is only available in dynamic registry mode. 
' +\n 'Pass registryMode: { mode: \"dynamic\" } to createGraphClient().',\n );\n }\n\n if (RESERVED_TYPE_NAMES.has(name)) {\n throw new DynamicRegistryError(\n `Cannot define type \"${name}\": this name is reserved for the meta-registry.`,\n );\n }\n\n if (this.staticRegistry?.lookup(name, NODE_RELATION, name)) {\n throw new DynamicRegistryError(\n `Cannot define node type \"${name}\": already defined in the static registry.`,\n );\n }\n\n const uid = generateDeterministicUid(META_NODE_TYPE, name);\n const data: Record<string, unknown> = { name, jsonSchema };\n if (description !== undefined) data.description = description;\n if (options?.titleField !== undefined) data.titleField = options.titleField;\n if (options?.subtitleField !== undefined) data.subtitleField = options.subtitleField;\n if (options?.viewTemplate !== undefined) data.viewTemplate = options.viewTemplate;\n if (options?.viewCss !== undefined) data.viewCss = options.viewCss;\n if (options?.allowedIn !== undefined) data.allowedIn = options.allowedIn;\n if (options?.migrationWriteBack !== undefined)\n data.migrationWriteBack = options.migrationWriteBack;\n if (options?.migrations !== undefined) {\n data.migrations = await this.serializeMigrations(options.migrations);\n }\n\n await this.putNode(META_NODE_TYPE, uid, data);\n }\n\n async defineEdgeType(\n name: string,\n topology: EdgeTopology,\n jsonSchema?: object,\n description?: string,\n options?: DefineTypeOptions,\n ): Promise<void> {\n if (!this.dynamicConfig) {\n throw new DynamicRegistryError(\n 'defineEdgeType() is only available in dynamic registry mode. ' +\n 'Pass registryMode: { mode: \"dynamic\" } to createGraphClient().',\n );\n }\n\n if (RESERVED_TYPE_NAMES.has(name)) {\n throw new DynamicRegistryError(\n `Cannot define type \"${name}\": this name is reserved for the meta-registry.`,\n );\n }\n\n if (this.staticRegistry) {\n const fromTypes = Array.isArray(topology.from) ? topology.from : [topology.from];\n const toTypes = Array.isArray(topology.to) ? 
topology.to : [topology.to];\n for (const aType of fromTypes) {\n for (const bType of toTypes) {\n if (this.staticRegistry.lookup(aType, name, bType)) {\n throw new DynamicRegistryError(\n `Cannot define edge type \"${name}\" for (${aType}) -> (${bType}): already defined in the static registry.`,\n );\n }\n }\n }\n }\n\n const uid = generateDeterministicUid(META_EDGE_TYPE, name);\n const data: Record<string, unknown> = {\n name,\n from: topology.from,\n to: topology.to,\n };\n if (jsonSchema !== undefined) data.jsonSchema = jsonSchema;\n if (topology.inverseLabel !== undefined) data.inverseLabel = topology.inverseLabel;\n if (topology.targetGraph !== undefined) data.targetGraph = topology.targetGraph;\n if (description !== undefined) data.description = description;\n if (options?.titleField !== undefined) data.titleField = options.titleField;\n if (options?.subtitleField !== undefined) data.subtitleField = options.subtitleField;\n if (options?.viewTemplate !== undefined) data.viewTemplate = options.viewTemplate;\n if (options?.viewCss !== undefined) data.viewCss = options.viewCss;\n if (options?.allowedIn !== undefined) data.allowedIn = options.allowedIn;\n if (options?.migrationWriteBack !== undefined)\n data.migrationWriteBack = options.migrationWriteBack;\n if (options?.migrations !== undefined) {\n data.migrations = await this.serializeMigrations(options.migrations);\n }\n\n await this.putNode(META_EDGE_TYPE, uid, data);\n }\n\n async reloadRegistry(): Promise<void> {\n if (!this.dynamicConfig) {\n throw new DynamicRegistryError(\n 'reloadRegistry() is only available in dynamic registry mode. ' +\n 'Pass registryMode: { mode: \"dynamic\" } to createGraphClient().',\n );\n }\n\n const reader = this.createMetaReader();\n const dynamicOnly = await createRegistryFromGraph(reader, this.migrationSandbox);\n\n if (this.staticRegistry) {\n this.dynamicRegistry = createMergedRegistry(this.staticRegistry, dynamicOnly);\n } else {\n this.dynamicRegistry = dynamicOnly;\n }\n }\n\n private async serializeMigrations(\n migrations: Array<{ fromVersion: number; toVersion: number; up: MigrationFn | string }>,\n ): Promise<Array<{ fromVersion: number; toVersion: number; up: string }>> {\n const result = migrations.map((m) => {\n const source = typeof m.up === 'function' ? m.up.toString() : m.up;\n return { fromVersion: m.fromVersion, toVersion: m.toVersion, up: source };\n });\n await Promise.all(result.map((m) => precompileSource(m.up, this.migrationSandbox)));\n return result;\n }\n\n /**\n * Build a `GraphReader` over the meta-backend. 
If meta lives in the same\n * collection as the main backend, `this` is returned directly.\n */\n private createMetaReader(): GraphReader {\n if (!this.metaBackend) return this;\n\n const backend = this.metaBackend;\n\n const executeMetaQuery = (\n filters: QueryFilter[],\n options?: QueryOptions,\n ): Promise<StoredGraphRecord[]> => backend.query(filters, options);\n\n return {\n async getNode(uid: string): Promise<StoredGraphRecord | null> {\n return backend.getDoc(computeNodeDocId(uid));\n },\n async getEdge(\n aUid: string,\n axbType: string,\n bUid: string,\n ): Promise<StoredGraphRecord | null> {\n return backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));\n },\n async edgeExists(aUid: string, axbType: string, bUid: string): Promise<boolean> {\n const record = await backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));\n return record !== null;\n },\n async findEdges(params: FindEdgesParams): Promise<StoredGraphRecord[]> {\n const plan = buildEdgeQueryPlan(params);\n if (plan.strategy === 'get') {\n const record = await backend.getDoc(plan.docId);\n return record ? [record] : [];\n }\n return executeMetaQuery(plan.filters, plan.options);\n },\n async findNodes(params: FindNodesParams): Promise<StoredGraphRecord[]> {\n const plan = buildNodeQueryPlan(params);\n if (plan.strategy === 'get') {\n const record = await backend.getDoc(plan.docId);\n return record ? [record] : [];\n }\n return executeMetaQuery(plan.filters, plan.options);\n },\n };\n }\n}\n\n/**\n * Create a `GraphClient` backed by a `StorageBackend`.\n *\n * Phase 3: the type parameter `C` is inferred from\n * `StorageBackend<C>.capabilities` and propagates to the returned\n * `GraphClient<C>`. Extension surfaces (aggregate, search, raw escape\n * hatches, …) are conditionally intersected — they exist on the returned\n * type only when `C` declares the matching capability. Calls into\n * undeclared extensions are TypeScript errors at the call site, not\n * runtime failures.\n *\n * The runtime delegate `GraphClientImpl` carries only the portable core\n * methods today; extension methods land in Phases 4–10 alongside their\n * backend implementations. 
Until then the type-level surface is ahead of\n * the runtime, but no backend declares any extension capability so the\n * narrowing is effectively a no-op for current callers.\n *\n * `createGraphClientFromBackend` is retained as a deprecated alias for\n * backward compatibility while the codebase migrates off the old name.\n */\nexport function createGraphClient<C extends Capability = Capability>(\n backend: StorageBackend<C>,\n options: GraphClientOptions & { registryMode: DynamicRegistryConfig },\n metaBackend?: StorageBackend,\n): DynamicGraphClient<C>;\nexport function createGraphClient<C extends Capability = Capability>(\n backend: StorageBackend<C>,\n options?: GraphClientOptions,\n metaBackend?: StorageBackend,\n): GraphClient<C>;\nexport function createGraphClient<C extends Capability = Capability>(\n backend: StorageBackend<C>,\n options?: GraphClientOptions,\n metaBackend?: StorageBackend,\n): GraphClient<C> | DynamicGraphClient<C> {\n // The double cast bridges the gap between the runtime delegate\n // (`GraphClientImpl`, which structurally implements\n // `CoreGraphClient & DynamicGraphMethods`) and the conditionally-\n // intersected return types `GraphClient<C>` / `DynamicGraphClient<C>`.\n // The implementation signature can't pick between the two overloads\n // without inspecting `options.registryMode` at the type level, which\n // requires conditional types over the `options` argument; the cast\n // collapses that ambiguity. Sound today because every `*Extension`\n // body is empty and `DynamicGraphMethods` is always present at runtime\n // (the validation routing inside `GraphClientImpl` no-ops the dynamic\n // methods when registryMode is absent).\n return new GraphClientImpl(backend, options, metaBackend) as unknown as\n | GraphClient<C>\n | DynamicGraphClient<C>;\n}\n\n/**\n * @deprecated Use `createGraphClient` instead. Kept temporarily so existing\n * callers (Cloudflare client, routing backend, tests) continue to compile\n * during the Phase 2 transition.\n */\nexport const createGraphClientFromBackend = createGraphClient;\n","import { nanoid } from 'nanoid';\n\nexport function generateId(): string {\n return nanoid();\n}\n","/**\n * Backend abstraction for firegraph.\n *\n * `StorageBackend` is the single interface every storage driver implements.\n * The Firestore backend wraps `@google-cloud/firestore`; the SQLite backend\n * (shared by D1 and Durable Object SQLite) uses a parameterized SQL executor.\n *\n * `GraphClientImpl` and friends depend only on this interface — they have\n * no direct knowledge of Firestore or SQLite.\n */\n\nimport type {\n AggregateSpec,\n BulkOptions,\n BulkResult,\n BulkUpdatePatch,\n Capability,\n CascadeResult,\n EngineTraversalParams,\n EngineTraversalResult,\n ExpandParams,\n ExpandResult,\n FindEdgesParams,\n FindNearestParams,\n FullTextSearchParams,\n GeoSearchParams,\n GraphReader,\n QueryFilter,\n QueryOptions,\n StoredGraphRecord,\n} from '../types.js';\nimport type { DataPathOp } from './write-plan.js';\n\n/**\n * Runtime descriptor of which `Capability`s a `StorageBackend` actually\n * implements. Static for the lifetime of a backend instance; declared at\n * construction. The phantom `_phantom` field is a type-level marker\n * (never read at runtime) that lets the type parameter `C` flow through\n * the descriptor for use by `GraphClient<C>` conditional gating.\n *\n * Use `createCapabilities` to construct one. 
Use `.has(c)` to check\n * membership at runtime; the type system gates extension methods on the\n * client level (see `.claude/backend-capabilities.md`).\n */\nexport interface BackendCapabilities<C extends Capability = Capability> {\n /** Runtime membership check. */\n has(capability: Capability): boolean;\n /** Iterate declared capabilities (diagnostics, error messages). */\n values(): IterableIterator<Capability>;\n /** Type-level marker. Never read at runtime. */\n readonly _phantom?: C;\n}\n\n/**\n * Construct a `BackendCapabilities<C>` from an explicit set. The set is\n * captured by reference; callers should treat it as readonly after passing\n * it in. The runtime cost of `has()` is one Set lookup.\n */\nexport function createCapabilities<C extends Capability>(\n caps: ReadonlySet<C>,\n): BackendCapabilities<C> {\n return {\n has: (capability: Capability): boolean => caps.has(capability as C),\n values: () => caps.values() as IterableIterator<Capability>,\n };\n}\n\n/**\n * Intersect multiple capability sets. Used by `RoutingStorageBackend` to\n * derive the capability set of a composite backend: a routed graph can\n * only honour a capability if every wrapped backend honours it.\n */\nexport function intersectCapabilities(\n parts: ReadonlyArray<BackendCapabilities>,\n): BackendCapabilities {\n if (parts.length === 0) return createCapabilities(new Set<Capability>());\n const sets = parts.map((p) => new Set<Capability>(p.values()));\n const [first, ...rest] = sets;\n const intersection = new Set<Capability>();\n for (const c of first) {\n if (rest.every((s) => s.has(c))) intersection.add(c);\n }\n return createCapabilities(intersection);\n}\n\n/**\n * Per-record write payload — backend-agnostic. Timestamps are not present;\n * the backend supplies them via `serverTimestamp()` placeholders that it\n * itself resolves at commit time.\n */\nexport interface WritableRecord {\n aType: string;\n aUid: string;\n axbType: string;\n bType: string;\n bUid: string;\n data: Record<string, unknown>;\n /** Schema version (set by the writer when registry has migrations). */\n v?: number;\n}\n\n/**\n * Write semantics for `setDoc`.\n *\n * - `'merge'` — the new contract (0.12+). Existing fields not mentioned\n * in the new data survive; nested objects are recursively merged;\n * arrays are replaced as a unit. This is the default for\n * `putNode` / `putEdge`.\n * - `'replace'` — the document is replaced wholesale, dropping any\n * fields not present in the payload. This is the explicit escape\n * hatch surfaced as `replaceNode` / `replaceEdge` and used by\n * migration write-back.\n */\nexport type WriteMode = 'merge' | 'replace';\n\n/**\n * Patch shape for `updateDoc`.\n *\n * - `dataOps`: list of deep-path terminal ops produced by\n * `flattenPatch()` (one op per leaf — arrays / primitives / Firestore\n * special types are terminal). Used by `updateNode` / `updateEdge`.\n * Sibling keys at every depth are preserved.\n * - `replaceData`: full `data` replacement. Used only by the migration\n * write-back path, which has already produced a complete migrated\n * document.\n * - `v`: optional schema-version stamp.\n *\n * `updatedAt` is always set by the backend.\n */\nexport interface UpdatePayload {\n dataOps?: DataPathOp[];\n replaceData?: Record<string, unknown>;\n v?: number;\n}\n\n/**\n * Read/write transaction adapter. 
Mirrors Firestore's transaction semantics:\n * reads are snapshot-consistent; writes are issued inside the transaction\n * and a rejection from any write aborts the surrounding `runTransaction`.\n *\n * Writes return `Promise<void>` so SQL drivers can surface row-level errors\n * (constraint violations, malformed JSON paths) rather than swallowing them.\n * Firestore implementations can resolve synchronously since the underlying\n * `Transaction.set/update/delete` calls are themselves synchronous buffers.\n */\nexport interface TransactionBackend {\n getDoc(docId: string): Promise<StoredGraphRecord | null>;\n query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;\n updateDoc(docId: string, update: UpdatePayload): Promise<void>;\n deleteDoc(docId: string): Promise<void>;\n}\n\n/**\n * Atomic multi-write batch.\n */\nexport interface BatchBackend {\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): void;\n updateDoc(docId: string, update: UpdatePayload): void;\n deleteDoc(docId: string): void;\n commit(): Promise<void>;\n}\n\n/**\n * The single storage abstraction.\n *\n * Each backend instance is scoped to a \"graph location\" — for Firestore\n * that's a collection path; for SQLite it's a (table, scopePath) pair.\n * `subgraph()` returns a child backend bound to a nested location.\n */\nexport interface StorageBackend<C extends Capability = Capability> {\n /** Capabilities this backend instance declares. Static for the lifetime of the backend. */\n readonly capabilities: BackendCapabilities<C>;\n /** Backend-internal location identifier (collection path or table name). */\n readonly collectionPath: string;\n /** Subgraph scope (empty string for root). */\n readonly scopePath: string;\n\n // --- Reads ---\n getDoc(docId: string): Promise<StoredGraphRecord | null>;\n query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;\n\n // --- Writes ---\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;\n updateDoc(docId: string, update: UpdatePayload): Promise<void>;\n deleteDoc(docId: string): Promise<void>;\n\n // --- Transactions & batches ---\n runTransaction<T>(fn: (tx: TransactionBackend) => Promise<T>): Promise<T>;\n createBatch(): BatchBackend;\n\n // --- Subgraphs ---\n subgraph(parentNodeUid: string, name: string): StorageBackend;\n\n // --- Cascade & bulk ---\n removeNodeCascade(\n uid: string,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<CascadeResult>;\n bulkRemoveEdges(\n params: FindEdgesParams,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<BulkResult>;\n\n // --- Cross-collection queries ---\n /**\n * Find edges across all subgraphs sharing a given collection name.\n * Optional — backends that can't support this should throw a clear error.\n */\n findEdgesGlobal?(params: FindEdgesParams, collectionName?: string): Promise<StoredGraphRecord[]>;\n\n // --- Aggregations ---\n /**\n * Run an aggregate query (count/sum/avg/min/max). Present only on backends\n * that declare `query.aggregate`. The map's keys are caller-defined aliases\n * matching `AggregateSpec`; values are the resolved numeric results.\n *\n * Backends that can't satisfy a particular op throw `FiregraphError` with\n * code `UNSUPPORTED_AGGREGATE` (e.g. 
Firestore Standard rejects min/max).\n */\n aggregate?(spec: AggregateSpec, filters: QueryFilter[]): Promise<Record<string, number>>;\n\n // --- Server-side DML ---\n /**\n * Delete every row matching `filters` in one server-side statement.\n * Present only on backends that declare `query.dml`. The default cascade\n * implementation in `bulk.ts` uses this when available; backends without\n * the cap (e.g. Firestore Standard) fall back to a fetch-then-delete\n * loop driven by `findEdges` + per-row `deleteDoc`.\n *\n * The contract matches `findEdges`: scope predicates are honoured\n * automatically by the backend's own internal scope tracking. Callers\n * supply only the filter list — the same shape produced by\n * `buildEdgeQueryPlan`.\n */\n bulkDelete?(filters: QueryFilter[], options?: BulkOptions): Promise<BulkResult>;\n /**\n * Update every row matching `filters` with `patch` in one server-side\n * statement. The patch is deep-merged into each row's `data` field, the\n * same flatten-then-merge pipeline `updateDoc` uses. Identifying columns\n * (`aType`, `axbType`, `aUid`, `bType`, `bUid`, `v`) are not writable\n * through this path.\n */\n bulkUpdate?(\n filters: QueryFilter[],\n patch: BulkUpdatePatch,\n options?: BulkOptions,\n ): Promise<BulkResult>;\n\n // --- Server-side multi-source fan-out ---\n /**\n * Fan out from `params.sources` over a single edge type in one server-side\n * round trip. Present only on backends that declare `query.join`. The\n * traversal layer (`traverse.ts`) calls `expand` once per hop when the\n * backend declares the cap; otherwise it falls back to the per-source\n * `findEdges` loop.\n *\n * Cross-graph hops are never dispatched through `expand` — each source\n * UID resolves to a distinct subgraph location, which can't be fanned\n * out as a single statement. The traversal layer enforces that\n * boundary; `expand` itself does not need to inspect `targetGraph`.\n */\n expand?(params: ExpandParams): Promise<ExpandResult>;\n\n // --- Engine-level multi-hop traversal ---\n /**\n * Compile a multi-hop traversal spec into one server-side query and\n * dispatch a single round trip. Present only on backends that declare\n * `traversal.serverSide` (Firestore Enterprise today, via nested\n * Pipelines that combine `define`, `addFields`, and\n * `toArrayExpression`).\n *\n * The traversal layer (`traverse.ts`) compiles a `TraversalBuilder`\n * spec into `EngineTraversalParams` only when the spec is eligible\n * (no cross-graph hops, no JS filters, depth ≤ `MAX_PIPELINE_DEPTH`,\n * `Π(limitPerSource_i × N_i) ≤ maxReads`, `limitPerSource` set on\n * every hop). Ineligible specs fall back to the per-hop `expand()`\n * loop without invoking this method.\n *\n * The result collapses the nested-pipeline tree into per-hop edge\n * arrays so the traversal layer can fold the result into the same\n * `HopResult[]` shape it produces from the per-hop loop.\n */\n runEngineTraversal?(params: EngineTraversalParams): Promise<EngineTraversalResult>;\n\n // --- Server-side projection ---\n /**\n * Run a projecting query — return only the listed fields per row. Present\n * only on backends that declare `query.select`. The cap-less fallback is\n * `findEdges` followed by a JS-side projection in user code; firegraph\n * does not auto-fall-back because the wire-payload reduction is the only\n * reason to call this method.\n *\n * `select` is the explicit field list; `filters` and `options` mirror the\n * `query()` shape. 
The returned rows have one slot per unique entry in\n * `select`. Field-name interpretation is the backend's responsibility:\n * built-in fields resolve to columns / Firestore field names, bare names\n * resolve to `data.<name>`, and dotted paths resolve verbatim. See\n * `FindEdgesProjectedParams` for the user-facing contract.\n *\n * Migrations are not applied to the result — the caller asked for a\n * specific projection shape, and rehydrating a partial record into the\n * migration pipeline would require synthesising every absent field.\n */\n findEdgesProjected?(\n select: ReadonlyArray<string>,\n filters: QueryFilter[],\n options?: QueryOptions,\n ): Promise<Array<Record<string, unknown>>>;\n\n // --- Native vector / nearest-neighbour search ---\n /**\n * Run a vector / nearest-neighbour query. Present only on backends that\n * declare `search.vector`. There is no client-side fallback — the\n * SQLite-shaped backends (shared SQLite, Cloudflare DO) genuinely have\n * no native ANN index, and a JS-side k-NN sweep over `findEdges()` would\n * scale catastrophically. Backends without the cap throw\n * `UNSUPPORTED_OPERATION` from the client wrapper.\n *\n * `params` carries the user-facing shape (vector field path, query\n * vector, distance metric, optional threshold and result-field). The\n * client wrapper has already run scan-protection on the identifying\n * / `where` filter list before dispatching.\n *\n * Path normalisation is the backend's responsibility: rewriting bare\n * `vectorField` / `distanceResultField` names to `data.<name>` and\n * rejecting envelope fields (`aType`, `axbType`, `bType`, `aUid`,\n * `bUid`, `v`, etc.) with `INVALID_QUERY` happens inside the\n * backend, not the client wrapper. The two in-tree Firestore-edition\n * backends share `runFirestoreFindNearest` (see\n * `src/internal/firestore-vector.ts`) for this; third-party backends\n * declaring `search.vector` must apply equivalent normalisation\n * before calling their underlying SDK.\n *\n * The backend is also responsible for translating to the underlying\n * SDK call (`Query.findNearest` on Firestore today) and decoding the\n * result snapshot into `StoredGraphRecord[]`.\n *\n * Migrations are not applied to the result. The vector index walks the\n * raw stored shape; rehydrating into the migration pipeline before\n * returning would change the candidate set the index already chose.\n */\n findNearest?(params: FindNearestParams): Promise<StoredGraphRecord[]>;\n\n // --- Native full-text search ---\n /**\n * Run a full-text search query. Present only on backends that declare\n * `search.fullText`. There is no client-side fallback — the only\n * in-tree backend that supports it is Firestore Enterprise (via\n * Pipeline `search({ query: documentMatches(...) })`); Standard and\n * the SQLite-shaped backends throw `UNSUPPORTED_OPERATION` from the\n * client wrapper.\n *\n * The backend is responsible for path normalisation (rewriting\n * bare `fields` entries to `data.<name>`, rejecting envelope fields\n * with `INVALID_QUERY`), translating to the underlying SDK call,\n * and decoding the result into `StoredGraphRecord[]`.\n *\n * Migrations are not applied to the result. The search index walked\n * the raw stored shape; rehydrating into the migration pipeline\n * would change the candidate set the index already scored.\n */\n fullTextSearch?(params: FullTextSearchParams): Promise<StoredGraphRecord[]>;\n\n // --- Native geospatial distance search ---\n /**\n * Run a geospatial distance search. 
Present only on backends that\n * declare `search.geo`. There is no client-side fallback — only\n * Firestore Enterprise has a native geo index (translated via\n * Pipeline `search({ query: geoDistance(...).lessThanOrEqual(...) })`).\n * Backends without the cap throw `UNSUPPORTED_OPERATION` from the\n * client wrapper.\n *\n * The backend is responsible for `geoField` path normalisation,\n * translating `point` to a Firestore `GeoPoint`, applying the\n * radius cap inside the search query, and (when\n * `orderByDistance` is true / unset) emitting the\n * `geoDistance(...).ascending()` ordering inside the search stage.\n *\n * Migrations are not applied to the result.\n */\n geoSearch?(params: GeoSearchParams): Promise<StoredGraphRecord[]>;\n}\n","/**\n * Shared `dataOps` SQL compilation helpers used by both SQLite-style backends\n * (`internal/sqlite-sql.ts` for the shared-table backend and `cloudflare/sql.ts`\n * for the per-DO backend).\n *\n * The two backends differ in identifier quoting and scope handling, but the\n * `data` column lives in JSON in both, the deep-merge / replace contract is\n * identical, and the `json_set` / `json_remove` expression they emit for a\n * `DataPathOp[]` is byte-for-byte the same. Lifting the helpers here keeps\n * that shape in one place — the comment in `cloudflare/sql.ts` used to read\n * \"keep them in sync\"; this module is what they keep in sync against.\n *\n * The helpers take a `backendLabel` parameter so error messages still\n * distinguish `\"SQLite backend\"` (shared-table) from `\"DO SQLite backend\"`\n * (per-Durable-Object). Identifier quoting is the caller's job — the helpers\n * here only emit JSON-path expressions against an opaque `base` argument,\n * never bare column names.\n */\n\nimport { FiregraphError } from '../errors.js';\nimport type { DataPathOp } from './write-plan.js';\n\n/**\n * Constructor names of Firestore special types that don't survive a plain\n * `JSON.stringify` round-trip — they have non-enumerable accessors (e.g.\n * `Timestamp.seconds`) or class identity that JSON loses. Detection is by\n * `constructor.name` to keep this module dependency-free (importing\n * `@google-cloud/firestore` here would pollute the Cloudflare Workers bundle —\n * see tests/unit/bundle-pollution.test.ts).\n */\nexport const FIRESTORE_TYPE_NAMES = new Set([\n 'Timestamp',\n 'GeoPoint',\n 'VectorValue',\n 'DocumentReference',\n 'FieldValue',\n]);\n\nexport function isFirestoreSpecialType(value: object): string | null {\n const ctorName = (value as { constructor?: { name?: string } }).constructor?.name;\n if (ctorName && FIRESTORE_TYPE_NAMES.has(ctorName)) return ctorName;\n return null;\n}\n\n/**\n * Identifiers accepted in `data.<key>` paths and `dataOps` path segments.\n * The pattern (`/^[A-Za-z_][A-Za-z0-9_-]*$/`) covers code-style identifiers\n * (camel, snake, kebab). Silently quoting exotic keys would require symmetric\n * quoting at every read/write call site; any drift produces silent data\n * corruption. 
Failing loudly at compile time is safer — users with exotic\n * keys can use `replaceNode` / `replaceEdge` (full-data overwrite) instead.\n */\nexport const JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;\n\nexport function validateJsonPathKey(key: string, backendLabel: string): void {\n if (key.length === 0) {\n throw new FiregraphError(\n `${backendLabel}: empty JSON path component is not allowed`,\n 'INVALID_QUERY',\n );\n }\n if (!JSON_PATH_KEY_RE.test(key)) {\n throw new FiregraphError(\n `${backendLabel}: data field path component \"${key}\" is not a safe JSON-path identifier. ` +\n `Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceNode/replaceEdge (full-data overwrite) ` +\n `for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,\n 'INVALID_QUERY',\n );\n }\n}\n\n/**\n * Bind a value as a JSON-serializable string for `json(?)` placeholders in\n * the compiled `json_set` expression. `assertJsonSafePayload` already runs\n * eagerly at the write boundary, so the Firestore-special-type rejection\n * here is defense-in-depth — left in place per the team's preference for\n * symmetric guards across the SQLite compilers.\n */\nexport function jsonBind(value: unknown, backendLabel: string): string {\n if (value === undefined) return 'null';\n if (value !== null && typeof value === 'object') {\n const firestoreType = isFirestoreSpecialType(value);\n if (firestoreType) {\n throw new FiregraphError(\n `${backendLabel} cannot persist a Firestore ${firestoreType} value. ` +\n `Convert to a primitive before writing (e.g. \\`ts.toMillis()\\` for Timestamp).`,\n 'INVALID_ARGUMENT',\n );\n }\n }\n return JSON.stringify(value);\n}\n\n/**\n * Build the SQL expression that applies a list of `DataPathOp`s onto an\n * existing JSON column reference (e.g. `\"data\"` or `COALESCE(\"data\", '{}')`).\n *\n * Returns the full expression (already parenthesised where needed) and pushes\n * the bound parameters onto `params` in left-to-right order. Returns `null`\n * when there are no ops at all — the caller picks a fallback expression.\n *\n * Strategy:\n * 1. `json_remove(<base>, '$.a.b', '$.c', …)` strips delete-ops.\n * 2. `json_set(<#1>, '$.x.y', json(?), '$.z', json(?), …)` writes value-ops.\n * `json(?)` ensures non-string values bind as JSON (objects, arrays,\n * numbers, booleans, null).\n */\nexport function compileDataOpsExpr(\n ops: readonly DataPathOp[],\n base: string,\n params: unknown[],\n backendLabel: string,\n): string | null {\n if (ops.length === 0) return null;\n\n const deletes: DataPathOp[] = [];\n const sets: DataPathOp[] = [];\n for (const op of ops) (op.delete ? 
deletes : sets).push(op);\n\n let expr = base;\n\n if (deletes.length > 0) {\n const placeholders = deletes.map(() => '?').join(', ');\n expr = `json_remove(${expr}, ${placeholders})`;\n for (const op of deletes) {\n for (const seg of op.path) validateJsonPathKey(seg, backendLabel);\n params.push(`$.${op.path.join('.')}`);\n }\n }\n\n if (sets.length > 0) {\n const pieces = sets.map(() => '?, json(?)').join(', ');\n expr = `json_set(${expr}, ${pieces})`;\n for (const op of sets) {\n for (const seg of op.path) validateJsonPathKey(seg, backendLabel);\n params.push(`$.${op.path.join('.')}`);\n params.push(jsonBind(op.value, backendLabel));\n }\n }\n\n return expr;\n}\n","/**\n * Shared eager-validation helper for SQLite-style backends\n * (`internal/sqlite-sql.ts` and `cloudflare/sql.ts`).\n *\n * Both backends serialise `record.data` (and `update.replaceData`) as a raw\n * JSON blob via `JSON.stringify`. Two classes of value silently corrupt that\n * representation and the cross-backend contract:\n *\n * 1. **Firestore special types** (`Timestamp`, `GeoPoint`, `VectorValue`,\n * `DocumentReference`, `FieldValue`). They have non-enumerable accessors\n * or rely on class identity that JSON drops, so they round-trip as `{}`\n * or garbage. Callers must convert to primitives before writing.\n * 2. **`DELETE_FIELD` sentinel.** A `Symbol` is invisible to\n * `JSON.stringify`. If a caller embeds the sentinel in a `replaceNode`\n * payload or in a fresh-insert (no existing row), the field would\n * silently disappear instead of erroring loudly the way it does for the\n * `dataOps` path — so we reject it eagerly here.\n * 3. **Tagged serialization payloads** (`__firegraph_ser__`). These are the\n * sandbox migration boundary marshalling form. They are valid inside\n * Firestore (the Firestore backend re-hydrates them via\n * `deserializeFirestoreTypes`), but on SQLite they would persist as\n * opaque tagged objects that no downstream reader knows how to interpret.\n * Reject them at the boundary so the failure is loud.\n *\n * The Firestore backend does NOT call this — it accepts those types natively\n * and `deserializeFirestoreTypes` rebuilds tagged values into real Firestore\n * objects on its own write path.\n *\n * Detection avoids `instanceof` so this module stays free of\n * `@google-cloud/firestore`. Constructor-name + duck-type matches the\n * approach used by `bindValue`/`jsonBind` elsewhere in the SQLite compilers.\n */\n\nimport { FiregraphError } from '../errors.js';\nimport { SERIALIZATION_TAG } from './serialization-tag.js';\nimport { isDeleteSentinel } from './write-plan.js';\n\nconst FIRESTORE_TYPE_NAMES = new Set([\n 'Timestamp',\n 'GeoPoint',\n 'VectorValue',\n 'DocumentReference',\n 'FieldValue',\n]);\n\n/**\n * Walk `data` and throw on any value that the SQLite-style raw-JSON\n * persistence path can't faithfully serialise. `label` distinguishes the\n * caller in error messages (e.g. `'shared-table SQLite'` vs `'DO SQLite'`).\n *\n * Plain objects recurse. Arrays recurse element-wise. 
Primitives, `null`,\n * and `undefined` are accepted (mirroring how `flattenPatch` treats them\n * during the merge path).\n */\nexport function assertJsonSafePayload(data: unknown, label: string): void {\n walk(data, [], label);\n}\n\nfunction walk(node: unknown, path: readonly string[], label: string): void {\n if (node === null || node === undefined) return;\n if (isDeleteSentinel(node)) {\n throw new FiregraphError(\n `${label} backend cannot persist a deleteField() sentinel inside a ` +\n `full-data payload (replaceNode/replaceEdge or first-insert). The ` +\n `sentinel is only valid inside an updateNode/updateEdge dataOps patch. ` +\n `Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n const t = typeof node;\n if (t === 'symbol' || t === 'function') {\n throw new FiregraphError(\n `${label} backend cannot persist a value of type ${t}. ` +\n `JSON.stringify drops it silently. Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n if (t === 'bigint') {\n throw new FiregraphError(\n `${label} backend cannot persist a value of type bigint. ` +\n `JSON.stringify cannot serialize this type (throws TypeError). ` +\n `Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n if (t !== 'object') return;\n if (Array.isArray(node)) {\n for (let i = 0; i < node.length; i++) {\n walk(node[i], [...path, String(i)], label);\n }\n return;\n }\n // Reject any object carrying the firegraph serialization tag — both legit\n // tagged Firestore-type payloads (the migration-sandbox output that round-\n // trips through Firestore) and bogus user data that happens to put a\n // literal `__firegraph_ser__` key on a plain object. SQLite has no\n // Timestamp class to rebuild the tag into, and silently writing the\n // envelope would produce an unreadable column.\n const obj = node as Record<string, unknown>;\n if (Object.prototype.hasOwnProperty.call(obj, SERIALIZATION_TAG)) {\n const tagValue = obj[SERIALIZATION_TAG];\n throw new FiregraphError(\n `${label} backend cannot persist an object with a \\`${SERIALIZATION_TAG}\\` ` +\n `key (value: ${formatTagValue(tagValue)}). Recognised tags are valid only on ` +\n `the Firestore backend (migration-sandbox output); a literal ` +\n `\\`${SERIALIZATION_TAG}\\` field in user data is reserved and not allowed. ` +\n `Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n // Class instances: reject Firestore special types loudly; reject every\n // other class instance generically (Map, Set, Date are caller's\n // responsibility to convert — Date is allowed in filter binds via\n // `bindValue` but not as a stored payload value because JSON.stringify\n // produces a string, not a real Date).\n const proto = Object.getPrototypeOf(node);\n if (proto !== null && proto !== Object.prototype) {\n const ctor = (node as { constructor?: { name?: string } }).constructor;\n const ctorName = ctor && typeof ctor.name === 'string' ? ctor.name : '<anonymous>';\n if (FIRESTORE_TYPE_NAMES.has(ctorName)) {\n throw new FiregraphError(\n `${label} backend cannot persist a Firestore ${ctorName} value. ` +\n `Convert to a primitive before writing (e.g. \\`ts.toMillis()\\` for ` +\n `Timestamp, \\`{lat,lng}\\` for GeoPoint). Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n // Accept Date as an alias for its epoch-ms — it round-trips as an ISO\n // string via JSON.stringify, which the caller chose; not our place to\n // reject. Same for Buffer / typed arrays — they'll JSON-serialize as\n // best they can. 
Reject only opaque exotic instances that JSON drops.\n if (node instanceof Date) return;\n throw new FiregraphError(\n `${label} backend cannot persist a class instance of type ${ctorName}. ` +\n `Only plain objects, arrays, and primitives round-trip safely through ` +\n `JSON storage. Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n for (const key of Object.keys(obj)) {\n walk(obj[key], [...path, key], label);\n }\n}\n\nfunction formatPath(path: readonly string[]): string {\n return path.length === 0 ? '<root>' : path.map((p) => JSON.stringify(p)).join(' > ');\n}\n\nfunction formatTagValue(value: unknown): string {\n if (value === null) return 'null';\n if (value === undefined) return 'undefined';\n if (typeof value === 'string') return JSON.stringify(value);\n if (typeof value === 'number' || typeof value === 'boolean' || typeof value === 'bigint') {\n return String(value);\n }\n return typeof value;\n}\n","/**\n * Default core index preset.\n *\n * This set covers the query patterns firegraph's query planner emits for\n * built-in operations — `findNodes`, `findEdges`, cascade delete, traversal,\n * and the DO/SQLite path compilers. Apps that need additional indexes\n * (descending timestamps, `data.*` filters, composite fields unique to\n * their query shapes) declare them on `RegistryEntry.indexes` or override\n * this preset wholesale via the backend-specific `coreIndexes` option —\n * `FiregraphDOOptions.coreIndexes` for the DO backend,\n * `BuildSchemaOptions.coreIndexes` for the legacy SQLite backend, and\n * `GenerateIndexOptions.coreIndexes` for the Firestore CLI generator.\n *\n * ## Ownership model\n *\n * This list is firegraph's *recommendation* — not non-negotiable policy.\n * Consumers can:\n *\n * 1. Accept the preset as-is (default).\n * 2. Extend it: `coreIndexes: [...DEFAULT_CORE_INDEXES, ...more]`.\n * 3. Replace it entirely with a tailored set.\n * 4. Disable it (`coreIndexes: []`) and take full responsibility for\n * index coverage — only do this if you're provisioning a complete\n * custom set.\n *\n * ## Per-backend emission\n *\n * The Firestore generator skips single-field entries (Firestore implicitly\n * indexes every field) and emits one composite index per multi-field spec.\n * The SQLite-flavored generators (DO, legacy) emit every spec as-is.\n *\n * ## Why these specific indexes\n *\n * - `aUid` / `bUid` — required for `_fgRemoveNodeCascade`, which scans by\n * each UID side independently. 
A composite `(aUid, axbType)` also\n * satisfies `aUid`-alone via leading-column prefix, but the single-field\n * form is cheaper for the common case.\n * - `aType` / `bType` — `findNodes({ aType })` and cross-type enumeration.\n * - `(aUid, axbType)` — forward edge lookup (`findEdges({ aUid, axbType })`)\n * and the `get` strategy fallback when only two of three triple fields\n * are present.\n * - `(axbType, bUid)` — reverse edge traversal.\n * - `(aType, axbType)` — type-scoped edge scans (e.g., `findEdges({ aType, axbType })`).\n * - `(axbType, bType)` — scope edges of one relation to a target type.\n */\n\nimport type { IndexSpec } from './types.js';\n\nexport const DEFAULT_CORE_INDEXES: ReadonlyArray<IndexSpec> = Object.freeze([\n { fields: ['aUid'] },\n { fields: ['bUid'] },\n { fields: ['aType'] },\n { fields: ['bType'] },\n { fields: ['aUid', 'axbType'] },\n { fields: ['axbType', 'bUid'] },\n { fields: ['aType', 'axbType'] },\n { fields: ['axbType', 'bType'] },\n]);\n","/**\n * SQLite schema for firegraph triples.\n *\n * Single-table design — both nodes (self-loops with `axbType = 'is'`) and\n * edges share one row. The `scope` column carries the materialized subgraph\n * path (parent UIDs interleaved with subgraph names), which preserves\n * Firestore's nested-subcollection semantics in a flat table.\n *\n * `data` is a JSON string. Built-in fields are projected to typed columns so\n * the query planner can use indexes without going through `json_extract`.\n *\n * ## Indexes\n *\n * Every index is prefixed with the `scope` column so the query compiler\n * (which always emits a `scope = ?` predicate) can use the index prefix\n * directly. This is the single difference from the DO SQLite backend, where\n * each DO is physically scoped and no discriminator column exists.\n *\n * Index specs come from the core preset (overridable via\n * `buildSchemaStatementsOptions.coreIndexes`) plus per-entry `indexes`\n * declared on registry entries. Specs are deduplicated by canonical\n * fingerprint before emission.\n */\n\nimport { DEFAULT_CORE_INDEXES } from '../default-indexes.js';\nimport type { GraphRegistry, IndexSpec } from '../types.js';\nimport { buildIndexDDL, dedupeIndexSpecs } from './sqlite-index-ddl.js';\n\nexport const SQLITE_COLUMNS = [\n 'doc_id',\n 'scope',\n 'a_type',\n 'a_uid',\n 'axb_type',\n 'b_type',\n 'b_uid',\n 'data',\n 'v',\n 'created_at',\n 'updated_at',\n] as const;\n\nexport type SqliteColumn = (typeof SQLITE_COLUMNS)[number];\n\n/**\n * Map firegraph field names (as they appear in `QueryFilter.field` and the\n * record envelope) to SQLite column names.\n */\nexport const FIELD_TO_COLUMN: Record<string, SqliteColumn> = {\n aType: 'a_type',\n aUid: 'a_uid',\n axbType: 'axb_type',\n bType: 'b_type',\n bUid: 'b_uid',\n v: 'v',\n createdAt: 'created_at',\n updatedAt: 'updated_at',\n};\n\n/**\n * Options controlling DDL emission for `buildSchemaStatements`.\n */\nexport interface BuildSchemaOptions {\n /**\n * Replaces the built-in core preset. 
Defaults to `DEFAULT_CORE_INDEXES`.\n * Pass `[]` to disable core indexes entirely.\n */\n coreIndexes?: IndexSpec[];\n /**\n * Registry contributing per-triple `indexes` declarations.\n */\n registry?: GraphRegistry;\n}\n\n/**\n * Build the DDL statements that create the firegraph table and its indexes.\n * Returned as separate statements because some drivers (D1) require one\n * statement per `prepare()` call.\n */\nexport function buildSchemaStatements(table: string, options: BuildSchemaOptions = {}): string[] {\n const t = quoteIdent(table);\n const statements: string[] = [\n `CREATE TABLE IF NOT EXISTS ${t} (\n doc_id TEXT NOT NULL,\n scope TEXT NOT NULL DEFAULT '',\n a_type TEXT NOT NULL,\n a_uid TEXT NOT NULL,\n axb_type TEXT NOT NULL,\n b_type TEXT NOT NULL,\n b_uid TEXT NOT NULL,\n data TEXT NOT NULL,\n v INTEGER,\n created_at INTEGER NOT NULL,\n updated_at INTEGER NOT NULL,\n PRIMARY KEY (scope, doc_id)\n )`,\n // `doc_id`-only index for edge-doc lookup across scopes (the primary key\n // leads with `scope`, so a scope-less lookup wouldn't otherwise hit it).\n `CREATE INDEX IF NOT EXISTS ${quoteIdent(`${table}_idx_doc_id`)} ON ${t}(doc_id)`,\n ];\n\n const core = options.coreIndexes ?? [...DEFAULT_CORE_INDEXES];\n const fromRegistry = options.registry?.entries().flatMap((e) => e.indexes ?? []) ?? [];\n\n const leadingColumns = ['scope'];\n const deduped = dedupeIndexSpecs([...core, ...fromRegistry], leadingColumns);\n for (const spec of deduped) {\n statements.push(buildIndexDDL(spec, { table, fieldToColumn: FIELD_TO_COLUMN, leadingColumns }));\n }\n return statements;\n}\n\n/**\n * Quote a SQL identifier with double quotes, escaping any embedded quotes.\n *\n * Identifier names (table, column, index) come from configuration and\n * static code in this module — never from user data — but quoting still\n * protects against accidental keyword collisions.\n */\nexport function quoteIdent(name: string): string {\n validateTableName(name);\n return `\"${name}\"`;\n}\n\n/**\n * Validate a SQLite identifier (table name, column name) against the\n * allowed character set. Exposed so factory functions can fail fast on\n * an invalid `options.table` rather than waiting until first SQL.\n */\nexport function validateTableName(name: string): void {\n if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(name)) {\n throw new Error(`Invalid SQL identifier: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`);\n }\n}\n\n/**\n * Quote a SQL column-alias label. Unlike `quoteIdent` (which validates the\n * input as a SQL identifier and is used for table/column names), this helper\n * accepts arbitrary text — projection aliases are pure labels we read back\n * out of the result row, never executed as identifiers, so they can carry\n * dots (e.g. `data.detail.region`) and other characters that\n * `validateTableName` rejects.\n *\n * Embedded double quotes are escaped per the SQL standard (`\"` → `\"\"`),\n * which is sufficient to prevent the alias text from terminating the quoted\n * label early. 
This is the only injection vector for an alias — even if\n * the input contained `\";--`, double-quote escaping would render it\n * `\"\"\";--` inside `\"...\"`, harmless.\n *\n * Used by `compileFindEdgesProjected` (and the DO mirror) for the\n * caller-supplied projection field name; the underlying SQL expression\n * (`json_extract(...)`, column reference) still goes through the strict\n * compiler with no caller input.\n */\nexport function quoteColumnAlias(label: string): string {\n return `\"${label.replace(/\"/g, '\"\"')}\"`;\n}\n","/**\n * Backend-agnostic timestamp.\n *\n * Structurally compatible with `@google-cloud/firestore`'s `Timestamp` so\n * that records returned by either the Firestore or SQLite backend can be\n * consumed through the same `StoredGraphRecord` shape.\n *\n * Firestore's native `Timestamp` already satisfies this interface, so\n * existing Firestore consumers see no behavior change. The SQLite backend\n * returns instances of `GraphTimestampImpl` which also satisfies it.\n */\n\nexport interface GraphTimestamp {\n readonly seconds: number;\n readonly nanoseconds: number;\n toDate(): Date;\n toMillis(): number;\n}\n\n/**\n * Concrete `GraphTimestamp` implementation used by non-Firestore backends.\n * Mirrors the surface of Firestore's `Timestamp` enough for typical use.\n */\nexport class GraphTimestampImpl implements GraphTimestamp {\n constructor(\n public readonly seconds: number,\n public readonly nanoseconds: number,\n ) {}\n\n toDate(): Date {\n return new Date(this.toMillis());\n }\n\n toMillis(): number {\n return this.seconds * 1000 + Math.floor(this.nanoseconds / 1e6);\n }\n\n toJSON(): { seconds: number; nanoseconds: number } {\n return { seconds: this.seconds, nanoseconds: this.nanoseconds };\n }\n\n static fromMillis(ms: number): GraphTimestampImpl {\n const seconds = Math.floor(ms / 1000);\n const nanoseconds = (ms - seconds * 1000) * 1e6;\n return new GraphTimestampImpl(seconds, nanoseconds);\n }\n\n static now(): GraphTimestampImpl {\n return GraphTimestampImpl.fromMillis(Date.now());\n }\n}\n\n/**\n * Sentinel returned by `StorageBackend.serverTimestamp()` when the backend\n * has no native server-time concept and just wants a placeholder that the\n * adapter resolves to a concrete time at write commit. SQLite backends\n * substitute the wall-clock millis at the moment of `setDoc`/`updateDoc`.\n */\nexport const SERVER_TIMESTAMP_SENTINEL = Symbol.for('firegraph.serverTimestamp');\nexport type ServerTimestampSentinel = typeof SERVER_TIMESTAMP_SENTINEL;\n\nexport function isServerTimestampSentinel(value: unknown): value is ServerTimestampSentinel {\n return value === SERVER_TIMESTAMP_SENTINEL;\n}\n","/**\n * Compile firegraph queries and writes into parameterized SQLite statements.\n *\n * The single-table SQLite schema mirrors the Firestore record envelope:\n * built-in fields (`aType`, `aUid`, etc.) are typed columns, and `data` is a\n * JSON string. 
`data.<key>` filter fields are translated to `json_extract`\n * expressions; built-in fields go straight to their column.\n */\n\nimport { FiregraphError } from '../errors.js';\nimport type { UpdatePayload, WritableRecord, WriteMode } from '../internal/backend.js';\nimport { NODE_RELATION } from '../internal/constants.js';\nimport {\n compileDataOpsExpr,\n isFirestoreSpecialType,\n validateJsonPathKey,\n} from '../internal/sqlite-data-ops.js';\nimport { assertJsonSafePayload } from '../internal/sqlite-payload-guard.js';\nimport { FIELD_TO_COLUMN, quoteColumnAlias, quoteIdent } from '../internal/sqlite-schema.js';\nimport { assertUpdatePayloadExclusive, flattenPatch } from '../internal/write-plan.js';\nimport type { GraphTimestamp } from '../timestamp.js';\nimport { GraphTimestampImpl } from '../timestamp.js';\nimport type {\n AggregateSpec,\n ExpandParams,\n QueryFilter,\n QueryOptions,\n StoredGraphRecord,\n} from '../types.js';\n\nconst SQLITE_BACKEND_LABEL = 'shared-table SQLite';\nconst SQLITE_BACKEND_ERR_LABEL = 'SQLite backend';\n\nexport interface CompiledStatement {\n sql: string;\n params: unknown[];\n}\n\n/**\n * Translate a firegraph filter field to either a column reference or a\n * `json_extract(\"data\", '$.<path>')` expression with the JSON path\n * **inlined as a string literal** — not parametrized.\n *\n * Inlining matters: SQLite's query planner matches an expression index\n * (`CREATE INDEX … ON tbl(json_extract(\"data\", '$.status'))`) against\n * *textually identical* expressions in the WHERE clause. `json_extract(\n * \"data\", ?)` with the path as a bound parameter would never hit the\n * index, so every `data.*` filter would fall back to a full scan even\n * when a matching expression index exists. The index builder in\n * `sqlite-index-ddl.ts` emits the inlined form, and this compiler must\n * match it verbatim.\n *\n * Safety: each path component passes `JSON_PATH_KEY_RE`\n * (`/^[A-Za-z_][A-Za-z0-9_-]*$/`) before embedding, which excludes every\n * character SQLite treats as syntax (quote, dot, bracket, whitespace).\n */\nfunction compileFieldRef(field: string): { expr: string } {\n const column = FIELD_TO_COLUMN[field];\n if (column) {\n return { expr: quoteIdent(column) };\n }\n if (field.startsWith('data.')) {\n const suffix = field.slice(5);\n for (const part of suffix.split('.')) {\n validateJsonPathKey(part, SQLITE_BACKEND_ERR_LABEL);\n }\n return { expr: `json_extract(\"data\", '$.${suffix}')` };\n }\n if (field === 'data') {\n return { expr: `json_extract(\"data\", '$')` };\n }\n throw new FiregraphError(`SQLite backend cannot resolve filter field: ${field}`, 'INVALID_QUERY');\n}\n\n/**\n * Coerce a JS filter value into a SQLite-bindable primitive. JS objects\n * (other than null) are JSON-stringified so they can match values stored\n * via `json_extract`.\n *\n * Firestore special types (Timestamp, GeoPoint, VectorValue,\n * DocumentReference, FieldValue) are rejected with `INVALID_QUERY` —\n * `JSON.stringify` would emit garbage (`{}` for Timestamp, etc.) that\n * silently fails to match anything. Convert to a primitive at the call site\n * (e.g. 
`ts.toMillis()` or `ts.toDate().toISOString()`) before passing in.\n */\nfunction bindValue(value: unknown): unknown {\n if (value === null || value === undefined) return null;\n if (\n typeof value === 'string' ||\n typeof value === 'number' ||\n typeof value === 'bigint' ||\n typeof value === 'boolean'\n ) {\n return value;\n }\n if (value instanceof Date) return value.getTime();\n if (typeof value === 'object') {\n const firestoreType = isFirestoreSpecialType(value);\n if (firestoreType) {\n throw new FiregraphError(\n `SQLite backend cannot bind a Firestore ${firestoreType} value — JSON serialization ` +\n `would silently drop fields and the resulting bind would never match a stored row. ` +\n `Convert to a primitive (e.g. \\`ts.toMillis()\\` for Timestamp) before filtering or ` +\n `updating.`,\n 'INVALID_QUERY',\n );\n }\n return JSON.stringify(value);\n }\n return String(value);\n}\n\nfunction compileFilter(filter: QueryFilter, params: unknown[]): string {\n const { expr } = compileFieldRef(filter.field);\n\n switch (filter.op) {\n case '==':\n params.push(bindValue(filter.value));\n return `${expr} = ?`;\n case '!=':\n params.push(bindValue(filter.value));\n return `${expr} != ?`;\n case '<':\n params.push(bindValue(filter.value));\n return `${expr} < ?`;\n case '<=':\n params.push(bindValue(filter.value));\n return `${expr} <= ?`;\n case '>':\n params.push(bindValue(filter.value));\n return `${expr} > ?`;\n case '>=':\n params.push(bindValue(filter.value));\n return `${expr} >= ?`;\n case 'in': {\n const values = asArray(filter.value, 'in');\n const placeholders = values.map(() => '?').join(', ');\n for (const v of values) params.push(bindValue(v));\n return `${expr} IN (${placeholders})`;\n }\n case 'not-in': {\n const values = asArray(filter.value, 'not-in');\n const placeholders = values.map(() => '?').join(', ');\n for (const v of values) params.push(bindValue(v));\n return `${expr} NOT IN (${placeholders})`;\n }\n case 'array-contains': {\n params.push(bindValue(filter.value));\n return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value = ?)`;\n }\n case 'array-contains-any': {\n const values = asArray(filter.value, 'array-contains-any');\n const placeholders = values.map(() => '?').join(', ');\n for (const v of values) params.push(bindValue(v));\n return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value IN (${placeholders}))`;\n }\n default:\n throw new FiregraphError(\n `SQLite backend does not support filter operator: ${String(filter.op)}`,\n 'INVALID_QUERY',\n );\n }\n}\n\nfunction asArray(value: unknown, op: string): unknown[] {\n if (!Array.isArray(value) || value.length === 0) {\n throw new FiregraphError(`Operator \"${op}\" requires a non-empty array value`, 'INVALID_QUERY');\n }\n return value;\n}\n\nfunction compileOrderBy(options: QueryOptions | undefined, _params: unknown[]): string {\n if (!options?.orderBy) return '';\n const { field, direction } = options.orderBy;\n const { expr } = compileFieldRef(field);\n const dir = direction === 'desc' ? 'DESC' : 'ASC';\n return ` ORDER BY ${expr} ${dir}`;\n}\n\nfunction compileLimit(options: QueryOptions | undefined, params: unknown[]): string {\n if (options?.limit === undefined) return '';\n params.push(options.limit);\n return ` LIMIT ?`;\n}\n\n/**\n * SELECT all rows matching `filters` within `scope`. 
The scope filter is\n * always added as the leading predicate so the `(scope, …)` indexes apply.\n */\nexport function compileSelect(\n table: string,\n scope: string,\n filters: QueryFilter[],\n options?: QueryOptions,\n): CompiledStatement {\n const params: unknown[] = [];\n const conditions: string[] = ['\"scope\" = ?'];\n params.push(scope);\n\n for (const f of filters) {\n conditions.push(compileFilter(f, params));\n }\n\n let sql = `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(' AND ')}`;\n // ORDER BY parameters must come after WHERE parameters in the bind list.\n const orderClause = compileOrderBy(options, params);\n sql += orderClause;\n sql += compileLimit(options, params);\n\n return { sql, params };\n}\n\n/**\n * Compile an aggregate query into a single `SELECT` statement.\n *\n * Each entry in `spec` becomes one aggregate function in the projection\n * list, aliased with the caller-supplied key. Field references reuse\n * `compileFieldRef` so dotted `data.*` paths are translated to\n * `json_extract` exactly the same way as in regular filters — including\n * the index-friendly inlined-path form.\n *\n * Numeric coercion: SUM/AVG/MIN/MAX cast `json_extract` results through\n * `CAST(... AS REAL)`. SQLite stores JSON as text, so without the cast a\n * numeric value extracted from the JSON column comes back as a string and\n * `MIN`/`MAX` would compare lexicographically (`\"100\" < \"20\"`). The cast\n * forces numeric semantics on those three; `COUNT(*)` is unaffected.\n *\n * Empty result handling matches the Firestore semantics surfaced by the\n * `runFirestoreAggregate` helper: SUM/MIN/MAX of an empty set returns 0\n * (resolving SQLite's `SUM(NULL) = NULL` to a clean number); AVG returns\n * `NaN` (mathematically undefined for empty input). The translation\n * happens at the JS layer in the SQLite backend so the SQL stays simple.\n */\nexport function compileAggregate(\n table: string,\n scope: string,\n spec: AggregateSpec,\n filters: QueryFilter[],\n): { stmt: CompiledStatement; aliases: string[] } {\n const aliases = Object.keys(spec);\n if (aliases.length === 0) {\n throw new FiregraphError(\n 'aggregate() requires at least one aggregation in the `aggregates` map.',\n 'INVALID_QUERY',\n );\n }\n\n const projections: string[] = [];\n for (const alias of aliases) {\n const { op, field } = spec[alias];\n // Validate the alias as a JSON path key — same charset rule used for\n // dotted field references. 
This guards against accidental SQL\n // injection through caller-supplied alias names; aliases are inlined\n // (not parametrised) because SQL aliases can't be bound parameters.\n validateJsonPathKey(alias, SQLITE_BACKEND_ERR_LABEL);\n if (op === 'count') {\n // Reject a stray field — see `AggregateField` JSDoc for rationale.\n if (field !== undefined) {\n throw new FiregraphError(\n `Aggregate '${alias}' op 'count' must not specify a field — ` +\n `count operates on rows, not a column expression.`,\n 'INVALID_QUERY',\n );\n }\n projections.push(`COUNT(*) AS ${quoteIdent(alias)}`);\n continue;\n }\n if (!field) {\n throw new FiregraphError(\n `Aggregate '${alias}' op '${op}' requires a field.`,\n 'INVALID_QUERY',\n );\n }\n const { expr } = compileFieldRef(field);\n const numeric = `CAST(${expr} AS REAL)`;\n if (op === 'sum') projections.push(`SUM(${numeric}) AS ${quoteIdent(alias)}`);\n else if (op === 'avg') projections.push(`AVG(${numeric}) AS ${quoteIdent(alias)}`);\n else if (op === 'min') projections.push(`MIN(${numeric}) AS ${quoteIdent(alias)}`);\n else if (op === 'max') projections.push(`MAX(${numeric}) AS ${quoteIdent(alias)}`);\n else\n throw new FiregraphError(\n `SQLite backend does not support aggregate op: ${String(op)}`,\n 'INVALID_QUERY',\n );\n }\n\n const params: unknown[] = [scope];\n const conditions: string[] = ['\"scope\" = ?'];\n for (const f of filters) {\n conditions.push(compileFilter(f, params));\n }\n\n const sql =\n `SELECT ${projections.join(', ')} ` +\n `FROM ${quoteIdent(table)} ` +\n `WHERE ${conditions.join(' AND ')}`;\n return { stmt: { sql, params }, aliases };\n}\n\n/**\n * Cross-scope SELECT — equivalent to Firestore's collection group query.\n * Used by `findEdgesGlobal`.\n *\n * `scopeNameFilter`, when present, narrows the result to rows whose scope's\n * last materialized-path segment equals the given subgraph name (or to the\n * root rows when the name equals the table name itself, matching Firestore's\n * `db.collectionGroup(tableName)` semantics).\n */\nexport function compileSelectGlobal(\n table: string,\n filters: QueryFilter[],\n options?: QueryOptions,\n scopeNameFilter?: { name: string; isRoot: boolean },\n): CompiledStatement {\n if (filters.length === 0) {\n throw new FiregraphError(\n 'compileSelectGlobal requires at least one filter — refusing to issue an unbounded SELECT.',\n 'INVALID_QUERY',\n );\n }\n\n const params: unknown[] = [];\n const conditions: string[] = [];\n\n if (scopeNameFilter) {\n if (scopeNameFilter.isRoot) {\n conditions.push(`\"scope\" = ?`);\n params.push('');\n } else {\n conditions.push(`\"scope\" LIKE ? ESCAPE '\\\\'`);\n params.push(`%/${escapeLike(scopeNameFilter.name)}`);\n }\n }\n\n for (const f of filters) {\n conditions.push(compileFilter(f, params));\n }\n\n const sql =\n `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(' AND ')}` +\n compileOrderBy(options, params) +\n compileLimit(options, params);\n return { sql, params };\n}\n\nexport function compileSelectByDocId(\n table: string,\n scope: string,\n docId: string,\n): CompiledStatement {\n return {\n sql: `SELECT * FROM ${quoteIdent(table)} WHERE \"scope\" = ? AND \"doc_id\" = ? LIMIT 1`,\n params: [scope, docId],\n };\n}\n\n/**\n * Discriminator for one projected column. The decoder uses this to recover\n * the JS-shape of the requested field from the SQL row.\n */\nexport type ProjectedColumnKind =\n /** Top-level TEXT column: `a_type`, `a_uid`, `axb_type`, `b_type`, `b_uid`. */\n | 'builtin-text'\n /** Top-level INTEGER column: `v`. 
*/\n | 'builtin-int'\n /** Top-level INTEGER millis column: `created_at`, `updated_at`.\n * Decoder wraps the millisecond value in `GraphTimestampImpl` so the\n * output matches `findEdges` / `findNodes`. */\n | 'builtin-timestamp'\n /** Whole `data` JSON payload — emitted when the caller projects `'data'`\n * literally. Decoder JSON.parses the column. */\n | 'data'\n /** Any `data.<path>` projection. The compiler emits a paired\n * `json_extract(...)` value column and `json_type(...)` type column;\n * the decoder uses the type to recover JSON-encoded objects/arrays as\n * native JS while passing primitives through verbatim. */\n | 'json';\n\n/** Per-column metadata returned alongside the compiled statement. The\n * backend uses this to translate row -> projected JS object. */\nexport interface ProjectedColumnSpec {\n /** Original caller-supplied field name. Used as the alias in the SQL\n * projection list AND as the key in the returned JS row. */\n field: string;\n /** Kind discriminator — see `ProjectedColumnKind`. */\n kind: ProjectedColumnKind;\n /** Alias of the paired `json_type(...)` column when `kind === 'json'`,\n * otherwise undefined. Stored explicitly (rather than derived from\n * `field`) so the compiler can use a guaranteed-unique sentinel that\n * cannot collide with any caller-supplied select entry. */\n typeAlias?: string;\n}\n\n/**\n * Normalize a projection field name to the canonical form `compileFieldRef`\n * understands: built-ins stay as-is, `data` and `data.*` stay as-is, and a\n * bare `name` (with no dot and not in the built-in map) is rewritten to\n * `data.name`.\n *\n * Why bare names: the documented use case for `findEdgesProjected` is\n * \"give me titles and dates for a list view\", and the canonical example\n * (`select: ['title', 'date']`) reads naturally as bare names. Requiring\n * `'data.title'` would be portable to `WhereClause.field` but uglier in the\n * common case. The result row keys preserve the original form (`title`,\n * not `data.title`) so callers see what they asked for.\n */\nfunction normalizeProjectionField(field: string): string {\n if (field in FIELD_TO_COLUMN) return field;\n if (field === 'data' || field.startsWith('data.')) return field;\n return `data.${field}`;\n}\n\n/**\n * Compile a `findEdgesProjected({ select })` call into a single SELECT\n * statement that returns only the requested fields.\n *\n * Shape:\n *\n * SELECT\n * <expr-1> AS \"<field-1>\", [json_type(...) AS \"__fg_t_<idx>\",]\n * <expr-2> AS \"<field-2>\", [json_type(...) AS \"__fg_t_<idx>\",]\n * ...\n * FROM <table>\n * WHERE \"scope\" = ? AND <filters>\n * [ORDER BY ...]\n * [LIMIT ?]\n *\n * For `data.*` fields the compiler also projects `json_type` so the\n * decoder can distinguish a stored string from a serialized object/array\n * (`json_extract` returns both as TEXT; `json_type` is the only reliable\n * disambiguator). The cost is one extra column per `data.*` field — all in\n * the same row, no extra round trip. The companion alias uses a positional\n * sentinel `__fg_t_<idx>` rather than `<field>__t` so it cannot collide\n * with a user-provided field literally named `<x>__t`.\n *\n * Duplicate entries in `select` are de-duped at compile time so the SQL\n * projection list carries one column per unique field. 
Order in the input\n * `select` is preserved (first occurrence wins).\n *\n * The compiler rejects an empty `select` (the client wrapper enforces this\n * too — both layers reject so a misuse caught by either surfaces a clean\n * `INVALID_QUERY`).\n */\nexport function compileFindEdgesProjected(\n table: string,\n scope: string,\n select: ReadonlyArray<string>,\n filters: QueryFilter[],\n options?: QueryOptions,\n): { stmt: CompiledStatement; columns: ProjectedColumnSpec[] } {\n if (select.length === 0) {\n throw new FiregraphError(\n 'compileFindEdgesProjected requires a non-empty select list — ' +\n 'an empty projection has no SQL representation distinct from `findEdges`.',\n 'INVALID_QUERY',\n );\n }\n\n // De-dupe while preserving first-occurrence order. Two entries that\n // differ only by normalization (e.g. `'title'` and `'data.title'`)\n // remain distinct so the result row carries both keys — that's the\n // caller's choice and we honour it.\n const seen = new Set<string>();\n const uniqueFields: string[] = [];\n for (const f of select) {\n if (!seen.has(f)) {\n seen.add(f);\n uniqueFields.push(f);\n }\n }\n\n const projections: string[] = [];\n const columns: ProjectedColumnSpec[] = [];\n for (let idx = 0; idx < uniqueFields.length; idx++) {\n const field = uniqueFields[idx]!;\n const canonical = normalizeProjectionField(field);\n const { expr } = compileFieldRef(canonical);\n // Alias is the caller-supplied field name verbatim — this is the key\n // the decoder reads back from each result row, and the contract is\n // \"projection result is keyed by what the caller passed in\". Use the\n // relaxed alias quoter so dotted paths like `data.detail.region` are\n // accepted (the strict `quoteIdent` is for table/column names only).\n const alias = quoteColumnAlias(field);\n projections.push(`${expr} AS ${alias}`);\n\n let kind: ProjectedColumnKind;\n let typeAliasName: string | undefined;\n if (canonical === 'data') {\n kind = 'data';\n } else if (canonical.startsWith('data.')) {\n kind = 'json';\n // Pair every json_extract with a json_type so the decoder can\n // recover objects/arrays. The paired column needs a guaranteed-\n // unique alias — `<field>__t` would collide if the caller projects\n // both `'foo'` and `'foo__t'` (legal user input). Use a positional\n // sentinel keyed by the field's de-duped index. The decoder reads\n // the type column off the same name we generated here, so we record\n // it in the column spec rather than reconstructing from the field.\n typeAliasName = `__fg_t_${idx}`;\n const typeAlias = quoteColumnAlias(typeAliasName);\n projections.push(`json_type(\"data\", '$.${canonical.slice(5)}') AS ${typeAlias}`);\n } else {\n // Built-in field. 
Discriminate by column name so the decoder can\n // wrap timestamps in `GraphTimestampImpl` and coerce `v` to number.\n if (canonical === 'v') kind = 'builtin-int';\n else if (canonical === 'createdAt' || canonical === 'updatedAt') kind = 'builtin-timestamp';\n else kind = 'builtin-text';\n }\n columns.push({ field, kind, typeAlias: typeAliasName });\n }\n\n const params: unknown[] = [scope];\n const conditions: string[] = ['\"scope\" = ?'];\n for (const f of filters) {\n conditions.push(compileFilter(f, params));\n }\n\n let sql =\n `SELECT ${projections.join(', ')} ` +\n `FROM ${quoteIdent(table)} ` +\n `WHERE ${conditions.join(' AND ')}`;\n sql += compileOrderBy(options, params);\n sql += compileLimit(options, params);\n\n return { stmt: { sql, params }, columns };\n}\n\n/**\n * Decode one SQL row into the projected JS shape described by `columns`.\n *\n * Built-in TEXT/INTEGER columns pass through with light coercion (BigInt\n * to number for `v`); timestamps wrap into `GraphTimestampImpl`. `data.*`\n * fields use the paired `json_type` column to decide whether to JSON.parse\n * the value (objects and arrays come back as JSON-encoded TEXT from\n * `json_extract`; primitives come through with their native SQLite type).\n *\n * The function is exported so the SQLite and shared SQLite backends share\n * one decoder; both call this with the spec returned from\n * `compileFindEdgesProjected`.\n */\nexport function decodeProjectedRow(\n row: Record<string, unknown>,\n columns: ProjectedColumnSpec[],\n): Record<string, unknown> {\n const out: Record<string, unknown> = {};\n for (const c of columns) {\n const raw = row[c.field];\n switch (c.kind) {\n case 'builtin-text':\n out[c.field] = raw === null || raw === undefined ? null : String(raw);\n break;\n case 'builtin-int':\n if (raw === null || raw === undefined) {\n // `v` is nullable in the schema; preserve null explicitly so\n // callers can distinguish \"no version\" from a numeric 0.\n out[c.field] = null;\n } else if (typeof raw === 'bigint') {\n out[c.field] = Number(raw);\n } else if (typeof raw === 'number') {\n out[c.field] = raw;\n } else {\n out[c.field] = Number(raw);\n }\n break;\n case 'builtin-timestamp': {\n const ms = toMillis(raw);\n out[c.field] = GraphTimestampImpl.fromMillis(ms) as unknown as GraphTimestamp;\n break;\n }\n case 'data':\n // Whole `data` payload — JSON.parse the column directly. Empty /\n // null defaults to `{}` for symmetry with `rowToRecord`.\n if (raw === null || raw === undefined || raw === '') {\n out[c.field] = {};\n } else {\n out[c.field] = JSON.parse(raw as string);\n }\n break;\n case 'json': {\n // Read the paired `json_type` companion column via the positional\n // sentinel recorded at compile time — the historical `<field>__t`\n // suffix would silently collide if the caller projected both\n // `'foo'` and `'foo__t'`.\n const t = row[c.typeAlias!] as string | null | undefined;\n if (raw === null || raw === undefined) {\n out[c.field] = null;\n } else if (t === 'object' || t === 'array') {\n // Stored object/array — `json_extract` returned a JSON-encoded\n // string. Re-parse to recover native JS shape.\n out[c.field] = typeof raw === 'string' ? JSON.parse(raw) : raw;\n } else if (t === 'integer' && typeof raw === 'bigint') {\n out[c.field] = Number(raw);\n } else {\n // text / real / true / false / null — pass through. 
SQLite's\n // driver already returns the native JS primitive type.\n out[c.field] = raw;\n }\n break;\n }\n }\n }\n return out;\n}\n\n/**\n * Compile an `expand()` fan-out into one SELECT statement.\n *\n * Shape (forward direction, with hydrate=false):\n *\n * SELECT * FROM <table>\n * WHERE \"scope\" = ? AND \"axbType\" = ? AND \"aUid\" IN (?, ?, …)\n * [AND \"aType\" = ?] [AND \"bType\" = ?]\n * [ORDER BY …]\n * [LIMIT ?]\n *\n * The `IN (\"aUid\", …)` clause replaces the per-source `findEdges` loop\n * `traverse.ts` would otherwise issue, collapsing N round trips into one\n * regardless of how many sources the caller passes. Reverse direction\n * swaps the `IN` predicate to `\"bUid\"` and the optional `aType`/`bType`\n * filters cover the same dimensions either way.\n *\n * Hydration (`params.hydrate === true`) is **not** baked into this\n * function. The backend issues a follow-up `SELECT * WHERE \"scope\" = ?\n * AND \"aUid\" = \"bUid\" AND \"axbType\" = 'is' AND \"bUid\" IN (…)` against the\n * target-side UIDs and stitches the alignment in JS. A single-statement\n * JOIN is technically possible (edges and nodes share one table here)\n * but produces colliding column names and forces a manual aliased\n * row-decoder, with no measurable perf win on an in-process executor.\n * Two `exec()` calls keep the row-decoder shared with the rest of\n * `findEdges` / `findNodes`.\n *\n * Empty `sources` is an invariant violation here — the SQLite backend\n * intercepts that case before reaching the compiler and short-circuits\n * to an empty result. The compiler itself rejects it because an empty\n * `IN ()` clause is invalid SQL.\n */\nexport function compileExpand(\n table: string,\n scope: string,\n params: ExpandParams,\n): CompiledStatement {\n if (params.sources.length === 0) {\n throw new FiregraphError(\n 'compileExpand requires a non-empty sources list — empty IN () is invalid SQL. ' +\n 'Callers should short-circuit empty input before reaching the compiler.',\n 'INVALID_QUERY',\n );\n }\n const direction = params.direction ?? 'forward';\n // Column identifiers must use the on-disk snake_case names\n // (`a_uid`, `axb_type`, `b_uid`, …) — see `FIELD_TO_COLUMN` in\n // `src/internal/sqlite-schema.ts`. We resolve every column reference\n // through `compileFieldRef` so a future schema rename can't drift\n // between read-paths.\n const aUidCol = compileFieldRef('aUid').expr;\n const bUidCol = compileFieldRef('bUid').expr;\n const aTypeCol = compileFieldRef('aType').expr;\n const bTypeCol = compileFieldRef('bType').expr;\n const axbTypeCol = compileFieldRef('axbType').expr;\n const sourceColumn = direction === 'forward' ? aUidCol : bUidCol;\n\n const sqlParams: unknown[] = [scope, params.axbType];\n const conditions: string[] = ['\"scope\" = ?', `${axbTypeCol} = ?`];\n\n // The IN list. Source UIDs are not currently chunked — SQLite has no hard\n // cap on bound parameters (the default `SQLITE_MAX_VARIABLE_NUMBER` is\n // 32766 in modern builds), and traversal callers cap source-set growth\n // through `maxReads` long before any realistic IN-list size. 
If a caller\n // ever blows past that cap, the backend surfaces it as the underlying\n // SQLite error rather than failing silently.\n const placeholders = params.sources.map(() => '?').join(', ');\n conditions.push(`${sourceColumn} IN (${placeholders})`);\n for (const uid of params.sources) sqlParams.push(uid);\n\n if (params.aType !== undefined) {\n conditions.push(`${aTypeCol} = ?`);\n sqlParams.push(params.aType);\n }\n if (params.bType !== undefined) {\n conditions.push(`${bTypeCol} = ?`);\n sqlParams.push(params.bType);\n }\n\n // Exclude self-loop \"node\" rows. Without this, an `axbType = 'is'`\n // expand (which would only happen via an explicit hop on the node\n // relation) could match the node-as-self-loop rows. Forward expand\n // over a non-`is` axbType already excludes them via the axbType\n // predicate; this clause is a belt-and-braces guard for the corner\n // case where the caller explicitly asks for `axbType: 'is'`.\n if (params.axbType === NODE_RELATION) {\n conditions.push(`${aUidCol} != ${bUidCol}`);\n }\n\n let sql = `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(' AND ')}`;\n\n // ORDER BY uses the same `compileFieldRef` rules as `findEdges`, so\n // dotted `data.*` paths translate to `json_extract` and hit the same\n // expression indexes when present. Per-source-strict ordering would\n // require window functions; see `ExpandParams.limitPerSource` JSDoc\n // for the contract — this LIMIT is a soft total cap.\n if (params.orderBy) {\n sql += compileOrderBy({ orderBy: params.orderBy }, sqlParams);\n }\n\n if (params.limitPerSource !== undefined) {\n // Total cap = sources.length * limitPerSource. We multiply at compile\n // time so the bound parameter is a concrete integer; SQLite parses\n // `LIMIT ?` once and the executor binds it in flight.\n const totalLimit = params.sources.length * params.limitPerSource;\n sql += ` LIMIT ?`;\n sqlParams.push(totalLimit);\n }\n\n return { sql, params: sqlParams };\n}\n\n/**\n * Compile the hydration-pass query for `expand({ hydrate: true })`. Issues\n * one statement against the same table that fetches every node row whose\n * `bUid` is in the supplied set. The backend stitches alignment in JS\n * (a `Map<bUid, StoredGraphRecord>` keyed by the canonical node UID).\n *\n * Empty input is rejected for the same reason as `compileExpand`.\n */\nexport function compileExpandHydrate(\n table: string,\n scope: string,\n targetUids: string[],\n): CompiledStatement {\n if (targetUids.length === 0) {\n throw new FiregraphError(\n 'compileExpandHydrate requires a non-empty target list — empty IN () is invalid SQL.',\n 'INVALID_QUERY',\n );\n }\n const placeholders = targetUids.map(() => '?').join(', ');\n const sqlParams: unknown[] = [scope, NODE_RELATION];\n for (const uid of targetUids) sqlParams.push(uid);\n\n // Resolve column refs via `compileFieldRef` — see `compileExpand` for\n // the schema-rename rationale.\n const aUidCol = compileFieldRef('aUid').expr;\n const bUidCol = compileFieldRef('bUid').expr;\n const axbTypeCol = compileFieldRef('axbType').expr;\n\n // Self-loop predicate (`a_uid = b_uid`) is what distinguishes a node\n // row from an edge row in the single-table schema. Without it, an\n // accidental `is`-typed edge (which firegraph forbids today, but the\n // schema doesn't enforce) could mask a real node row in the result.\n return {\n sql:\n `SELECT * FROM ${quoteIdent(table)} ` +\n `WHERE \"scope\" = ? AND ${axbTypeCol} = ? 
AND ${aUidCol} = ${bUidCol} AND ${bUidCol} IN (${placeholders})`,\n params: sqlParams,\n };\n}\n\n/**\n * Compile a `setDoc(record, mode)` call into a single statement.\n *\n * - `mode === 'replace'` — `INSERT OR REPLACE`. Every row column is\n * overwritten; any pre-existing JSON keys not present in `record.data`\n * are dropped.\n * - `mode === 'merge'` — `INSERT … ON CONFLICT(scope, doc_id) DO UPDATE\n * SET …`. New rows insert the full record; existing rows have their\n * `data` JSON deep-merged via the chained `json_set`/`json_remove`\n * expression produced by `compileDataOpsExpr`. Sibling keys at every\n * depth survive. Arrays and primitives are terminal (replaced as a\n * unit), matching Firestore's `.set(..., { merge: true })` behaviour.\n *\n * `created_at` is re-stamped on every put — both modes use\n * `excluded.created_at` so the cross-backend contract matches today's\n * Firestore behaviour. (Future work: switch to insert-only createdAt; out\n * of scope for the merge-semantics fix.)\n */\nexport function compileSet(\n table: string,\n scope: string,\n docId: string,\n record: WritableRecord,\n nowMillis: number,\n mode: WriteMode,\n): CompiledStatement {\n // Eager validation. Both branches below feed `record.data` to a raw\n // `JSON.stringify` for the INSERT path; flattenPatch only catches issues\n // on the merge UPDATE branch and only fires when there's a conflicting\n // row. Validating up front keeps first-insert and ON CONFLICT errors\n // identical, and rejects the DELETE_FIELD sentinel that JSON.stringify\n // would silently drop.\n assertJsonSafePayload(record.data, SQLITE_BACKEND_LABEL);\n if (mode === 'replace') {\n const sql = `INSERT OR REPLACE INTO ${quoteIdent(table)} (\n doc_id, scope, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`;\n const params: unknown[] = [\n docId,\n scope,\n record.aType,\n record.aUid,\n record.axbType,\n record.bType,\n record.bUid,\n JSON.stringify(record.data ?? {}),\n record.v ?? null,\n nowMillis,\n nowMillis,\n ];\n return { sql, params };\n }\n\n // Merge mode.\n const insertParams: unknown[] = [\n docId,\n scope,\n record.aType,\n record.aUid,\n record.axbType,\n record.bType,\n record.bUid,\n JSON.stringify(record.data ?? {}),\n record.v ?? null,\n nowMillis,\n nowMillis,\n ];\n\n // Translate record.data into deep-path ops, then build the merge expr.\n const ops = flattenPatch(record.data ?? {});\n const updateParams: unknown[] = [];\n const dataExpr =\n compileDataOpsExpr(ops, `COALESCE(\"data\", '{}')`, updateParams, SQLITE_BACKEND_ERR_LABEL) ??\n `COALESCE(\"data\", '{}')`;\n\n // `v` uses COALESCE(excluded.v, v) so an incoming record with `v=undefined`\n // (registry has no migrations, or stampWritableRecord didn't stamp) leaves\n // any previously-stamped `v` intact. 
Firestore's `set(record, {merge: true})`\n // omits the key when undefined and behaves the same way; SQLite must too.\n // Without COALESCE, `excluded.v` would be NULL and clobber a pre-existing\n // `v` — which silently breaks migration replay if migrations are removed\n // and later re-added to a type.\n const sql = `INSERT INTO ${quoteIdent(table)} (\n doc_id, scope, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ON CONFLICT(scope, doc_id) DO UPDATE SET\n \"a_type\" = excluded.\"a_type\",\n \"a_uid\" = excluded.\"a_uid\",\n \"axb_type\" = excluded.\"axb_type\",\n \"b_type\" = excluded.\"b_type\",\n \"b_uid\" = excluded.\"b_uid\",\n \"data\" = ${dataExpr},\n \"v\" = COALESCE(excluded.\"v\", \"v\"),\n \"created_at\" = excluded.\"created_at\",\n \"updated_at\" = excluded.\"updated_at\"`;\n\n return { sql, params: [...insertParams, ...updateParams] };\n}\n\n/**\n * Compile an `UpdatePayload` into a single UPDATE statement.\n *\n * - `replaceData` overwrites the whole `data` JSON. (Used by migration\n * write-back.)\n * - `dataOps` deep-merges via chained `json_remove` / `json_set` —\n * siblings at every nesting depth survive; arrays / primitives /\n * Firestore special types are terminal; delete-ops use `json_remove`.\n * - `v` is set when provided.\n * - `updated_at` is always stamped.\n */\nexport function compileUpdate(\n table: string,\n scope: string,\n docId: string,\n update: UpdatePayload,\n nowMillis: number,\n): CompiledStatement {\n assertUpdatePayloadExclusive(update);\n const setClauses: string[] = [];\n const params: unknown[] = [];\n\n if (update.replaceData) {\n assertJsonSafePayload(update.replaceData, SQLITE_BACKEND_LABEL);\n setClauses.push(`\"data\" = ?`);\n params.push(JSON.stringify(update.replaceData));\n } else if (update.dataOps && update.dataOps.length > 0) {\n for (const op of update.dataOps) {\n if (!op.delete) assertJsonSafePayload(op.value, SQLITE_BACKEND_LABEL);\n }\n const expr = compileDataOpsExpr(\n update.dataOps,\n `COALESCE(\"data\", '{}')`,\n params,\n SQLITE_BACKEND_ERR_LABEL,\n );\n if (expr !== null) {\n setClauses.push(`\"data\" = ${expr}`);\n }\n }\n\n if (update.v !== undefined) {\n setClauses.push(`\"v\" = ?`);\n params.push(update.v);\n }\n\n setClauses.push(`\"updated_at\" = ?`);\n params.push(nowMillis);\n\n // WHERE params come last\n params.push(scope, docId);\n\n return {\n sql: `UPDATE ${quoteIdent(table)} SET ${setClauses.join(', ')} WHERE \"scope\" = ? AND \"doc_id\" = ?`,\n params,\n };\n}\n\nexport function compileDelete(table: string, scope: string, docId: string): CompiledStatement {\n return {\n sql: `DELETE FROM ${quoteIdent(table)} WHERE \"scope\" = ? AND \"doc_id\" = ?`,\n params: [scope, docId],\n };\n}\n\n/**\n * Compile a server-side bulk DELETE — `query.dml` capability.\n *\n * Mirrors `compileSelect`'s WHERE construction (scope-leading predicate\n * + filter list) so any composite index that accelerates a `findEdges`\n * also accelerates the equivalent `bulkDelete`. 
Empty-filter callers (a\n * \"delete everything in this scope\" sweep) are accepted — the caller is\n * expected to have opted into a collection scan via `allowCollectionScan`\n * at the client layer; the SQL itself is the same shape regardless.\n */\nexport function compileBulkDelete(\n table: string,\n scope: string,\n filters: QueryFilter[],\n): CompiledStatement {\n const params: unknown[] = [scope];\n const conditions: string[] = ['\"scope\" = ?'];\n for (const f of filters) {\n conditions.push(compileFilter(f, params));\n }\n return {\n sql: `DELETE FROM ${quoteIdent(table)} WHERE ${conditions.join(' AND ')}`,\n params,\n };\n}\n\n/**\n * Compile a server-side bulk UPDATE — `query.dml` capability.\n *\n * The `patch.data` payload is deep-merged into each matching row's `data`\n * field via the same `flattenPatch` → `compileDataOpsExpr` pipeline that\n * single-row `compileUpdate` uses. Identifying columns (`aType`, `axbType`,\n * `aUid`, `bType`, `bUid`, `v`) are intentionally read-only through this\n * path — to relabel rows, delete and re-insert.\n *\n * Empty-patch (no leaves to merge) is rejected: a no-op UPDATE that only\n * touched `updated_at` would silently rewrite every matching row's\n * timestamp, which is almost never what the caller wants. If you want to\n * stamp without editing data, use `setDoc` with `'merge'`.\n */\nexport function compileBulkUpdate(\n table: string,\n scope: string,\n filters: QueryFilter[],\n patchData: Record<string, unknown>,\n nowMillis: number,\n): CompiledStatement {\n const dataOps = flattenPatch(patchData);\n if (dataOps.length === 0) {\n throw new FiregraphError(\n 'bulkUpdate() patch.data must contain at least one leaf — an empty patch ' +\n 'would only rewrite `updated_at`, which is almost certainly a bug. ' +\n 'Use `setDoc` with merge mode if you want to stamp without editing data.',\n 'INVALID_QUERY',\n );\n }\n for (const op of dataOps) {\n if (!op.delete) assertJsonSafePayload(op.value, SQLITE_BACKEND_LABEL);\n }\n const setParams: unknown[] = [];\n const expr = compileDataOpsExpr(\n dataOps,\n `COALESCE(\"data\", '{}')`,\n setParams,\n SQLITE_BACKEND_ERR_LABEL,\n );\n if (expr === null) {\n // `compileDataOpsExpr` only returns null when there's nothing to do —\n // we already guarded the empty-patch case above so this is unreachable\n // in practice, but the type system can't see that.\n throw new FiregraphError(\n 'bulkUpdate() patch produced no SQL operations — internal invariant violated.',\n 'INVALID_ARGUMENT',\n );\n }\n const setClauses: string[] = [`\"data\" = ${expr}`, `\"updated_at\" = ?`];\n setParams.push(nowMillis);\n\n // WHERE: scope + filters. Filter params follow the SET params in the\n // bind list — same ordering convention as `compileUpdate` /\n // `compileSelect`.\n const whereParams: unknown[] = [scope];\n const conditions: string[] = ['\"scope\" = ?'];\n for (const f of filters) {\n conditions.push(compileFilter(f, whereParams));\n }\n\n return {\n sql:\n `UPDATE ${quoteIdent(table)} SET ${setClauses.join(', ')} ` +\n `WHERE ${conditions.join(' AND ')}`,\n params: [...setParams, ...whereParams],\n };\n}\n\n/**\n * Delete every row whose scope starts with `scopePrefix` followed by '/'.\n * Used by cascade delete to wipe all subgraphs nested under a node.\n *\n * The trailing '/' guard prevents `'a'` from matching `'ab'`. 
The exact-\n * match case (`scope = scopePrefix`) is intentionally NOT included: the\n * prefix passed in is always `<storageScope>/<uid>` (or just `<uid>` at\n * root), which is an odd-segment count, while every stored row's scope is\n * `''` (root) or an even-segment `<uid>/<name>[/…]` pair sequence — so\n * `scope = scopePrefix` can never match a real row.\n */\nexport function compileDeleteScopePrefix(table: string, scopePrefix: string): CompiledStatement {\n // SQLite LIKE escape: prefix could contain '%' or '_'. Escape with '\\\\'.\n const escaped = escapeLike(scopePrefix);\n return {\n sql: `DELETE FROM ${quoteIdent(table)} WHERE \"scope\" LIKE ? ESCAPE '\\\\'`,\n params: [`${escaped}/%`],\n };\n}\n\n/**\n * Count rows that `compileDeleteScopePrefix` would delete. Used by cascade\n * to report an accurate total in `CascadeResult.deleted` — the prefix-\n * delete is a single SQL statement and the executor surface doesn't expose\n * per-row counts. One extra index lookup per cascade is cheap relative to\n * the delete itself.\n */\nexport function compileCountScopePrefix(table: string, scopePrefix: string): CompiledStatement {\n const escaped = escapeLike(scopePrefix);\n return {\n sql: `SELECT COUNT(*) AS n FROM ${quoteIdent(table)} WHERE \"scope\" LIKE ? ESCAPE '\\\\'`,\n params: [`${escaped}/%`],\n };\n}\n\nfunction escapeLike(value: string): string {\n return value.replace(/\\\\/g, '\\\\\\\\').replace(/%/g, '\\\\%').replace(/_/g, '\\\\_');\n}\n\n/**\n * Convert a row returned by a SQLite driver into a `StoredGraphRecord`.\n * `created_at`/`updated_at` are wrapped in `GraphTimestampImpl` so they\n * present the same surface as Firestore's `Timestamp`.\n */\nexport function rowToRecord(row: Record<string, unknown>): StoredGraphRecord {\n const dataString = row.data as string | null;\n const data = dataString ? (JSON.parse(dataString) as Record<string, unknown>) : {};\n\n const createdMs = toMillis(row.created_at);\n const updatedMs = toMillis(row.updated_at);\n\n const record: Record<string, unknown> = {\n aType: row.a_type as string,\n aUid: row.a_uid as string,\n axbType: row.axb_type as string,\n bType: row.b_type as string,\n bUid: row.b_uid as string,\n data,\n createdAt: GraphTimestampImpl.fromMillis(createdMs) as unknown as GraphTimestamp,\n updatedAt: GraphTimestampImpl.fromMillis(updatedMs) as unknown as GraphTimestamp,\n };\n\n if (row.v !== null && row.v !== undefined) {\n record.v = Number(row.v);\n }\n return record as unknown as StoredGraphRecord;\n}\n\nfunction toMillis(value: unknown): number {\n if (typeof value === 'number') return value;\n if (typeof value === 'bigint') return Number(value);\n if (typeof value === 'string') return Number(value);\n return 0;\n}\n","/**\n * SQLite implementation of `StorageBackend`.\n *\n * Uses a single table keyed by `(scope, doc_id)`. Subgraphs are encoded in\n * the `scope` column as a materialized path of interleaved parent UIDs and\n * subgraph names — `''` at the root, `'<uid>/<name>'` one level down,\n * `'<uid1>/<name1>/<uid2>/<name2>'` two levels down, and so on. 
Cascade\n * delete uses a single `DELETE … WHERE scope LIKE 'prefix/%'` instead of\n * walking subcollections.\n */\n\nimport { computeEdgeDocId, computeNodeDocId } from '../docid.js';\nimport { FiregraphError } from '../errors.js';\nimport type {\n BackendCapabilities,\n BatchBackend,\n StorageBackend,\n TransactionBackend,\n UpdatePayload,\n WritableRecord,\n WriteMode,\n} from '../internal/backend.js';\nimport { createCapabilities } from '../internal/backend.js';\nimport { NODE_RELATION } from '../internal/constants.js';\nimport type { SqliteExecutor, SqliteTxExecutor } from '../internal/sqlite-executor.js';\nimport { buildEdgeQueryPlan } from '../query.js';\nimport type {\n AggregateSpec,\n BulkBatchError,\n BulkOptions,\n BulkResult,\n BulkUpdatePatch,\n CascadeResult,\n ExpandParams,\n ExpandResult,\n FindEdgesParams,\n GraphReader,\n QueryFilter,\n QueryOptions,\n StoredGraphRecord,\n} from '../types.js';\nimport type { CompiledStatement } from './sql.js';\nimport {\n compileAggregate,\n compileBulkDelete,\n compileBulkUpdate,\n compileCountScopePrefix,\n compileDelete,\n compileDeleteScopePrefix,\n compileExpand,\n compileExpandHydrate,\n compileFindEdgesProjected,\n compileSelect,\n compileSelectByDocId,\n compileSelectGlobal,\n compileSet,\n compileUpdate,\n decodeProjectedRow,\n rowToRecord,\n} from './sql.js';\n\nexport interface SqliteBackendOptions {\n /** Logical scope path (chained subgraph names) — used for `allowedIn` matching. */\n scopePath?: string;\n /** Internal storage scope (interleaved parent-uid/name path). */\n storageScope?: string;\n}\n\n/**\n * Default per-chunk retry budget for bulk/cascade operations. Mirrors the\n * Firestore bulk path (`src/bulk.ts`) so behaviour is consistent across\n * backends. Callers override via `BulkOptions.maxRetries`.\n */\nconst DEFAULT_MAX_RETRIES = 3;\nconst BASE_RETRY_DELAY_MS = 200;\n/**\n * Upper bound for the exponential backoff between chunk retries. Without\n * this cap, `maxRetries: 10` would push the final wait past 100s; legitimate\n * transient errors recover well within a few seconds, and longer waits just\n * delay the surfacing of permanent failures.\n */\nconst MAX_RETRY_DELAY_MS = 5000;\n\nfunction sleep(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\n/**\n * Return the smaller of two optional positive numbers, treating `undefined`\n * as \"no cap.\" Used to combine caller-supplied `BulkOptions.batchSize` with\n * the driver's own `maxBatchSize` so the more restrictive cap wins.\n */\nfunction minDefined(a: number | undefined, b: number | undefined): number | undefined {\n if (a === undefined) return b;\n if (b === undefined) return a;\n return Math.min(a, b);\n}\n\n/**\n * Split `statements` into chunks that respect both a per-batch statement\n * count cap (`maxStatements`) and a per-batch total bound-parameter cap\n * (`maxParams`). When neither cap is provided the entire list is returned\n * as a single chunk (preserves cross-batch atomicity for drivers like\n * DO SQLite that have no caps).\n *\n * Single-statement edge case: if a single statement's parameter count\n * already exceeds `maxParams`, it's emitted as its own chunk anyway. 
The\n * driver will reject it, which is the correct behavior — silently\n * dropping it would be worse.\n */\nfunction chunkStatements<T extends { params: unknown[] }>(\n statements: T[],\n maxStatements: number | undefined,\n maxParams: number | undefined,\n): T[][] {\n const stmtCap =\n maxStatements && maxStatements > 0 && Number.isFinite(maxStatements)\n ? Math.floor(maxStatements)\n : Infinity;\n const paramCap =\n maxParams && maxParams > 0 && Number.isFinite(maxParams) ? Math.floor(maxParams) : Infinity;\n\n if (stmtCap === Infinity && paramCap === Infinity) {\n return [statements];\n }\n\n const chunks: T[][] = [];\n let current: T[] = [];\n let currentParamCount = 0;\n for (const stmt of statements) {\n const stmtParams = stmt.params.length;\n const wouldExceedStmt = current.length + 1 > stmtCap;\n const wouldExceedParam = currentParamCount + stmtParams > paramCap;\n if (current.length > 0 && (wouldExceedStmt || wouldExceedParam)) {\n chunks.push(current);\n current = [];\n currentParamCount = 0;\n }\n current.push(stmt);\n currentParamCount += stmtParams;\n }\n if (current.length > 0) chunks.push(current);\n return chunks;\n}\n\nclass SqliteTransactionBackendImpl implements TransactionBackend {\n constructor(\n private readonly tx: SqliteTxExecutor,\n private readonly tableName: string,\n private readonly storageScope: string,\n ) {}\n\n async getDoc(docId: string): Promise<StoredGraphRecord | null> {\n const stmt = compileSelectByDocId(this.tableName, this.storageScope, docId);\n const rows = await this.tx.all(stmt.sql, stmt.params);\n return rows.length === 0 ? null : rowToRecord(rows[0]);\n }\n\n async query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]> {\n const stmt = compileSelect(this.tableName, this.storageScope, filters, options);\n const rows = await this.tx.all(stmt.sql, stmt.params);\n return rows.map(rowToRecord);\n }\n\n async setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void> {\n const stmt = compileSet(this.tableName, this.storageScope, docId, record, Date.now(), mode);\n await this.tx.run(stmt.sql, stmt.params);\n }\n\n async updateDoc(docId: string, update: UpdatePayload): Promise<void> {\n const stmt = compileUpdate(this.tableName, this.storageScope, docId, update, Date.now());\n // RETURNING + `all()` for parity with Firestore — see SqliteBackendImpl.updateDoc.\n const sqlWithReturning = `${stmt.sql} RETURNING \"doc_id\"`;\n const rows = await this.tx.all(sqlWithReturning, stmt.params);\n if (rows.length === 0) {\n throw new FiregraphError(\n `updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,\n 'NOT_FOUND',\n );\n }\n }\n\n async deleteDoc(docId: string): Promise<void> {\n const stmt = compileDelete(this.tableName, this.storageScope, docId);\n await this.tx.run(stmt.sql, stmt.params);\n }\n}\n\nclass SqliteBatchBackendImpl implements BatchBackend {\n private readonly statements: CompiledStatement[] = [];\n\n constructor(\n private readonly executor: SqliteExecutor,\n private readonly tableName: string,\n private readonly storageScope: string,\n ) {}\n\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): void {\n this.statements.push(\n compileSet(this.tableName, this.storageScope, docId, record, Date.now(), mode),\n );\n }\n\n updateDoc(docId: string, update: UpdatePayload): void {\n this.statements.push(\n compileUpdate(this.tableName, this.storageScope, docId, update, Date.now()),\n );\n }\n\n deleteDoc(docId: string): void {\n 
this.statements.push(compileDelete(this.tableName, this.storageScope, docId));\n }\n\n async commit(): Promise<void> {\n if (this.statements.length === 0) return;\n await this.executor.batch(this.statements);\n this.statements.length = 0;\n }\n}\n\n/**\n * Capability union declared by the SQLite-backed `StorageBackend`.\n *\n * `core.transactions` is part of the static union because `runTransaction`\n * is always present as a method on the class. The runtime cap-set determines\n * whether that method is *functional*: D1 leaves `executor.transaction`\n * undefined and the call throws `UNSUPPORTED_OPERATION`; DO SQLite and\n * better-sqlite3 wire the executor and the call works. The static type\n * therefore promises only that the method exists — callers that care about\n * portability check `client.capabilities.has('core.transactions')` before\n * opening a tx, and code that runs against an unknown driver can rely on the\n * runtime guard inside `runTransaction`.\n *\n * The `query.*` extension capabilities follow the same conservative\n * declaration rule as the cap descriptor itself — only land in the union\n * when the corresponding method is actually wired up. Today that's\n * `query.aggregate` (Phase 4), `query.dml` (Phase 5), `query.join`\n * (Phase 6 — fan-out via `IN (…)` in one statement), and `query.select`\n * (Phase 7 — server-side projection via `json_extract`).\n */\nexport type SqliteCapability =\n | 'core.read'\n | 'core.write'\n | 'core.transactions'\n | 'core.batch'\n | 'core.subgraph'\n | 'query.aggregate'\n | 'query.dml'\n | 'query.join'\n | 'query.select'\n | 'raw.sql';\n\nconst SQLITE_CORE_CAPS: ReadonlyArray<SqliteCapability> = [\n 'core.read',\n 'core.write',\n 'core.batch',\n 'core.subgraph',\n 'query.aggregate',\n 'query.dml',\n 'query.join',\n 'query.select',\n 'raw.sql',\n];\n\nclass SqliteBackendImpl implements StorageBackend<SqliteCapability> {\n readonly capabilities: BackendCapabilities<SqliteCapability>;\n /** Logical table name (returned through `collectionPath` for parity with Firestore). */\n readonly collectionPath: string;\n readonly scopePath: string;\n /** Materialized storage scope (interleaved parent UIDs + subgraph names). */\n private readonly storageScope: string;\n\n constructor(\n private readonly executor: SqliteExecutor,\n tableName: string,\n storageScope: string,\n scopePath: string,\n ) {\n this.collectionPath = tableName;\n this.storageScope = storageScope;\n this.scopePath = scopePath;\n const caps = new Set<SqliteCapability>(SQLITE_CORE_CAPS);\n if (typeof executor.transaction === 'function') {\n caps.add('core.transactions');\n }\n this.capabilities = createCapabilities(caps);\n }\n\n // --- Reads ---\n\n async getDoc(docId: string): Promise<StoredGraphRecord | null> {\n const stmt = compileSelectByDocId(this.collectionPath, this.storageScope, docId);\n const rows = await this.executor.all(stmt.sql, stmt.params);\n return rows.length === 0 ? 
null : rowToRecord(rows[0]);\n }\n\n async query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]> {\n const stmt = compileSelect(this.collectionPath, this.storageScope, filters, options);\n const rows = await this.executor.all(stmt.sql, stmt.params);\n return rows.map(rowToRecord);\n }\n\n // --- Writes ---\n\n async setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void> {\n const stmt = compileSet(\n this.collectionPath,\n this.storageScope,\n docId,\n record,\n Date.now(),\n mode,\n );\n await this.executor.run(stmt.sql, stmt.params);\n }\n\n async updateDoc(docId: string, update: UpdatePayload): Promise<void> {\n const stmt = compileUpdate(this.collectionPath, this.storageScope, docId, update, Date.now());\n // Use RETURNING + `all()` so missing rows surface as an error, matching\n // Firestore's `update()` semantics (NOT_FOUND when the doc doesn't exist).\n // SQLite ≥3.35 supports UPDATE … RETURNING; better-sqlite3, D1, and DO\n // SQLite all run on a recent enough engine.\n const sqlWithReturning = `${stmt.sql} RETURNING \"doc_id\"`;\n const rows = await this.executor.all(sqlWithReturning, stmt.params);\n if (rows.length === 0) {\n throw new FiregraphError(\n `updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,\n 'NOT_FOUND',\n );\n }\n }\n\n async deleteDoc(docId: string): Promise<void> {\n const stmt = compileDelete(this.collectionPath, this.storageScope, docId);\n await this.executor.run(stmt.sql, stmt.params);\n }\n\n // --- Transactions / Batches ---\n\n async runTransaction<T>(fn: (tx: TransactionBackend) => Promise<T>): Promise<T> {\n if (!this.executor.transaction) {\n throw new FiregraphError(\n 'Interactive transactions are not supported by this SQLite driver. ' +\n 'D1 in particular has no read-then-conditional-write transactions; ' +\n 'use a Durable Object SQLite client instead, or rewrite the code path ' +\n 'as a batch().',\n 'UNSUPPORTED_OPERATION',\n );\n }\n return this.executor.transaction(async (tx) => {\n const txBackend = new SqliteTransactionBackendImpl(\n tx,\n this.collectionPath,\n this.storageScope,\n );\n return fn(txBackend);\n });\n }\n\n createBatch(): BatchBackend {\n return new SqliteBatchBackendImpl(this.executor, this.collectionPath, this.storageScope);\n }\n\n // --- Subgraphs ---\n\n subgraph(parentNodeUid: string, name: string): StorageBackend {\n // Defense-in-depth: the public `GraphClient.subgraph()` also validates,\n // but backend users (traversal, cross-graph hops, custom integrations)\n // reach this method directly. A bad UID or a name containing '/' would\n // corrupt the materialized-path scope encoding — reject loudly.\n if (!parentNodeUid || parentNodeUid.includes('/')) {\n throw new FiregraphError(\n `Invalid parentNodeUid for subgraph: \"${parentNodeUid}\". ` +\n 'Must be a non-empty string without \"/\".',\n 'INVALID_SUBGRAPH',\n );\n }\n if (!name || name.includes('/')) {\n throw new FiregraphError(\n `Subgraph name must not contain \"/\" and must be non-empty: got \"${name}\". ` +\n 'Use chained .subgraph() calls for nested subgraphs.',\n 'INVALID_SUBGRAPH',\n );\n }\n const newStorageScope = this.storageScope\n ? `${this.storageScope}/${parentNodeUid}/${name}`\n : `${parentNodeUid}/${name}`;\n const newScope = this.scopePath ? 
`${this.scopePath}/${name}` : name;\n return new SqliteBackendImpl(this.executor, this.collectionPath, newStorageScope, newScope);\n }\n\n // --- Cascade & bulk ---\n\n async removeNodeCascade(\n uid: string,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<CascadeResult> {\n // Collect all edges touching the node in the current scope (excluding self-loop).\n const [outgoingRaw, incomingRaw] = await Promise.all([\n reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),\n reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 }),\n ]);\n\n const seen = new Set<string>();\n const edgeDocIds: string[] = [];\n for (const edge of [...outgoingRaw, ...incomingRaw]) {\n if (edge.axbType === NODE_RELATION) continue;\n const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);\n if (!seen.has(docId)) {\n seen.add(docId);\n edgeDocIds.push(docId);\n }\n }\n\n const nodeDocId = computeNodeDocId(uid);\n const shouldDeleteSubgraphs = options?.deleteSubcollections !== false;\n\n // Pre-count subgraph rows so the returned `deleted` total reflects the\n // actual number of records removed by the prefix-delete (which is a\n // single statement, but may match many rows). One extra index lookup is\n // cheap relative to the cascade itself.\n let subgraphRowCount = 0;\n if (shouldDeleteSubgraphs) {\n const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;\n const countStmt = compileCountScopePrefix(this.collectionPath, prefix);\n const countRows = await this.executor.all(countStmt.sql, countStmt.params);\n const first = countRows[0] as Record<string, unknown> | undefined;\n const n = first?.n;\n subgraphRowCount = typeof n === 'bigint' ? Number(n) : Number(n ?? 0);\n }\n\n // Build the full statement list. Order: edges → node → prefix-delete.\n // When the executor's `batch()` is fully atomic (DO SQLite uses\n // `transactionSync`) the chunking loop below collapses to a single batch\n // and the operation is atomic. When the executor caps batches (D1, ~100\n // statements) we lose cross-batch atomicity, but `removeNodeCascade` is\n // idempotent so a caller can retry after a partial failure.\n const writeStatements: CompiledStatement[] = edgeDocIds.map((id) =>\n compileDelete(this.collectionPath, this.storageScope, id),\n );\n writeStatements.push(compileDelete(this.collectionPath, this.storageScope, nodeDocId));\n if (shouldDeleteSubgraphs) {\n const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;\n writeStatements.push(compileDeleteScopePrefix(this.collectionPath, prefix));\n }\n\n const {\n deleted: stmtDeleted,\n batches,\n errors,\n } = await this.executeChunkedBatches(writeStatements, options);\n\n // `nodeDeleted` / `edgesDeleted` reflect best-effort completion: a\n // chunk failure leaves us unable to know which sub-batch contained the\n // node-row delete, so we conservatively flag both as incomplete when\n // any batch fails. The caller can retry — cascade is idempotent.\n const allOk = errors.length === 0;\n const edgesDeleted = allOk ? edgeDocIds.length : 0;\n const nodeDeleted = allOk;\n\n // `stmtDeleted` counts committed *statements*. Replace the prefix-\n // delete's per-statement contribution (1) with the pre-computed row\n // count so callers see a true row total. Only credit subgraph rows when\n // every chunk succeeded — partial failure means we can't be sure the\n // chunk containing the prefix-delete actually committed.\n const prefixStatementContribution = shouldDeleteSubgraphs && allOk ? 
1 : 0;\n const deleted = stmtDeleted - prefixStatementContribution + (allOk ? subgraphRowCount : 0);\n\n return { deleted, batches, errors, edgesDeleted, nodeDeleted };\n }\n\n async bulkRemoveEdges(\n params: FindEdgesParams,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<BulkResult> {\n // Override default query limit for bulk deletion — we need all matching edges.\n // limit: 0 bypasses DEFAULT_QUERY_LIMIT; an explicit user limit is preserved.\n // allowCollectionScan: true — bulk deletion inherently implies scanning.\n const effectiveParams =\n params.limit !== undefined\n ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true }\n : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };\n const edges = await reader.findEdges(effectiveParams);\n const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));\n\n if (docIds.length === 0) {\n return { deleted: 0, batches: 0, errors: [] };\n }\n\n const statements = docIds.map((id) =>\n compileDelete(this.collectionPath, this.storageScope, id),\n );\n\n return this.executeChunkedBatches(statements, options);\n }\n\n /**\n * Submit `statements` to the executor as one or more `batch()` calls,\n * chunking by `executor.maxBatchSize` (e.g. D1's ~100-statement cap).\n * Drivers that don't advertise a cap submit everything in one batch,\n * preserving cross-batch atomicity.\n *\n * Each chunk is retried with exponential backoff up to `maxRetries`\n * (default 3) before being recorded in `errors`. The loop continues past\n * a permanently failed chunk so the caller still gets partial progress\n * visibility — to halt on first failure, set `maxRetries: 0` and check\n * `result.errors.length` after the call.\n *\n * Returns `BulkResult`-shaped fields. `deleted` reflects only the\n * statement count of *successfully committed* batches — a prefix-delete\n * statement contributes 1 to that total even though it may match many\n * rows; `removeNodeCascade` patches that up with a pre-counted row total.\n *\n * **Atomicity caveat (D1):** when chunking kicks in, atomicity is lost\n * across chunk boundaries — one chunk may commit while a later one fails.\n * `removeNodeCascade` is idempotent (deleting the same docs again is a\n * no-op) so a caller can simply retry on partial failure. `bulkRemoveEdges`\n * is also idempotent for the same reason. DO SQLite leaves `maxBatchSize`\n * unset, so everything funnels through one atomic `transactionSync` and\n * this caveat does not apply.\n */\n private async executeChunkedBatches(\n statements: CompiledStatement[],\n options?: BulkOptions,\n ): Promise<{ deleted: number; batches: number; errors: BulkBatchError[] }> {\n if (statements.length === 0) {\n return { deleted: 0, batches: 0, errors: [] };\n }\n const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;\n\n // Split `statements` into chunks up front. Chunking honors the smallest\n // of: caller-supplied `batchSize` (used by callers who want progress\n // granularity), the driver's statement-count cap (`maxBatchSize`, D1 ≈\n // 100), and the driver's total bound-parameter cap (`maxBatchParams`,\n // D1 ≈ 1000). Most cascade/bulk statements are 2-param DELETEs so the\n // param cap rarely triggers, but we respect it defensively. 
Drivers with\n // no declared caps and no caller cap submit everything in one batch (DO\n // SQLite's atomic `transactionSync`).\n const callerBatchSize = options?.batchSize;\n const stmtCap = minDefined(callerBatchSize, this.executor.maxBatchSize);\n const chunks = chunkStatements(statements, stmtCap, this.executor.maxBatchParams);\n\n const errors: BulkBatchError[] = [];\n let deleted = 0;\n let batches = 0;\n const totalBatches = chunks.length;\n\n const driverParamCap = this.executor.maxBatchParams;\n\n for (let batchIndex = 0; batchIndex < chunks.length; batchIndex++) {\n const chunk = chunks[batchIndex];\n\n // A chunk that's a single statement whose param count already exceeds\n // the driver's per-batch param cap will be rejected on every attempt —\n // retrying just adds latency before surfacing the failure. `chunkStatements`\n // intentionally emits such statements as their own chunk (failing loudly\n // beats silently dropping); fast-fail here closes the loop.\n const isUnretriableOversize =\n chunk.length === 1 &&\n driverParamCap !== undefined &&\n chunk[0].params.length > driverParamCap;\n\n let committed = false;\n let lastError: Error | null = null;\n const effectiveRetries = isUnretriableOversize ? 0 : maxRetries;\n for (let attempt = 0; attempt <= effectiveRetries; attempt++) {\n try {\n await this.executor.batch(chunk);\n committed = true;\n break;\n } catch (err) {\n lastError = err instanceof Error ? err : new Error(String(err));\n if (attempt < effectiveRetries) {\n const delay = Math.min(BASE_RETRY_DELAY_MS * Math.pow(2, attempt), MAX_RETRY_DELAY_MS);\n await sleep(delay);\n }\n }\n }\n\n if (committed) {\n deleted += chunk.length;\n batches += 1;\n } else if (lastError) {\n errors.push({\n batchIndex,\n error: lastError,\n operationCount: chunk.length,\n });\n }\n\n if (options?.onProgress) {\n options.onProgress({\n completedBatches: batches,\n totalBatches,\n deletedSoFar: deleted,\n });\n }\n }\n\n return { deleted, batches, errors };\n }\n\n // --- Cross-scope (collection group) ---\n\n async findEdgesGlobal(\n params: FindEdgesParams,\n collectionName?: string,\n ): Promise<StoredGraphRecord[]> {\n const plan = buildEdgeQueryPlan(params);\n if (plan.strategy === 'get') {\n throw new FiregraphError(\n 'findEdgesGlobal() requires a query, not a direct document lookup. ' +\n 'Omit one of aUid/axbType/bUid to force a query strategy.',\n 'INVALID_QUERY',\n );\n }\n // Mirror Firestore's `collectionGroup(name)` semantics over the\n // materialized-scope SQLite layout: when `collectionName` matches the\n // table name (the implicit root default), filter to root rows; otherwise\n // filter to rows whose scope's last segment equals the requested name.\n const name = collectionName ?? this.collectionPath;\n const scopeNameFilter = {\n name,\n isRoot: name === this.collectionPath,\n };\n const stmt = compileSelectGlobal(\n this.collectionPath,\n plan.filters,\n plan.options,\n scopeNameFilter,\n );\n const rows = await this.executor.all(stmt.sql, stmt.params);\n return rows.map(rowToRecord);\n }\n\n // --- Aggregate ---\n\n /**\n * Run an aggregate query in a single SQL statement. Supports the full\n * count/sum/avg/min/max set — the SQLite engine evaluates each aggregate\n * function over the filtered row set and the executor returns one row\n * with one column per alias. 
SUM/MIN/MAX of an empty set returns 0\n * (SQLite's `SUM(NULL) = NULL` is mapped to a clean number for the\n * cross-backend contract); AVG returns NaN, matching the mathematical\n * convention and the Firestore Standard helper.\n */\n async aggregate(spec: AggregateSpec, filters: QueryFilter[]): Promise<Record<string, number>> {\n const { stmt, aliases } = compileAggregate(\n this.collectionPath,\n this.storageScope,\n spec,\n filters,\n );\n const rows = await this.executor.all(stmt.sql, stmt.params);\n const row = rows[0] ?? {};\n const out: Record<string, number> = {};\n for (const alias of aliases) {\n const v = row[alias];\n if (v === null || v === undefined) {\n // SQLite returns NULL for SUM/MIN/MAX over an empty set. Resolve\n // to 0 for SUM/MIN/MAX (well-defined) and NaN for AVG (empty-set\n // average is undefined). COUNT(*) is never null.\n const op = spec[alias].op;\n out[alias] = op === 'avg' ? Number.NaN : 0;\n } else if (typeof v === 'bigint') {\n out[alias] = Number(v);\n } else if (typeof v === 'number') {\n out[alias] = v;\n } else {\n // Some drivers return strings for very large or precise numerics.\n // Coerce defensively — the contract is `number`.\n out[alias] = Number(v);\n }\n }\n return out;\n }\n\n // --- Server-side DML ---\n\n /**\n * Delete every row matching `filters` in a single SQL DELETE statement.\n *\n * Uses `RETURNING \"doc_id\"` to count rows touched — the SQLite executor's\n * `run` returns void, so RETURNING + `all()` is the portable way to learn\n * how many rows the engine actually deleted. SQLite ≥ 3.35 supports\n * `DELETE … RETURNING`; better-sqlite3, D1, and DO SQLite all run on a\n * recent enough engine.\n *\n * Single-statement DML doesn't chunk: the engine handles N rows in one\n * shot, so `BulkOptions.batchSize` is intentionally ignored. The retry\n * loop here exists only for transient driver errors (e.g. D1 surface\n * congestion); a permanent failure is surfaced via the `errors` array\n * with `batchIndex: 0` so callers see the same shape as `bulkRemoveEdges`.\n *\n * Subgraph scoping is enforced inside `compileBulkDelete` (the leading\n * `\"scope\" = ?` predicate) so this method, like every other backend\n * surface, naturally honours subgraph isolation.\n */\n async bulkDelete(filters: QueryFilter[], options?: BulkOptions): Promise<BulkResult> {\n const stmt = compileBulkDelete(this.collectionPath, this.storageScope, filters);\n return this.executeDmlWithReturning(stmt, options);\n }\n\n /**\n * Update every row matching `filters` with `patch.data` in a single SQL\n * UPDATE statement. The patch is deep-merged into each row's `data`\n * column via the same `flattenPatch` → `compileDataOpsExpr` pipeline that\n * `compileUpdate` (single-row) uses.\n *\n * Same contract notes as `bulkDelete` apply: single-statement, no\n * chunking, `RETURNING \"doc_id\"` for the affected count, retry loop for\n * transient driver errors.\n */\n async bulkUpdate(\n filters: QueryFilter[],\n patch: BulkUpdatePatch,\n options?: BulkOptions,\n ): Promise<BulkResult> {\n const stmt = compileBulkUpdate(\n this.collectionPath,\n this.storageScope,\n filters,\n patch.data,\n Date.now(),\n );\n return this.executeDmlWithReturning(stmt, options);\n }\n\n /**\n * Multi-source fan-out — `query.join` capability.\n *\n * Issues a single `SELECT … WHERE \"aUid\" IN (?, ?, …)` statement that\n * matches every edge from every source UID in one round trip. 
When\n * `params.hydrate === true`, follows up with a second statement that\n * fetches the target node rows; both queries hit the same table so\n * the executor amortises connection / parsing cost across them.\n *\n * Empty `params.sources` short-circuits to an empty result without\n * touching the executor — `IN ()` is not valid SQL.\n *\n * Per-source ordering / strict per-source LIMIT enforcement is NOT\n * implemented here; see the `ExpandParams.limitPerSource` JSDoc and\n * `compileExpand` for the cap semantics. Strict per-source caps would\n * require window functions and were judged out of scope for the\n * round-trip-collapse goal.\n */\n async expand(params: ExpandParams): Promise<ExpandResult> {\n if (params.sources.length === 0) {\n return params.hydrate ? { edges: [], targets: [] } : { edges: [] };\n }\n const stmt = compileExpand(this.collectionPath, this.storageScope, params);\n const rows = await this.executor.all(stmt.sql, stmt.params);\n const edges = rows.map(rowToRecord);\n if (!params.hydrate) {\n return { edges };\n }\n // Hydration: fetch target nodes for every edge in one IN-clause statement.\n // The \"target\" side depends on direction — forward hops point at `bUid`,\n // reverse hops point at `aUid`.\n const direction = params.direction ?? 'forward';\n const targetUids = edges.map((e) => (direction === 'forward' ? e.bUid : e.aUid));\n const uniqueTargets = [...new Set(targetUids)];\n if (uniqueTargets.length === 0) {\n return { edges, targets: [] };\n }\n const hydrateStmt = compileExpandHydrate(this.collectionPath, this.storageScope, uniqueTargets);\n const hydrateRows = await this.executor.all(hydrateStmt.sql, hydrateStmt.params);\n const byUid = new Map<string, StoredGraphRecord>();\n for (const row of hydrateRows) {\n const node = rowToRecord(row);\n // Node UID is `bUid` (== `aUid` for self-loop) by convention. Key the\n // map by `bUid` so the alignment loop below indexes correctly.\n byUid.set(node.bUid, node);\n }\n const targets = targetUids.map((uid) => byUid.get(uid) ?? null);\n return { edges, targets };\n }\n\n /**\n * Server-side projection — `query.select` capability.\n *\n * Issues a single `SELECT json_extract(data, '$.f1'), …` statement that\n * returns only the requested fields. The compiler emits one column per\n * unique field plus a paired `json_type` column for `data.*` projections\n * so the decoder can recover JSON-encoded objects/arrays without a\n * second round trip. Migrations are NOT applied — the caller asked for\n * a partial shape, and rehydrating that into the migration pipeline\n * would require synthesising every absent field.\n *\n * The wire-payload reduction is the entire reason this method exists:\n * a list view that only needs `title` / `date` no longer drags the\n * full `data` JSON across the network. Callers that need the full\n * record should use `findEdges` (with migration support).\n */\n async findEdgesProjected(\n select: ReadonlyArray<string>,\n filters: QueryFilter[],\n options?: QueryOptions,\n ): Promise<Array<Record<string, unknown>>> {\n const { stmt, columns } = compileFindEdgesProjected(\n this.collectionPath,\n this.storageScope,\n select,\n filters,\n options,\n );\n const rows = await this.executor.all(stmt.sql, stmt.params);\n return rows.map((row) => decodeProjectedRow(row, columns));\n }\n\n /**\n * Run a DML statement with `RETURNING \"doc_id\"` so we can count the\n * rows the engine touched, with the same retry/backoff contract as\n * `executeChunkedBatches`. 
Single statement, single batch.\n */\n private async executeDmlWithReturning(\n stmt: CompiledStatement,\n options?: BulkOptions,\n ): Promise<BulkResult> {\n const sqlWithReturning = `${stmt.sql} RETURNING \"doc_id\"`;\n const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;\n let lastError: Error | null = null;\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n const rows = await this.executor.all(sqlWithReturning, stmt.params);\n const deleted = rows.length;\n if (options?.onProgress) {\n options.onProgress({\n completedBatches: 1,\n totalBatches: 1,\n deletedSoFar: deleted,\n });\n }\n return { deleted, batches: 1, errors: [] };\n } catch (err) {\n lastError = err instanceof Error ? err : new Error(String(err));\n if (attempt < maxRetries) {\n const delay = Math.min(BASE_RETRY_DELAY_MS * Math.pow(2, attempt), MAX_RETRY_DELAY_MS);\n await sleep(delay);\n }\n }\n }\n // `operationCount` is genuinely unknown for a server-side DML — we\n // don't know how many rows the failed statement would have touched.\n // Report 0 as the lower bound; callers concerned about partial state\n // should re-query and reconcile.\n return {\n deleted: 0,\n batches: 0,\n errors: [\n {\n batchIndex: 0,\n error: lastError ?? new Error('bulk DML failed for unknown reason'),\n operationCount: 0,\n },\n ],\n };\n }\n}\n\n/**\n * Create a SQLite-backed `StorageBackend`.\n *\n * `tableName` is the single table that holds every triple. The driver must\n * have already created the table and indexes via `buildSchemaStatements()`\n * before any reads/writes arrive — callers that ship their own SQLite\n * driver are responsible for wiring that up.\n */\nexport function createSqliteBackend(\n executor: SqliteExecutor,\n tableName: string,\n options: SqliteBackendOptions = {},\n): StorageBackend<SqliteCapability> {\n const storageScope = options.storageScope ?? '';\n const scopePath = options.scopePath ?? 
'';\n return new SqliteBackendImpl(executor, tableName, storageScope, scopePath);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiBO,SAAS,cAAc,OAAyB;AACrD,MAAI,UAAU,QAAQ,OAAO,UAAU,SAAU,QAAO;AACxD,QAAM,MAAO,MAAkC,iBAAiB;AAChE,SAAO,OAAO,QAAQ,YAAY,YAAY,IAAI,GAAG;AACvD;AArBA,IAYa,mBAEP;AAdN;AAAA;AAAA;AAYO,IAAM,oBAAoB;AAEjC,IAAM,cAAc,oBAAI,IAAI,CAAC,aAAa,YAAY,eAAe,mBAAmB,CAAC;AAAA;AAAA;;;ACdzF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAmCA,SAAS,YAAY,OAAoC;AACvD,SAAO,iBAAiB;AAC1B;AAEA,SAAS,WAAW,OAAmC;AACrD,SAAO,iBAAiB;AAC1B;AAEA,SAAS,oBAAoB,OAA4C;AAEvE,MAAI,UAAU,QAAQ,OAAO,UAAU,SAAU,QAAO;AACxD,QAAM,IAAI;AACV,SACE,OAAO,EAAE,SAAS,YAClB,EAAE,cAAc,UAChB,OAAO,EAAE,OAAO,YAChB,EAAE,aAAa,SAAS;AAE5B;AAEA,SAAS,cAAc,OAAyB;AAC9C,MAAI,UAAU,QAAQ,OAAO,UAAU,SAAU,QAAO;AACxD,QAAM,IAAI;AACV,SACE,EAAE,aAAa,SAAS,iBAAiB,MAAM,QAAS,EAA8B,OAAO;AAEjG;AAYO,SAAS,wBAAwB,MAAwD;AAC9F,SAAO,eAAe,IAAI;AAC5B;AAEA,SAAS,eAAe,OAAyB;AAE/C,MAAI,UAAU,QAAQ,UAAU,OAAW,QAAO;AAClD,MAAI,OAAO,UAAU,SAAU,QAAO;AAGtC,MAAI,YAAY,KAAK,GAAG;AACtB,WAAO;AAAA,MACL,CAAC,iBAAiB,GAAG;AAAA,MACrB,SAAS,MAAM;AAAA,MACf,aAAa,MAAM;AAAA,IACrB;AAAA,EACF;AACA,MAAI,WAAW,KAAK,GAAG;AACrB,WAAO;AAAA,MACL,CAAC,iBAAiB,GAAG;AAAA,MACrB,UAAU,MAAM;AAAA,MAChB,WAAW,MAAM;AAAA,IACnB;AAAA,EACF;AACA,MAAI,oBAAoB,KAAK,GAAG;AAC9B,WAAO,EAAE,CAAC,iBAAiB,GAAG,qBAAqB,MAAO,MAA4B,KAAK;AAAA,EAC7F;AACA,MAAI,cAAc,KAAK,GAAG;AAExB,UAAM,IAAI;AACV,UAAM,SACJ,OAAO,EAAE,YAAY,aAAc,EAAE,QAA2B,IAAK,EAAE;AACzE,WAAO,EAAE,CAAC,iBAAiB,GAAG,eAAe,QAAQ,CAAC,GAAG,MAAM,EAAE;AAAA,EACnE;AAGA,MAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,WAAO,MAAM,IAAI,cAAc;AAAA,EACjC;AAGA,QAAM,SAAkC,CAAC;AACzC,aAAW,OAAO,OAAO,KAAK,KAAgC,GAAG;AAC/D,WAAO,GAAG,IAAI,eAAgB,MAAkC,GAAG,CAAC;AAAA,EACtE;AACA,SAAO;AACT;AAgBO,SAAS,0BACd,MACA,IACyB;AACzB,SAAO,iBAAiB,MAAM,EAAE;AAClC;AAEA,SAAS,iBAAiB,OAAgB,IAAyB;AACjE,MAAI,UAAU,QAAQ,UAAU,OAAW,QAAO;AAClD,MAAI,OAAO,UAAU,SAAU,QAAO;AAMtC,MACE,YAAY,KAAK,KACjB,WAAW,KAAK,KAChB,oBAAoB,KAAK,KACzB,cAAc,KAAK,GACnB;AACA,WAAO;AAAA,EACT;AAGA,MAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,WAAO,MAAM,IAAI,CAAC,MAAM,iBAAiB,GAAG,EAAE,CAAC;AAAA,EACjD;AAEA,QAAM,MAAM;AAGZ,MAAI,cAAc,GAAG,GAAG;AACtB,UAAM,MAAM,IAAI,iBAAiB;AAEjC,YAAQ,KAAK;AAAA,MACX,KAAK;AAEH,YAAI,OAAO,IAAI,YAAY,YAAY,OAAO,IAAI,gBAAgB,SAAU,QAAO;AACnF,eAAO,IAAI,2BAAU,IAAI,SAAS,IAAI,WAAW;AAAA,MAEnD,KAAK;AACH,YAAI,OAAO,IAAI,aAAa,YAAY,OAAO,IAAI,cAAc,SAAU,QAAO;AAClF,eAAO,IAAI,0BAAS,IAAI,UAAU,IAAI,SAAS;AAAA,MAEjD,KAAK;AACH,YAAI,CAAC,MAAM,QAAQ,IAAI,MAAM,EAAG,QAAO;AACvC,eAAO,4BAAW,OAAO,IAAI,MAAkB;AAAA,MAEjD,KAAK;AACH,YAAI,OAAO,IAAI,SAAS,SAAU,QAAO;AACzC,YAAI,IAAI;AACN,iBAAO,GAAG,IAAI,IAAI,IAAI;AAAA,QACxB;AAEA,YAAI,CAAC,eAAe;AAClB,0BAAgB;AAChB,kBAAQ;AAAA,YACN;AAAA,UAGF;AAAA,QACF;AACA,eAAO;AAAA,MAET;AAEE,eAAO;AAAA,IACX;AAAA,EACF;AAGA,QAAM,SAAkC,CAAC;AACzC,aAAW,OAAO,OAAO,KAAK,GAAG,GAAG;AAClC,WAAO,GAAG,IAAI,iBAAiB,IAAI,GAAG,GAAG,EAAE;AAAA,EAC7C;AACA,SAAO;AACT;AApNA,IAcA,kBAeI;AA7BJ;AAAA;AAAA;AAcA,uBAAgD;AAWhD;AACA;AAGA,IAAI,gBAAgB;AAAA;AAAA;;;AC7BpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,yBAA2B;;;ACApB,IAAM,gBAAgB;AAOtB,IAAM,sBAAsB;AAO5B,IAAM,iBAAiB,oBAAI,IAAI;AAAA,EACpC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAGM,IAAM,kBAAkB;;;ADrBxB,SAAS,iBAAiB,KAAqB;AACpD,SAAO;AACT;AAEO,SAAS,iBAAiB,MAAc,SAAiB,MAAsB;AACpF,QAAM,YAAY,GAAG,IAAI,GAAG,eAAe,GAAG,OAAO,GAAG,eAAe,GAAG,IAAI;AAC9E,QAAM,WAAO,+BAAW,QAAQ,EAAE,OAAO,SAAS,EAAE,OAAO,KAAK;AAChE,QAAM,QAAQ,KAAK,CAAC;AACpB,SAAO,GAAG,KAAK,GAAG,eAAe,GAAG,IAAI,GAAG,eAAe,GAAG,OAAO,GAAG,eAAe,GAAG,IAAI;AAC/F;;;AEeA;AAaO,IAAM,eAA8B,uBAAO,IAAI,uBAAuB;AAkBtE,SAAS,iBAAiB,OAAyC;AACxE,SAAO,UAAU;AACnB;AAMA,IAAM,0BAA0B,oBAAI,IAAI;AAAA,EACtC;
AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AASM,SAAS,gBAAgB,OAAyB;AACvD,MAAI,UAAU,KAAM,QAAO;AAC3B,QAAM,IAAI,OAAO;AACjB,MAAI,MAAM,SAAU,QAAO;AAC3B,MAAI,MAAM,QAAQ,KAAK,EAAG,QAAO;AAGjC,MAAI,cAAc,KAAK,EAAG,QAAO;AACjC,QAAM,QAAQ,OAAO,eAAe,KAAK;AACzC,MAAI,UAAU,QAAQ,UAAU,OAAO,UAAW,QAAO;AAEzD,QAAM,OAAQ,MAA8C;AAC5D,MAAI,QAAQ,OAAO,KAAK,SAAS,YAAY,wBAAwB,IAAI,KAAK,IAAI,EAAG,QAAO;AAG5F,SAAO;AACT;AAkCA,IAAM,cAAc;AAcb,SAAS,6BAA6B,QAGpC;AACP,MAAI,OAAO,gBAAgB,UAAa,OAAO,YAAY,QAAW;AACpE,UAAM,IAAI;AAAA,MACR;AAAA,IAGF;AAAA,EACF;AACF;AAuBO,SAAS,wBAAwB,MAAe,aAA2B;AAChF,yBAAuB,MAAM,CAAC,GAAG,EAAE,MAAM,OAAO,GAAG,CAAC,EAAE,KAAK,MAAM;AAC/D,UAAM,QAAQ,KAAK,WAAW,IAAI,WAAW,KAAK,IAAI,CAAC,MAAM,KAAK,UAAU,CAAC,CAAC,EAAE,KAAK,KAAK;AAC1F,UAAM,IAAI;AAAA,MACR,cAAc,WAAW,iDAAiD,KAAK;AAAA,IAIjF;AAAA,EACF,CAAC;AACH;AAIA,SAAS,uBACP,MACA,MACA,QACA,OACM;AACN,MAAI,SAAS,QAAQ,SAAS,OAAW;AACzC,MAAI,iBAAiB,IAAI,GAAG;AAC1B,UAAM,EAAE,MAAM,OAAO,CAAC;AACtB;AAAA,EACF;AACA,MAAI,OAAO,SAAS,SAAU;AAC9B,MAAI,cAAc,IAAI,EAAG;AACzB,MAAI,MAAM,QAAQ,IAAI,GAAG;AACvB,aAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,6BAAuB,KAAK,CAAC,GAAG,CAAC,GAAG,MAAM,OAAO,CAAC,CAAC,GAAG,EAAE,MAAM,SAAS,OAAO,EAAE,GAAG,KAAK;AAAA,IAC1F;AACA;AAAA,EACF;AACA,QAAM,QAAQ,OAAO,eAAe,IAAI;AACxC,MAAI,UAAU,QAAQ,UAAU,OAAO,UAAW;AAClD,QAAM,MAAM;AACZ,aAAW,OAAO,OAAO,KAAK,GAAG,GAAG;AAClC,2BAAuB,IAAI,GAAG,GAAG,CAAC,GAAG,MAAM,GAAG,GAAG,EAAE,MAAM,SAAS,GAAG,KAAK;AAAA,EAC5E;AACF;AAGO,SAAS,eAAe,MAA+B;AAC5D,aAAW,OAAO,MAAM;AACtB,QAAI,CAAC,YAAY,KAAK,GAAG,GAAG;AAC1B,YAAM,IAAI;AAAA,QACR,gCAAgC,KAAK,UAAU,GAAG,CAAC,YAAY,KAC5D,IAAI,CAAC,MAAM,KAAK,UAAU,CAAC,CAAC,EAC5B,KAAK,KAAK,CAAC;AAAA,MAGhB;AAAA,IACF;AAAA,EACF;AACF;AA2BO,SAAS,aAAa,MAA6C;AACxE,QAAM,MAAoB,CAAC;AAC3B,OAAK,MAAM,CAAC,GAAG,GAAG;AAClB,SAAO;AACT;AAEA,SAAS,oCACP,KACA,WACM;AACN,yBAAuB,KAAK,WAAW,EAAE,MAAM,OAAO,GAAG,CAAC,EAAE,OAAO,MAAM;AACvE,UAAM,eACJ,UAAU,WAAW,IAAI,WAAW,UAAU,IAAI,CAAC,MAAM,KAAK,UAAU,CAAC,CAAC,EAAE,KAAK,KAAK;AACxF,QAAI,OAAO,SAAS,SAAS;AAC3B,YAAM,IAAI;AAAA,QACR,8CAA8C,OAAO,KAAK,4BAChD,YAAY;AAAA,MAIxB;AAAA,IACF;AACA,UAAM,IAAI;AAAA,MACR,qEACU,YAAY;AAAA,IAGxB;AAAA,EACF,CAAC;AACH;AAEA,SAAS,KAAK,MAAe,MAAgB,KAAyB;AAGpE,MAAI,SAAS,OAAW;AACxB,MAAI,iBAAiB,IAAI,GAAG;AAC1B,QAAI,KAAK,WAAW,GAAG;AACrB,YAAM,IAAI,MAAM,+DAA+D;AAAA,IACjF;AACA,mBAAe,IAAI;AACnB,QAAI,KAAK,EAAE,MAAM,CAAC,GAAG,IAAI,GAAG,OAAO,QAAW,QAAQ,KAAK,CAAC;AAC5D;AAAA,EACF;AACA,MAAI,gBAAgB,IAAI,GAAG;AACzB,QAAI,KAAK,WAAW,GAAG;AAGrB,YAAM,IAAI;AAAA,QACR,4DACG,SAAS,OAAO,SAAS,MAAM,QAAQ,IAAI,IAAI,UAAU,OAAO,QACjE;AAAA,MACJ;AAAA,IACF;AAMA,QAAI,MAAM,QAAQ,IAAI,GAAG;AACvB,0CAAoC,MAAM,IAAI;AAAA,IAChD;AACA,mBAAe,IAAI;AACnB,QAAI,KAAK,EAAE,MAAM,CAAC,GAAG,IAAI,GAAG,OAAO,MAAM,QAAQ,MAAM,CAAC;AACxD;AAAA,EACF;AAEA,QAAM,MAAM;AACZ,QAAM,OAAO,OAAO,KAAK,GAAG;AAC5B,MAAI,KAAK,WAAW,GAAG;AAIrB,QAAI,KAAK,SAAS,GAAG;AACnB,qBAAe,IAAI;AACnB,UAAI,KAAK,EAAE,MAAM,CAAC,GAAG,IAAI,GAAG,OAAO,CAAC,GAAG,QAAQ,MAAM,CAAC;AAAA,IACxD;AACA;AAAA,EACF;AACA,aAAW,OAAO,MAAM;AACtB,QAAI,QAAQ,mBAAmB;AAC7B,YAAM,QAAQ,KAAK,WAAW,IAAI,WAAW,KAAK,IAAI,CAAC,MAAM,KAAK,UAAU,CAAC,CAAC,EAAE,KAAK,KAAK;AAC1F,YAAM,IAAI;AAAA,QACR,kDAAkD,iBAAiB,aAC9D,KAAK;AAAA,MAGZ;AAAA,IACF;AACA,SAAK,IAAI,GAAG,GAAG,CAAC,GAAG,MAAM,GAAG,GAAG,GAAG;AAAA,EACpC;AACF;;;AC9VA,SAAS,wBACP,OACA,KACA,MACgB;AAChB,SAAO,EAAE,OAAO,MAAM,KAAK,SAAS,eAAe,OAAO,OAAO,MAAM,KAAK,KAAK;AACnF;AAEA,SAAS,wBACP,OACA,MACA,SACA,OACA,MACA,MACgB;AAChB,SAAO,EAAE,OAAO,MAAM,SAAS,OAAO,MAAM,KAAK;AACnD;AAEO,IAAM,iBAAN,MAA2C;AAAA,EAChD,YACmB,SACA,UACA,YAAoB,IACrC;AAHiB;AACA;AACA;AAAA,EAChB;AAAA,EAEH,MAAM,QAAQ,OAAe,KAAa,MAA8C;AACtF,SAAK,UAAU,OAAO,KAAK,MAAM,OAAO;AAAA,EAC1C;AA
AA,EAEA,MAAM,QACJ,OACA,MACA,SACA,OACA,MACA,MACe;AACf,SAAK,UAAU,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,OAAO;AAAA,EACjE;AAAA,EAEA,MAAM,YAAY,OAAe,KAAa,MAA8C;AAC1F,SAAK,UAAU,OAAO,KAAK,MAAM,SAAS;AAAA,EAC5C;AAAA,EAEA,MAAM,YACJ,OACA,MACA,SACA,OACA,MACA,MACe;AACf,SAAK,UAAU,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,SAAS;AAAA,EACnE;AAAA,EAEQ,UACN,OACA,KACA,MACA,MACM;AACN,4BAAwB,MAAM,SAAS,YAAY,gBAAgB,SAAS;AAC5E,QAAI,KAAK,UAAU;AACjB,WAAK,SAAS,SAAS,OAAO,eAAe,OAAO,MAAM,KAAK,SAAS;AAAA,IAC1E;AACA,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,SAAS,wBAAwB,OAAO,KAAK,IAAI;AACvD,QAAI,KAAK,UAAU;AACjB,YAAM,QAAQ,KAAK,SAAS,OAAO,OAAO,eAAe,KAAK;AAC9D,UAAI,OAAO,iBAAiB,MAAM,gBAAgB,GAAG;AACnD,eAAO,IAAI,MAAM;AAAA,MACnB;AAAA,IACF;AACA,SAAK,QAAQ,OAAO,OAAO,QAAQ,IAAI;AAAA,EACzC;AAAA,EAEQ,UACN,OACA,MACA,SACA,OACA,MACA,MACA,MACM;AACN,4BAAwB,MAAM,SAAS,YAAY,gBAAgB,SAAS;AAC5E,QAAI,KAAK,UAAU;AACjB,WAAK,SAAS,SAAS,OAAO,SAAS,OAAO,MAAM,KAAK,SAAS;AAAA,IACpE;AACA,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,SAAS,wBAAwB,OAAO,MAAM,SAAS,OAAO,MAAM,IAAI;AAC9E,QAAI,KAAK,UAAU;AACjB,YAAM,QAAQ,KAAK,SAAS,OAAO,OAAO,SAAS,KAAK;AACxD,UAAI,OAAO,iBAAiB,MAAM,gBAAgB,GAAG;AACnD,eAAO,IAAI,MAAM;AAAA,MACnB;AAAA,IACF;AACA,SAAK,QAAQ,OAAO,OAAO,QAAQ,IAAI;AAAA,EACzC;AAAA,EAEA,MAAM,WAAW,KAAa,MAA8C;AAC1E,UAAM,QAAQ,iBAAiB,GAAG;AAClC,SAAK,QAAQ,UAAU,OAAO,EAAE,SAAS,aAAa,IAAI,EAAE,CAAC;AAAA,EAC/D;AAAA,EAEA,MAAM,WACJ,MACA,SACA,MACA,MACe;AACf,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,SAAK,QAAQ,UAAU,OAAO,EAAE,SAAS,aAAa,IAAI,EAAE,CAAC;AAAA,EAC/D;AAAA,EAEA,MAAM,WAAW,KAA4B;AAC3C,UAAM,QAAQ,iBAAiB,GAAG;AAClC,SAAK,QAAQ,UAAU,KAAK;AAAA,EAC9B;AAAA,EAEA,MAAM,WAAW,MAAc,SAAiB,MAA6B;AAC3E,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,SAAK,QAAQ,UAAU,KAAK;AAAA,EAC9B;AAAA,EAEA,MAAM,SAAwB;AAC5B,UAAM,KAAK,QAAQ,OAAO;AAAA,EAC5B;AACF;;;ACvIA,IAAAA,sBAA2B;;;ACApB,IAAM,iBAAN,cAA6B,MAAM;AAAA,EACxC,YACE,SACgB,MAChB;AACA,UAAM,OAAO;AAFG;AAGhB,SAAK,OAAO;AAAA,EACd;AACF;AAgBO,IAAM,kBAAN,cAA8B,eAAe;AAAA,EAClD,YACE,SACgB,SAChB;AACA,UAAM,SAAS,kBAAkB;AAFjB;AAGhB,SAAK,OAAO;AAAA,EACd;AACF;AAEO,IAAM,yBAAN,cAAqC,eAAe;AAAA,EACzD,YAAY,OAAe,SAAiB,OAAe;AACzD,UAAM,yBAAyB,KAAK,OAAO,OAAO,QAAQ,KAAK,KAAK,oBAAoB;AACxF,SAAK,OAAO;AAAA,EACd;AACF;AAEO,IAAM,oBAAN,cAAgC,eAAe;AAAA,EACpD,YAAY,SAAiB;AAC3B,UAAM,SAAS,eAAe;AAC9B,SAAK,OAAO;AAAA,EACd;AACF;AASO,IAAM,uBAAN,cAAmC,eAAe;AAAA,EACvD,YAAY,SAAiB;AAC3B,UAAM,SAAS,wBAAwB;AACvC,SAAK,OAAO;AAAA,EACd;AACF;AAEO,IAAM,mBAAN,cAA+B,eAAe;AAAA,EACnD,YAAY,SAAiB;AAC3B,UAAM,SAAS,cAAc;AAC7B,SAAK,OAAO;AAAA,EACd;AACF;AAEO,IAAM,qBAAN,cAAiC,eAAe;AAAA,EACrD,YACE,OACA,SACA,OACA,WACA,WACA;AACA;AAAA,MACE,SAAS,KAAK,OAAO,OAAO,QAAQ,KAAK,8BAA8B,aAAa,MAAM,mBACxE,UAAU,KAAK,IAAI,CAAC;AAAA,MACtC;AAAA,IACF;AACA,SAAK,OAAO;AAAA,EACd;AACF;AAEO,IAAM,iBAAN,cAA6B,eAAe;AAAA,EACjD,YAAY,SAAiB;AAC3B,UAAM,SAAS,iBAAiB;AAChC,SAAK,OAAO;AAAA,EACd;AACF;;;AC3EA,yBAAwD;AA6BxD,IAAM,sBAAsB;AAgCrB,SAAS,cAAc,QAAgB,OAAyC;AAOrF,QAAM,YAAY,IAAI,6BAAU,QAAkB,WAAW,KAAK;AAClE,SAAO,CAAC,SAAkB;AACxB,UAAM,SAAS,UAAU,SAAS,IAAI;AACtC,QAAI,CAAC,OAAO,OAAO;AACjB,YAAM,QAAQ,OAAO,OAAO;AAC5B,YAAM,OAAO,OAAO,OAAO,MAAM,GAAG,mBAAmB,EAAE,IAAI,WAAW,EAAE,KAAK,IAAI;AACnF,YAAM,WAAW,QAAQ,sBAAsB,MAAM,QAAQ,mBAAmB,WAAW;AAC3F,YAAM,IAAI;AAAA,QACR,yBAAyB,QAAQ,UAAU,QAAQ,EAAE,KAAK,IAAI,GAAG,QAAQ;AAAA,QACzE,OAAO;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACF;AAWA,SAAS,YAAY,KAAyB;AAC5C,QAAM,OAAO,IAAI,iBAAiB,QAAQ,MAAM,EAAE,KAAK;AACvD,QAAM,UAAU,IAAI,UAAU,IAAI,IAAI,OAAO,OAAO;AACpD,QAAM,SAAS,IAAI,QAAQ,KAAK,OAAO,GAAG,IAAI,KAAK,KAAK;AACxD,SAAO,GAAG,IAAI,GAAG,MAAM;AACzB;;;ACjFA,eAAsB,oBACpB,MACA,gBACA,eACA,YACkC;AAClC,QAAM,SAAS,CAAC,GAAG,UAAU,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,EAAE,WAAW;AAC3E,MAAI,SAAS,EAAE,GAAG,KAAK;AACvB,MAAI,UAAU;AAEd
,aAAW,QAAQ,QAAQ;AACzB,QAAI,KAAK,gBAAgB,SAAS;AAChC,UAAI;AACF,iBAAS,MAAM,KAAK,GAAG,MAAM;AAAA,MAC/B,SAAS,KAAc;AACrB,YAAI,eAAe,eAAgB,OAAM;AACzC,cAAM,IAAI;AAAA,UACR,mBAAmB,KAAK,WAAW,QAAQ,KAAK,SAAS,YAAa,IAAc,OAAO;AAAA,QAC7F;AAAA,MACF;AACA,UAAI,CAAC,UAAU,OAAO,WAAW,UAAU;AACzC,cAAM,IAAI;AAAA,UACR,mBAAmB,KAAK,WAAW,QAAQ,KAAK,SAAS;AAAA,QAC3D;AAAA,MACF;AACA,gBAAU,KAAK;AAAA,IACjB;AAAA,EACF;AAEA,MAAI,YAAY,eAAe;AAC7B,UAAM,IAAI;AAAA,MACR,wCAAwC,OAAO,mBAAmB,aAAa;AAAA,IACjF;AAAA,EACF;AAEA,SAAO;AACT;AAUO,SAAS,uBAAuB,YAA6B,OAAqB;AACvF,MAAI,WAAW,WAAW,EAAG;AAG7B,QAAM,OAAO,oBAAI,IAAY;AAC7B,aAAW,QAAQ,YAAY;AAC7B,QAAI,KAAK,aAAa,KAAK,aAAa;AACtC,YAAM,IAAI;AAAA,QACR,GAAG,KAAK,mCAAmC,KAAK,SAAS,qBAAqB,KAAK,WAAW;AAAA,MAChG;AAAA,IACF;AACA,QAAI,KAAK,IAAI,KAAK,WAAW,GAAG;AAC9B,YAAM,IAAI;AAAA,QACR,GAAG,KAAK,8CAA8C,KAAK,WAAW;AAAA,MACxE;AAAA,IACF;AACA,SAAK,IAAI,KAAK,WAAW;AAAA,EAC3B;AAEA,QAAM,SAAS,CAAC,GAAG,UAAU,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,EAAE,WAAW;AAC3E,QAAM,gBAAgB,KAAK,IAAI,GAAG,WAAW,IAAI,CAAC,MAAM,EAAE,SAAS,CAAC;AACpE,MAAI,UAAU;AAEd,aAAW,QAAQ,QAAQ;AACzB,QAAI,KAAK,gBAAgB,SAAS;AAChC,gBAAU,KAAK;AAAA,IACjB,WAAW,KAAK,cAAc,SAAS;AACrC,YAAM,IAAI;AAAA,QACR,GAAG,KAAK,sDAAiD,OAAO,YAAO,KAAK,WAAW;AAAA,MACzF;AAAA,IACF;AAAA,EACF;AAEA,MAAI,YAAY,eAAe;AAC7B,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,qCAAqC,aAAa,eAAe,OAAO;AAAA,IAClF;AAAA,EACF;AACF;AAQA,eAAsB,cACpB,QACA,UACA,kBAAsC,OACZ;AAC1B,QAAM,QAAQ,SAAS,OAAO,OAAO,OAAO,OAAO,SAAS,OAAO,KAAK;AAExE,MAAI,CAAC,OAAO,YAAY,UAAU,CAAC,MAAM,eAAe;AACtD,WAAO,EAAE,QAAQ,UAAU,OAAO,WAAW,MAAM;AAAA,EACrD;AAEA,QAAM,iBAAiB,OAAO,KAAK;AAEnC,MAAI,kBAAkB,MAAM,eAAe;AACzC,WAAO,EAAE,QAAQ,UAAU,OAAO,WAAW,MAAM;AAAA,EACrD;AAEA,QAAM,eAAe,MAAM;AAAA,IACzB,OAAO;AAAA,IACP;AAAA,IACA,MAAM;AAAA,IACN,MAAM;AAAA,EACR;AAGA,QAAM,YAAY,MAAM,sBAAsB,mBAAmB;AAEjE,SAAO;AAAA,IACL,QAAQ,EAAE,GAAG,QAAQ,MAAM,cAAc,GAAG,MAAM,cAAc;AAAA,IAChE,UAAU;AAAA,IACV;AAAA,EACF;AACF;AAOA,eAAsB,eACpB,SACA,UACA,kBAAsC,OACV;AAC5B,SAAO,QAAQ,IAAI,QAAQ,IAAI,CAAC,MAAM,cAAc,GAAG,UAAU,eAAe,CAAC,CAAC;AACpF;;;ACnJO,SAAS,WAAW,WAAmB,SAA0B;AAEtE,MAAI,YAAY,OAAQ,QAAO,cAAc;AAG7C,MAAI,YAAY,KAAM,QAAO;AAE7B,QAAM,eAAe,cAAc,KAAK,CAAC,IAAI,UAAU,MAAM,GAAG;AAChE,QAAM,kBAAkB,QAAQ,MAAM,GAAG;AAEzC,SAAO,cAAc,cAAc,GAAG,iBAAiB,CAAC;AAC1D;AASO,SAAS,cAAc,WAAmB,UAA6B;AAC5E,MAAI,CAAC,YAAY,SAAS,WAAW,EAAG,QAAO;AAC/C,SAAO,SAAS,KAAK,CAAC,MAAM,WAAW,WAAW,CAAC,CAAC;AACtD;AAMA,SAAS,cAAc,MAAgB,IAAY,SAAmB,IAAqB;AAEzF,MAAI,OAAO,KAAK,UAAU,OAAO,QAAQ,OAAQ,QAAO;AAGxD,MAAI,OAAO,QAAQ,OAAQ,QAAO;AAElC,QAAM,MAAM,QAAQ,EAAE;AAEtB,MAAI,QAAQ,MAAM;AAEhB,QAAI,OAAO,QAAQ,SAAS,EAAG,QAAO;AAGtC,aAAS,OAAO,GAAG,QAAQ,KAAK,SAAS,IAAI,QAAQ;AACnD,UAAI,cAAc,MAAM,KAAK,MAAM,SAAS,KAAK,CAAC,EAAG,QAAO;AAAA,IAC9D;AACA,WAAO;AAAA,EACT;AAGA,MAAI,OAAO,KAAK,OAAQ,QAAO;AAE/B,MAAI,QAAQ,KAAK;AAEf,WAAO,cAAc,MAAM,KAAK,GAAG,SAAS,KAAK,CAAC;AAAA,EACpD;AAGA,MAAI,KAAK,EAAE,MAAM,KAAK;AACpB,WAAO,cAAc,MAAM,KAAK,GAAG,SAAS,KAAK,CAAC;AAAA,EACpD;AAEA,SAAO;AACT;;;AC9EA,SAAS,UAAU,OAAe,SAAiB,OAAuB;AACxE,SAAO,GAAG,KAAK,IAAI,OAAO,IAAI,KAAK;AACrC;AAEA,SAAS,aAAa,GAA0B;AAC9C,SAAO,UAAU,EAAE,OAAO,EAAE,SAAS,EAAE,KAAK;AAC9C;AAkBO,SAAS,eAAe,OAAyD;AACtF,QAAM,MAAM,oBAAI,IAA0E;AAE1F,MAAI;AAEJ,MAAI,MAAM,QAAQ,KAAK,GAAG;AACxB,cAAU;AAAA,EACZ,OAAO;AACL,cAAU,mBAAmB,KAAK;AAAA,EACpC;AAEA,QAAM,YAA0C,OAAO,OAAO,CAAC,GAAG,OAAO,CAAC;AAE1E,aAAW,SAAS,SAAS;AAC3B,QAAI,MAAM,eAAe,MAAM,YAAY,SAAS,GAAG,GAAG;AACxD,YAAM,IAAI;AAAA,QACR,UAAU,MAAM,KAAK,OAAO,MAAM,OAAO,QAAQ,MAAM,KAAK,8BAA8B,MAAM,WAAW;AAAA,MAC7G;AAAA,IACF;AACA,QAAI,MAAM,YAAY,QAAQ;AAC5B,YAAM,QAAQ,UAAU,MAAM,KAAK,OAAO,MAAM,OAAO,QAAQ,MAAM,KAAK;AAC1E,6BAAuB,MAAM,YAAY,KAAK;AAE9C,YAAM,gBAAgB,KAAK,IAAI,GAAG,MAAM,WAAW,IAAI,CAAC,MAAM,EAAE,SAAS,CAAC;AAAA,IAC5E
,OAAO;AAEL,YAAM,gBAAgB;AAAA,IACxB;AACA,UAAM,MAAM,UAAU,MAAM,OAAO,MAAM,SAAS,MAAM,KAAK;AAC7D,UAAM,YAAY,MAAM,aACpB,cAAc,MAAM,YAAY,IAAI,MAAM,KAAK,OAAO,MAAM,OAAO,QAAQ,MAAM,KAAK,GAAG,IACzF;AACJ,QAAI,IAAI,KAAK,EAAE,OAAO,UAAU,UAAU,CAAC;AAAA,EAC7C;AAGA,QAAM,WAAW,oBAAI,IAA0C;AAC/D,QAAM,WAAW,oBAAI,IAA6B;AAClD,aAAW,SAAS,SAAS;AAC3B,UAAM,WAAW,SAAS,IAAI,MAAM,OAAO;AAC3C,QAAI,UAAU;AACZ,eAAS,KAAK,KAAK;AAAA,IACrB,OAAO;AACL,eAAS,IAAI,MAAM,SAAS,CAAC,KAAK,CAAC;AAAA,IACrC;AAAA,EACF;AACA,aAAW,CAAC,KAAK,GAAG,KAAK,UAAU;AACjC,aAAS,IAAI,KAAK,OAAO,OAAO,GAAG,CAAC;AAAA,EACtC;AAWA,QAAM,gBAAgB,oBAAI,IAA0C;AACpE,QAAM,gBAAgB,oBAAI,IAA6B;AACvD,QAAM,eAAe,oBAAI,IAAyB;AAClD,aAAW,SAAS,SAAS;AAC3B,QAAI,CAAC,MAAM,YAAa;AACxB,QAAI,OAAO,aAAa,IAAI,MAAM,KAAK;AACvC,QAAI,CAAC,MAAM;AACT,aAAO,oBAAI,IAAI;AACf,mBAAa,IAAI,MAAM,OAAO,IAAI;AAAA,IACpC;AACA,QAAI,KAAK,IAAI,MAAM,WAAW,EAAG;AACjC,SAAK,IAAI,MAAM,WAAW;AAC1B,UAAM,WAAW,cAAc,IAAI,MAAM,KAAK;AAC9C,QAAI,UAAU;AACZ,eAAS,KAAK,KAAK;AAAA,IACrB,OAAO;AACL,oBAAc,IAAI,MAAM,OAAO,CAAC,KAAK,CAAC;AAAA,IACxC;AAAA,EACF;AACA,aAAW,CAAC,KAAK,GAAG,KAAK,eAAe;AACtC,kBAAc,IAAI,KAAK,OAAO,OAAO,GAAG,CAAC;AAAA,EAC3C;AAEA,SAAO;AAAA,IACL,OAAO,OAAe,SAAiB,OAA0C;AAC/E,aAAO,IAAI,IAAI,UAAU,OAAO,SAAS,KAAK,CAAC,GAAG;AAAA,IACpD;AAAA,IAEA,gBAAgB,SAA+C;AAC7D,aAAO,SAAS,IAAI,OAAO,KAAK,CAAC;AAAA,IACnC;AAAA,IAEA,oBAAoB,OAA6C;AAC/D,aAAO,cAAc,IAAI,KAAK,KAAK,CAAC;AAAA,IACtC;AAAA,IAEA,SACE,OACA,SACA,OACA,MACA,WACM;AACN,YAAM,MAAM,IAAI,IAAI,UAAU,OAAO,SAAS,KAAK,CAAC;AAEpD,UAAI,CAAC,KAAK;AACR,cAAM,IAAI,uBAAuB,OAAO,SAAS,KAAK;AAAA,MACxD;AAGA,UAAI,cAAc,UAAa,IAAI,MAAM,aAAa,IAAI,MAAM,UAAU,SAAS,GAAG;AACpF,YAAI,CAAC,cAAc,WAAW,IAAI,MAAM,SAAS,GAAG;AAClD,gBAAM,IAAI,mBAAmB,OAAO,SAAS,OAAO,WAAW,IAAI,MAAM,SAAS;AAAA,QACpF;AAAA,MACF;AAEA,UAAI,IAAI,UAAU;AAChB,YAAI;AACF,cAAI,SAAS,IAAI;AAAA,QACnB,SAAS,KAAc;AACrB,cAAI,eAAe,gBAAiB,OAAM;AAC1C,gBAAM,IAAI;AAAA,YACR,+BAA+B,KAAK,OAAO,OAAO,QAAQ,KAAK;AAAA,YAC/D;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,IAEA,UAAwC;AACtC,aAAO;AAAA,IACT;AAAA,EACF;AACF;AAYO,SAAS,qBAAqB,MAAqB,WAAyC;AAEjG,QAAM,WAAW,IAAI,IAAI,KAAK,QAAQ,EAAE,IAAI,YAAY,CAAC;AAEzD,SAAO;AAAA,IACL,OAAO,OAAe,SAAiB,OAA0C;AAC/E,aAAO,KAAK,OAAO,OAAO,SAAS,KAAK,KAAK,UAAU,OAAO,OAAO,SAAS,KAAK;AAAA,IACrF;AAAA,IAEA,gBAAgB,SAA+C;AAC7D,YAAM,cAAc,KAAK,gBAAgB,OAAO;AAChD,YAAM,aAAa,UAAU,gBAAgB,OAAO;AACpD,UAAI,WAAW,WAAW,EAAG,QAAO;AACpC,UAAI,YAAY,WAAW,EAAG,QAAO;AAGrC,YAAM,OAAO,IAAI,IAAI,YAAY,IAAI,YAAY,CAAC;AAClD,YAAM,SAAS,CAAC,GAAG,WAAW;AAC9B,iBAAW,SAAS,YAAY;AAC9B,YAAI,CAAC,KAAK,IAAI,aAAa,KAAK,CAAC,GAAG;AAClC,iBAAO,KAAK,KAAK;AAAA,QACnB;AAAA,MACF;AACA,aAAO,OAAO,OAAO,MAAM;AAAA,IAC7B;AAAA,IAEA,oBAAoB,OAA6C;AAC/D,YAAM,cAAc,KAAK,oBAAoB,KAAK;AAClD,YAAM,aAAa,UAAU,oBAAoB,KAAK;AACtD,UAAI,WAAW,WAAW,EAAG,QAAO;AACpC,UAAI,YAAY,WAAW,EAAG,QAAO;AAMrC,YAAM,OAAO,IAAI,IAAI,YAAY,IAAI,CAAC,MAAM,EAAE,WAAW,CAAC;AAC1D,YAAM,SAAS,CAAC,GAAG,WAAW;AAC9B,iBAAW,SAAS,YAAY;AAC9B,YAAI,CAAC,KAAK,IAAI,MAAM,WAAW,GAAG;AAChC,eAAK,IAAI,MAAM,WAAW;AAC1B,iBAAO,KAAK,KAAK;AAAA,QACnB;AAAA,MACF;AACA,aAAO,OAAO,OAAO,MAAM;AAAA,IAC7B;AAAA,IAEA,SACE,OACA,SACA,OACA,MACA,WACM;AACN,UAAI,SAAS,IAAI,UAAU,OAAO,SAAS,KAAK,CAAC,GAAG;AAClD,eAAO,KAAK,SAAS,OAAO,SAAS,OAAO,MAAM,SAAS;AAAA,MAC7D;AAEA,aAAO,UAAU,SAAS,OAAO,SAAS,OAAO,MAAM,SAAS;AAAA,IAClE;AAAA,IAEA,UAAwC;AACtC,YAAM,aAAa,UAAU,QAAQ;AACrC,UAAI,WAAW,WAAW,EAAG,QAAO,KAAK,QAAQ;AAEjD,YAAM,SAAS,CAAC,GAAG,KAAK,QAAQ,CAAC;AACjC,iBAAW,SAAS,YAAY;AAC9B,YAAI,CAAC,SAAS,IAAI,aAAa,KAAK,CAAC,GAAG;AACtC,iBAAO,KAAK,KAAK;AAAA,QACnB;AAAA,MACF;AACA,aAAO,OAAO,OAAO,MAAM;AAAA,IAC7B;AAAA,EACF;AACF;AAOA,SAAS,mBAAmB,WAA6C;AACvE,QAAM,UAA2B,CAAC;AAGlC,aAAW,CAAC,MAAM,MAAM,KAAK,UAAU,OAAO;AAC5C,YAAQ,KAAK;AAAA,MACX,OAAO;AAAA
,MACP,SAAS;AAAA,MACT,OAAO;AAAA,MACP,YAAY,OAAO;AAAA,MACnB,aAAa,OAAO;AAAA,MACpB,YAAY,OAAO;AAAA,MACnB,eAAe,OAAO;AAAA,MACtB,WAAW,OAAO;AAAA,MAClB,YAAY,OAAO;AAAA,MACnB,oBAAoB,OAAO;AAAA,MAC3B,SAAS,OAAO;AAAA,IAClB,CAAC;AAAA,EACH;AAGA,aAAW,CAAC,SAAS,MAAM,KAAK,UAAU,OAAO;AAC/C,UAAM,WAAW,OAAO;AACxB,QAAI,CAAC,SAAU;AAEf,UAAM,YAAY,MAAM,QAAQ,SAAS,IAAI,IAAI,SAAS,OAAO,CAAC,SAAS,IAAI;AAC/E,UAAM,UAAU,MAAM,QAAQ,SAAS,EAAE,IAAI,SAAS,KAAK,CAAC,SAAS,EAAE;AAEvE,UAAM,sBAAsB,OAAO,eAAe,SAAS;AAC3D,QAAI,uBAAuB,oBAAoB,SAAS,GAAG,GAAG;AAC5D,YAAM,IAAI;AAAA,QACR,SAAS,OAAO,8BAA8B,mBAAmB;AAAA,MACnE;AAAA,IACF;AAEA,eAAW,SAAS,WAAW;AAC7B,iBAAW,SAAS,SAAS;AAC3B,gBAAQ,KAAK;AAAA,UACX;AAAA,UACA;AAAA,UACA;AAAA,UACA,YAAY,OAAO;AAAA,UACnB,aAAa,OAAO;AAAA,UACpB,cAAc,SAAS;AAAA,UACvB,YAAY,OAAO;AAAA,UACnB,eAAe,OAAO;AAAA,UACtB,WAAW,OAAO;AAAA,UAClB,aAAa;AAAA,UACb,YAAY,OAAO;AAAA,UACnB,oBAAoB,OAAO;AAAA,UAC3B,SAAS,OAAO;AAAA,QAClB,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;;;ACxSA,IAAAC,sBAA2B;AAjB3B;AAmCA,IAAI,UAAyB;AAC7B,IAAI,aAAa;AACjB,IAAM,WAAW,oBAAI,IAMnB;AAUF,IAAM,gBAAgB;AAAA,EACpB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,EAAE,KAAK,IAAI;AAgBX,IAAI,cAAsF;AAE1F,eAAe,iBAA2D;AACxE,MAAI,YAAa,QAAO;AACxB,QAAM,KAAK,MAAM,OAAO,gBAAqB;AAC7C,gBAAc,GAAG;AACjB,SAAO;AACT;AAEA,eAAe,eAAgC;AAC7C,MAAI,QAAS,QAAO;AAEpB,QAAM,OAAO,MAAM,eAAe;AAClC,YAAU,IAAI,KAAK,eAAe;AAAA,IAChC,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY,IAAI;AAAA,EAC3C,CAAC;AAGD,UAAQ,MAAM;AAEd,UAAQ,GAAG,WAAW,CAAC,QAAwB;AAC7C,QAAI,IAAI,OAAO,OAAW;AAC1B,UAAM,UAAU,SAAS,IAAI,IAAI,EAAE;AACnC,QAAI,CAAC,QAAS;AACd,aAAS,OAAO,IAAI,EAAE;AAEtB,QAAI,IAAI,SAAS,SAAS;AACxB,cAAQ,OAAO,IAAI,eAAe,IAAI,WAAW,uBAAuB,CAAC;AAAA,IAC3E,OAAO;AACL,cAAQ,QAAQ,GAAG;AAAA,IACrB;AAAA,EACF,CAAC;AAED,UAAQ,GAAG,SAAS,CAAC,QAAe;AAElC,eAAW,CAAC,EAAE,CAAC,KAAK,UAAU;AAC5B,QAAE,OAAO,IAAI,eAAe,yBAAyB,IAAI,OAAO,EAAE,CAAC;AAAA,IACrE;AACA,aAAS,MAAM;AACf,cAAU;AAAA,EACZ,CAAC;AAED,UAAQ,GAAG,QAAQ,CAAC,SAAiB;AAInC,QAAI,SAAS,OAAO,GAAG;AACrB,iBAAW,CAAC,EAAE,CAAC,KAAK,UAAU;AAC5B,UAAE,OAAO,IAAI,eAAe,mCAAmC,IAAI,EAAE,CAAC;AAAA,MACxE;AACA,eAAS,MAAM;AAAA,IACjB;AACA,cAAU;AAAA,EACZ,CAAC;AAED,SAAO;AACT;AAEA,eAAe,aAAa,KAAuD;AACjF,QAAM,SAAS,MAAM,aAAa;AAClC,MAAI,cAAc,OAAO,iBAAkB,cAAa;AACxD,QAAM,KAAK,EAAE;AACb,SAAO,IAAI,QAAwB,CAAC,SAAS,WAAW;AACtD,aAAS,IAAI,IAAI,EAAE,SAA0C,OAAO,CAAC;AACrE,WAAO,YAAY,EAAE,GAAG,KAAK,GAAG,CAAC;AAAA,EACnC,CAAC;AACH;AAWA,IAAM,gBAAgB,oBAAI,QAAqD;AAE/E,SAAS,iBAAiB,UAAuD;AAC/E,MAAI,QAAQ,cAAc,IAAI,QAAQ;AACtC,MAAI,CAAC,OAAO;AACV,YAAQ,oBAAI,IAAI;AAChB,kBAAc,IAAI,UAAU,KAAK;AAAA,EACnC;AACA,SAAO;AACT;AAEA,SAAS,WAAW,QAAwB;AAC1C,aAAO,gCAAW,
QAAQ,EAAE,OAAO,MAAM,EAAE,OAAO,KAAK;AACzD;AAQA,IAAI,uBAA0D;AAE9D,eAAe,oBAAyD;AACtE,MAAI,qBAAsB,QAAO;AACjC,yBAAuB,MAAM;AAC7B,SAAO;AACT;AAqBO,SAAS,gBAAgB,QAA6B;AAQ3D,UAAQ,OAAO,SAAkC;AAC/C,UAAM,EAAE,yBAAAC,0BAAyB,2BAAAC,2BAA0B,IAAI,MAAM,kBAAkB;AACvF,UAAM,WAAW,KAAK,UAAUD,yBAAwB,IAAI,CAAC;AAC7D,UAAM,WAAW,MAAM,aAAa,EAAE,MAAM,WAAW,QAAQ,SAAS,CAAC;AACzE,QAAI,SAAS,eAAe,UAAa,SAAS,eAAe,MAAM;AACrE,YAAM,IAAI,eAAe,kDAAkD;AAAA,IAC7E;AACA,QAAI;AACF,aAAOC,2BAA0B,KAAK,MAAM,SAAS,UAAU,CAAC;AAAA,IAClE,QAAQ;AACN,YAAM,IAAI,eAAe,kDAAkD;AAAA,IAC7E;AAAA,EACF;AACF;AAgBA,eAAsB,iBACpB,QACA,UACe;AACf,MAAI,YAAY,aAAa,iBAAiB;AAE5C,QAAI;AACF,eAAS,MAAM;AAAA,IACjB,SAAS,KAAc;AACrB,UAAI,eAAe,eAAgB,OAAM;AACzC,YAAM,IAAI,eAAe,uCAAwC,IAAc,OAAO,EAAE;AAAA,IAC1F;AACA;AAAA,EACF;AAGA,QAAM,aAAa,EAAE,MAAM,WAAW,OAAO,CAAC;AAChD;AAaO,SAAS,mBACd,QACA,WAA8B,iBACjB;AACb,QAAM,QAAQ,iBAAiB,QAAQ;AACvC,QAAM,MAAM,WAAW,MAAM;AAC7B,QAAM,SAAS,MAAM,IAAI,GAAG;AAC5B,MAAI,OAAQ,QAAO;AAEnB,MAAI;AACF,UAAM,KAAK,SAAS,MAAM;AAC1B,UAAM,IAAI,KAAK,EAAE;AACjB,WAAO;AAAA,EACT,SAAS,KAAc;AACrB,QAAI,eAAe,eAAgB,OAAM;AACzC,UAAM,IAAI,eAAe,uCAAwC,IAAc,OAAO,EAAE;AAAA,EAC1F;AACF;AASO,SAAS,kBACd,QACA,UACiB;AACjB,SAAO,OAAO,IAAI,CAAC,UAAU;AAAA,IAC3B,aAAa,KAAK;AAAA,IAClB,WAAW,KAAK;AAAA,IAChB,IAAI,mBAAmB,KAAK,IAAI,QAAQ;AAAA,EAC1C,EAAE;AACJ;;;ANnYO,IAAM,iBAAiB;AAGvB,IAAM,iBAAiB;AAO9B,IAAM,+BAA+B;AAAA,EACnC,MAAM;AAAA,EACN,UAAU,CAAC,eAAe,aAAa,IAAI;AAAA,EAC3C,YAAY;AAAA,IACV,aAAa,EAAE,MAAM,WAAW,SAAS,EAAE;AAAA,IAC3C,WAAW,EAAE,MAAM,WAAW,SAAS,EAAE;AAAA,IACzC,IAAI,EAAE,MAAM,UAAU,WAAW,EAAE;AAAA,EACrC;AAAA,EACA,sBAAsB;AACxB;AAGO,IAAM,mBAA2B;AAAA,EACtC,MAAM;AAAA,EACN,UAAU,CAAC,QAAQ,YAAY;AAAA,EAC/B,YAAY;AAAA,IACV,MAAM,EAAE,MAAM,UAAU,WAAW,EAAE;AAAA,IACrC,YAAY,EAAE,MAAM,SAAS;AAAA,IAC7B,aAAa,EAAE,MAAM,SAAS;AAAA,IAC9B,YAAY,EAAE,MAAM,SAAS;AAAA,IAC7B,eAAe,EAAE,MAAM,SAAS;AAAA,IAChC,cAAc,EAAE,MAAM,SAAS;AAAA,IAC/B,SAAS,EAAE,MAAM,SAAS;AAAA,IAC1B,WAAW,EAAE,MAAM,SAAS,OAAO,EAAE,MAAM,UAAU,WAAW,EAAE,EAAE;AAAA,IACpE,eAAe,EAAE,MAAM,WAAW,SAAS,EAAE;AAAA,IAC7C,YAAY,EAAE,MAAM,SAAS,OAAO,6BAA6B;AAAA,IACjE,oBAAoB,EAAE,MAAM,UAAU,MAAM,CAAC,OAAO,SAAS,YAAY,EAAE;AAAA,EAC7E;AAAA,EACA,sBAAsB;AACxB;AAGO,IAAM,mBAA2B;AAAA,EACtC,MAAM;AAAA,EACN,UAAU,CAAC,QAAQ,QAAQ,IAAI;AAAA,EAC/B,YAAY;AAAA,IACV,MAAM,EAAE,MAAM,UAAU,WAAW,EAAE;AAAA,IACrC,MAAM;AAAA,MACJ,OAAO;AAAA,QACL,EAAE,MAAM,UAAU,WAAW,EAAE;AAAA,QAC/B,EAAE,MAAM,SAAS,OAAO,EAAE,MAAM,UAAU,WAAW,EAAE,GAAG,UAAU,EAAE;AAAA,MACxE;AAAA,IACF;AAAA,IACA,IAAI;AAAA,MACF,OAAO;AAAA,QACL,EAAE,MAAM,UAAU,WAAW,EAAE;AAAA,QAC/B,EAAE,MAAM,SAAS,OAAO,EAAE,MAAM,UAAU,WAAW,EAAE,GAAG,UAAU,EAAE;AAAA,MACxE;AAAA,IACF;AAAA,IACA,YAAY,EAAE,MAAM,SAAS;AAAA,IAC7B,cAAc,EAAE,MAAM,SAAS;AAAA,IAC/B,aAAa,EAAE,MAAM,SAAS;AAAA,IAC9B,YAAY,EAAE,MAAM,SAAS;AAAA,IAC7B,eAAe,EAAE,MAAM,SAAS;AAAA,IAChC,cAAc,EAAE,MAAM,SAAS;AAAA,IAC/B,SAAS,EAAE,MAAM,SAAS;AAAA,IAC1B,WAAW,EAAE,MAAM,SAAS,OAAO,EAAE,MAAM,UAAU,WAAW,EAAE,EAAE;AAAA,IACpE,aAAa,EAAE,MAAM,UAAU,WAAW,GAAG,SAAS,UAAU;AAAA,IAChE,eAAe,EAAE,MAAM,WAAW,SAAS,EAAE;AAAA,IAC7C,YAAY,EAAE,MAAM,SAAS,OAAO,6BAA6B;AAAA,IACjE,oBAAoB,EAAE,MAAM,UAAU,MAAM,CAAC,OAAO,SAAS,YAAY,EAAE;AAAA,EAC7E;AAAA,EACA,sBAAsB;AACxB;AAOO,IAAM,oBAA8C;AAAA,EACzD;AAAA,IACE,OAAO;AAAA,IACP,SAAS;AAAA,IACT,OAAO;AAAA,IACP,YAAY;AAAA,IACZ,aAAa;AAAA,EACf;AAAA,EACA;AAAA,IACE,OAAO;AAAA,IACP,SAAS;AAAA,IACT,OAAO;AAAA,IACP,YAAY;AAAA,IACZ,aAAa;AAAA,EACf;AACF;AAeA,IAAI,qBAA2C;AACxC,SAAS,0BAAyC;AACvD,MAAI,mBAAoB,QAAO;AAC/B,uBAAqB,eAAe,CAAC,GAAG,iBAAiB,CAAC;AAC1D,SAAO;AACT;AAaO,SAAS,yBAAyB,UAAkB,MAAsB;AAC/E,QAAM,WAAO,gCAAW,QAAQ,EAAE,OAAO,GAAG,QAAQ,IAAI,IAAI,EAAE,EAAE,OAAO,WAAW;AAClF,SAAO,KAAK,MAAM,GAAG,EAAE;AACzB;AAeA,eAAsB,wBACpB,QACA,UACwB;AACxB,QA
AM,CAAC,WAAW,SAAS,IAAI,MAAM,QAAQ,IAAI;AAAA,IAC/C,OAAO,UAAU,EAAE,OAAO,eAAe,CAAC;AAAA,IAC1C,OAAO,UAAU,EAAE,OAAO,eAAe,CAAC;AAAA,EAC5C,CAAC;AAED,QAAM,UAA2B,CAAC,GAAG,iBAAiB;AAItD,QAAM,iBAAkC,CAAC;AACzC,aAAW,UAAU,WAAW;AAC9B,UAAM,OAAO,OAAO;AACpB,QAAI,KAAK,YAAY;AACnB,iBAAW,KAAK,KAAK,YAAY;AAC/B,uBAAe,KAAK,iBAAiB,EAAE,IAAI,QAAQ,CAAC;AAAA,MACtD;AAAA,IACF;AAAA,EACF;AACA,aAAW,UAAU,WAAW;AAC9B,UAAM,OAAO,OAAO;AACpB,QAAI,KAAK,YAAY;AACnB,iBAAW,KAAK,KAAK,YAAY;AAC/B,uBAAe,KAAK,iBAAiB,EAAE,IAAI,QAAQ,CAAC;AAAA,MACtD;AAAA,IACF;AAAA,EACF;AACA,QAAM,QAAQ,IAAI,cAAc;AAGhC,aAAW,UAAU,WAAW;AAC9B,UAAM,OAAO,OAAO;AACpB,YAAQ,KAAK;AAAA,MACX,OAAO,KAAK;AAAA,MACZ,SAAS;AAAA,MACT,OAAO,KAAK;AAAA,MACZ,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,YAAY,KAAK;AAAA,MACjB,eAAe,KAAK;AAAA,MACpB,WAAW,KAAK;AAAA,MAChB,YAAY,KAAK,aAAa,kBAAkB,KAAK,YAAY,QAAQ,IAAI;AAAA,MAC7E,oBAAoB,KAAK;AAAA,IAC3B,CAAC;AAAA,EACH;AAGA,aAAW,UAAU,WAAW;AAC9B,UAAM,OAAO,OAAO;AACpB,UAAM,YAAY,MAAM,QAAQ,KAAK,IAAI,IAAI,KAAK,OAAO,CAAC,KAAK,IAAI;AACnE,UAAM,UAAU,MAAM,QAAQ,KAAK,EAAE,IAAI,KAAK,KAAK,CAAC,KAAK,EAAE;AAE3D,UAAM,qBAAqB,KAAK,aAC5B,kBAAkB,KAAK,YAAY,QAAQ,IAC3C;AAEJ,eAAW,SAAS,WAAW;AAC7B,iBAAW,SAAS,SAAS;AAC3B,gBAAQ,KAAK;AAAA,UACX;AAAA,UACA,SAAS,KAAK;AAAA,UACd;AAAA,UACA,YAAY,KAAK;AAAA,UACjB,aAAa,KAAK;AAAA,UAClB,cAAc,KAAK;AAAA,UACnB,YAAY,KAAK;AAAA,UACjB,eAAe,KAAK;AAAA,UACpB,WAAW,KAAK;AAAA,UAChB,aAAa,KAAK;AAAA,UAClB,YAAY;AAAA,UACZ,oBAAoB,KAAK;AAAA,QAC3B,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AAEA,SAAO,eAAe,OAAO;AAC/B;;;AOhPO,SAAS,mBAAmB,QAAoC;AACrE,QAAM,EAAE,OAAO,MAAM,SAAS,OAAO,MAAM,OAAO,QAAQ,IAAI;AAE9D,MAAI,QAAQ,WAAW,QAAQ,CAAC,OAAO,OAAO,QAAQ;AACpD,WAAO,EAAE,UAAU,OAAO,OAAO,iBAAiB,MAAM,SAAS,IAAI,EAAE;AAAA,EACzE;AAEA,QAAM,UAAyB,CAAC;AAEhC,MAAI,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,MAAM,CAAC;AAClE,MAAI,KAAM,SAAQ,KAAK,EAAE,OAAO,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AAC/D,MAAI,QAAS,SAAQ,KAAK,EAAE,OAAO,WAAW,IAAI,MAAM,OAAO,QAAQ,CAAC;AACxE,MAAI,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,MAAM,CAAC;AAClE,MAAI,KAAM,SAAQ,KAAK,EAAE,OAAO,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AAE/D,MAAI,OAAO,OAAO;AAChB,eAAW,UAAU,OAAO,OAAO;AACjC,YAAM,QAAQ,eAAe,IAAI,OAAO,KAAK,IACzC,OAAO,QACP,OAAO,MAAM,WAAW,OAAO,IAC7B,OAAO,QACP,QAAQ,OAAO,KAAK;AAC1B,cAAQ,KAAK,EAAE,OAAO,IAAI,OAAO,IAAI,OAAO,OAAO,MAAM,CAAC;AAAA,IAC5D;AAAA,EACF;AAEA,MAAI,QAAQ,WAAW,GAAG;AACxB,UAAM,IAAI,kBAAkB,kDAAkD;AAAA,EAChF;AAKA,QAAM,iBAAiB,UAAU,SAAY,sBAAsB,SAAS;AAC5E,SAAO,EAAE,UAAU,SAAS,SAAS,SAAS,EAAE,OAAO,gBAAgB,QAAQ,EAAE;AACnF;AAEO,SAAS,mBAAmB,QAAoC;AACrE,QAAM,EAAE,OAAO,OAAO,QAAQ,IAAI;AAElC,QAAM,UAAyB;AAAA,IAC7B,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,MAAM;AAAA,IACzC,EAAE,OAAO,WAAW,IAAI,MAAM,OAAO,cAAc;AAAA,EACrD;AAEA,MAAI,OAAO,OAAO;AAChB,eAAW,UAAU,OAAO,OAAO;AACjC,YAAM,QAAQ,eAAe,IAAI,OAAO,KAAK,IACzC,OAAO,QACP,OAAO,MAAM,WAAW,OAAO,IAC7B,OAAO,QACP,QAAQ,OAAO,KAAK;AAC1B,cAAQ,KAAK,EAAE,OAAO,IAAI,OAAO,IAAI,OAAO,OAAO,MAAM,CAAC;AAAA,IAC5D;AAAA,EACF;AAEA,QAAM,iBAAiB,UAAU,SAAY,sBAAsB,SAAS;AAC5E,SAAO,EAAE,UAAU,SAAS,SAAS,SAAS,EAAE,OAAO,gBAAgB,QAAQ,EAAE;AACnF;;;ACtCA,IAAM,sBAA0D;AAAA,EAC9D,oBAAI,IAAI,CAAC,QAAQ,SAAS,CAAC;AAAA,EAC3B,oBAAI,IAAI,CAAC,WAAW,MAAM,CAAC;AAAA,EAC3B,oBAAI,IAAI,CAAC,SAAS,SAAS,CAAC;AAAA,EAC5B,oBAAI,IAAI,CAAC,WAAW,OAAO,CAAC;AAC9B;AAUO,SAAS,mBAAmB,SAA2C;AAI5E,QAAM,uBAAuB,oBAAI,IAAY;AAC7C,MAAI,iBAAiB;AAErB,aAAW,KAAK,SAAS;AACvB,QAAI,eAAe,IAAI,EAAE,KAAK,GAAG;AAC/B,2BAAqB,IAAI,EAAE,KAAK;AAAA,IAClC,OAAO;AAEL,uBAAiB;AAAA,IACnB;AAAA,EACF;AAIA,aAAW,WAAW,qBAAqB;AACzC,QAAI,UAAU;AACd,eAAW,SAAS,SAAS;AAC3B,UAAI,CAAC,qBAAqB,IAAI,KAAK,GAAG;AACpC,kBAAU;AACV;AAAA,MACF;AAAA,IACF;AACA,QAAI,SAAS;AAGX,aAAO,EAAE,MAAM,KAAK;AAAA,IACtB;AAAA,EACF;AAGA,QAAM,gBAAgB,CAAC,GAAG,oBAAoB;AAC9C,M
AAI,cAAc,WAAW,KAAK,gBAAgB;AAChD,WAAO;AAAA,MACL,MAAM;AAAA,MACN,QACE;AAAA,IAGJ;AAAA,EACF;AAEA,MAAI,gBAAgB;AAClB,WAAO;AAAA,MACL,MAAM;AAAA,MACN,QACE,qBAAqB,cAAc,KAAK,IAAI,CAAC;AAAA,IAIjD;AAAA,EACF;AAEA,SAAO;AAAA,IACL,MAAM;AAAA,IACN,QACE,qBAAqB,cAAc,KAAK,IAAI,CAAC;AAAA,EAIjD;AACF;;;ACrFA,SAASC,yBACP,OACA,KACA,MACgB;AAChB,SAAO,EAAE,OAAO,MAAM,KAAK,SAAS,eAAe,OAAO,OAAO,MAAM,KAAK,KAAK;AACnF;AAEA,SAASC,yBACP,OACA,MACA,SACA,OACA,MACA,MACgB;AAChB,SAAO,EAAE,OAAO,MAAM,SAAS,OAAO,MAAM,KAAK;AACnD;AAEO,IAAM,uBAAN,MAAuD;AAAA,EAC5D,YACmB,SACA,UACA,iBAAiC,SACjC,YAAoB,IACpB,kBAAsC,OACvD;AALiB;AACA;AACA;AACA;AACA;AAAA,EAChB;AAAA,EAEH,MAAM,QAAQ,KAAgD;AAC5D,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK;AAC9C,QAAI,CAAC,UAAU,CAAC,KAAK,SAAU,QAAO;AACtC,UAAM,SAAS,MAAM,cAAc,QAAQ,KAAK,UAAU,KAAK,eAAe;AAC9E,QAAI,OAAO,YAAY,OAAO,cAAc,OAAO;AACjD,YAAM,KAAK,QAAQ,UAAU,OAAO;AAAA,QAClC,aAAa,OAAO,OAAO;AAAA,QAC3B,GAAG,OAAO,OAAO;AAAA,MACnB,CAAC;AAAA,IACH;AACA,WAAO,OAAO;AAAA,EAChB;AAAA,EAEA,MAAM,QAAQ,MAAc,SAAiB,MAAiD;AAC5F,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK;AAC9C,QAAI,CAAC,UAAU,CAAC,KAAK,SAAU,QAAO;AACtC,UAAM,SAAS,MAAM,cAAc,QAAQ,KAAK,UAAU,KAAK,eAAe;AAC9E,QAAI,OAAO,YAAY,OAAO,cAAc,OAAO;AACjD,YAAM,KAAK,QAAQ,UAAU,OAAO;AAAA,QAClC,aAAa,OAAO,OAAO;AAAA,QAC3B,GAAG,OAAO,OAAO;AAAA,MACnB,CAAC;AAAA,IACH;AACA,WAAO,OAAO;AAAA,EAChB;AAAA,EAEA,MAAM,WAAW,MAAc,SAAiB,MAAgC;AAC9E,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK;AAC9C,WAAO,WAAW;AAAA,EACpB;AAAA,EAEQ,iBAAiB,SAAwB,qBAAqC;AACpF,QAAI,uBAAuB,KAAK,mBAAmB,MAAO;AAE1D,UAAM,SAAS,mBAAmB,OAAO;AACzC,QAAI,OAAO,KAAM;AAEjB,QAAI,KAAK,mBAAmB,SAAS;AACnC,YAAM,IAAI,iBAAiB,OAAO,MAAO;AAAA,IAC3C;AAEA,YAAQ,KAAK,qCAAqC,OAAO,MAAM,EAAE;AAAA,EACnE;AAAA,EAEA,MAAM,UAAU,QAAuD;AACrE,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI;AACJ,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK,KAAK;AACnD,gBAAU,SAAS,CAAC,MAAM,IAAI,CAAC;AAAA,IACjC,OAAO;AACL,WAAK,iBAAiB,KAAK,SAAS,OAAO,mBAAmB;AAC9D,gBAAU,MAAM,KAAK,QAAQ,MAAM,KAAK,SAAS,KAAK,OAAO;AAAA,IAC/D;AACA,WAAO,KAAK,gBAAgB,OAAO;AAAA,EACrC;AAAA,EAEA,MAAM,UAAU,QAAuD;AACrE,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI;AACJ,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK,KAAK;AACnD,gBAAU,SAAS,CAAC,MAAM,IAAI,CAAC;AAAA,IACjC,OAAO;AACL,WAAK,iBAAiB,KAAK,SAAS,OAAO,mBAAmB;AAC9D,gBAAU,MAAM,KAAK,QAAQ,MAAM,KAAK,SAAS,KAAK,OAAO;AAAA,IAC/D;AACA,WAAO,KAAK,gBAAgB,OAAO;AAAA,EACrC;AAAA,EAEA,MAAc,gBAAgB,SAA4D;AACxF,QAAI,CAAC,KAAK,YAAY,QAAQ,WAAW,EAAG,QAAO;AACnD,UAAM,UAAU,MAAM,eAAe,SAAS,KAAK,UAAU,KAAK,eAAe;AACjF,eAAW,UAAU,SAAS;AAC5B,UAAI,OAAO,YAAY,OAAO,cAAc,OAAO;AACjD,cAAM,QACJ,OAAO,OAAO,YAAY,gBACtB,iBAAiB,OAAO,OAAO,IAAI,IACnC,iBAAiB,OAAO,OAAO,MAAM,OAAO,OAAO,SAAS,OAAO,OAAO,IAAI;AACpF,cAAM,KAAK,QAAQ,UAAU,OAAO;AAAA,UAClC,aAAa,OAAO,OAAO;AAAA,UAC3B,GAAG,OAAO,OAAO;AAAA,QACnB,CAAC;AAAA,MACH;AAAA,IACF;AACA,WAAO,QAAQ,IAAI,CAAC,MAAM,EAAE,MAAM;AAAA,EACpC;AAAA,EAEA,MAAM,QAAQ,OAAe,KAAa,MAA8C;AACtF,UAAM,KAAK,UAAU,OAAO,KAAK,MAAM,OAAO;AAAA,EAChD;AAAA,EAEA,MAAM,QACJ,OACA,MACA,SACA,OACA,MACA,MACe;AACf,UAAM,KAAK,UAAU,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,OAAO;AAAA,EACvE;AAAA,EAEA,MAAM,YAAY,OAAe,KAAa,MAA8C;AAC1F,UAAM,KAAK,UAAU,OAAO,KAAK,MAAM,SAAS;AAAA,EAClD;AAAA,EAEA,MAAM,YACJ,OACA,MACA,SACA,OACA,MACA,MACe;AACf,UAAM,KAAK,UAAU,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,SAAS;AAAA,EACzE;AAAA,EAEA,MAAc,UACZ,OACA,KACA,MACA,MACe;AACf,4BAAwB,MAAM,SAAS,YAAY,gBAAgB,SAAS;AAC5E,QAAI,KAAK,UAAU;AACjB,WAAK,SAAS,SAAS,OAAO,eAAe,OAAO,MAAM,KAAK,SAAS;AAAA,IAC1E;AACA,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,SAASD,yBAAwB,OAAO,KAAK,IAAI;AACvD,QAAI,KAAK,UAAU;AACjB,YAAM,QAAQ,KAAK,SAAS,OAAO,OAAO,eAAe,KAAK;AA
C9D,UAAI,OAAO,iBAAiB,MAAM,gBAAgB,GAAG;AACnD,eAAO,IAAI,MAAM;AAAA,MACnB;AAAA,IACF;AACA,UAAM,KAAK,QAAQ,OAAO,OAAO,QAAQ,IAAI;AAAA,EAC/C;AAAA,EAEA,MAAc,UACZ,OACA,MACA,SACA,OACA,MACA,MACA,MACe;AACf,4BAAwB,MAAM,SAAS,YAAY,gBAAgB,SAAS;AAC5E,QAAI,KAAK,UAAU;AACjB,WAAK,SAAS,SAAS,OAAO,SAAS,OAAO,MAAM,KAAK,SAAS;AAAA,IACpE;AACA,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,SAASC,yBAAwB,OAAO,MAAM,SAAS,OAAO,MAAM,IAAI;AAC9E,QAAI,KAAK,UAAU;AACjB,YAAM,QAAQ,KAAK,SAAS,OAAO,OAAO,SAAS,KAAK;AACxD,UAAI,OAAO,iBAAiB,MAAM,gBAAgB,GAAG;AACnD,eAAO,IAAI,MAAM;AAAA,MACnB;AAAA,IACF;AACA,UAAM,KAAK,QAAQ,OAAO,OAAO,QAAQ,IAAI;AAAA,EAC/C;AAAA,EAEA,MAAM,WAAW,KAAa,MAA8C;AAC1E,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,KAAK,QAAQ,UAAU,OAAO,EAAE,SAAS,aAAa,IAAI,EAAE,CAAC;AAAA,EACrE;AAAA,EAEA,MAAM,WACJ,MACA,SACA,MACA,MACe;AACf,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,KAAK,QAAQ,UAAU,OAAO,EAAE,SAAS,aAAa,IAAI,EAAE,CAAC;AAAA,EACrE;AAAA,EAEA,MAAM,WAAW,KAA4B;AAC3C,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,KAAK,QAAQ,UAAU,KAAK;AAAA,EACpC;AAAA,EAEA,MAAM,WAAW,MAAc,SAAiB,MAA6B;AAC3E,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,KAAK,QAAQ,UAAU,KAAK;AAAA,EACpC;AACF;;;ACjLA,IAAM,sBAAsB,oBAAI,IAAI,CAAC,gBAAgB,cAAc,CAAC;AAEpE,SAASC,yBACP,OACA,KACA,MACgB;AAChB,SAAO,EAAE,OAAO,MAAM,KAAK,SAAS,eAAe,OAAO,OAAO,MAAM,KAAK,KAAK;AACnF;AAEA,SAASC,yBACP,OACA,MACA,SACA,OACA,MACA,MACgB;AAChB,SAAO,EAAE,OAAO,MAAM,SAAS,OAAO,MAAM,KAAK;AACnD;AAEO,IAAM,kBAAN,MAAM,iBAAgE;AAAA,EA0B3E,YACmB,SACjB,SAEA,aACA;AAJiB;AAKjB,SAAK,kBAAkB,SAAS,sBAAsB;AACtD,SAAK,mBAAmB,SAAS;AAEjC,QAAI,SAAS,cAAc;AACzB,WAAK,gBAAgB,QAAQ;AAC7B,WAAK,oBAAoB,wBAAwB;AACjD,UAAI,QAAQ,UAAU;AACpB,aAAK,iBAAiB,QAAQ;AAAA,MAChC;AACA,WAAK,cAAc;AAAA,IACrB,OAAO;AACL,WAAK,iBAAiB,SAAS;AAAA,IACjC;AAEA,SAAK,iBAAiB,SAAS,kBAAkB;AAAA,EACnD;AAAA,EA9CS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQT,IAAI,eAAoC;AACtC,WAAO,KAAK,QAAQ;AAAA,EACtB;AAAA;AAAA,EAGiB;AAAA;AAAA,EAGA;AAAA,EACA;AAAA,EACT;AAAA,EACS;AAAA;AAAA,EAGA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8BjB,aAA6B;AAC3B,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,sBAAiD;AAC/C,WAAO,KAAK,oBAAoB;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA,EAMQ,mBAAmB,OAA0C;AACnE,QAAI,CAAC,KAAK,cAAe,QAAO,KAAK;AAErC,QAAI,UAAU,kBAAkB,UAAU,gBAAgB;AACxD,aAAO,KAAK;AAAA,IACd;AAEA,WAAO,KAAK,mBAAmB,KAAK,kBAAkB,KAAK;AAAA,EAC7D;AAAA,EAEQ,kBAAkB,OAA+B;AACvD,QAAI,KAAK,gBAAgB,UAAU,kBAAkB,UAAU,iBAAiB;AAC9E,aAAO,KAAK;AAAA,IACd;AACA,WAAO,KAAK;AAAA,EACd;AAAA,EAEQ,sBAAiD;AACvD,QAAI,CAAC,KAAK,cAAe,QAAO,KAAK;AACrC,WAAO,KAAK,mBAAmB,KAAK,kBAAkB,KAAK;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA,EAMQ,iBAAiB,SAAwB,qBAAqC;AACpF,QAAI,uBAAuB,KAAK,mBAAmB,MAAO;AAE1D,UAAM,SAAS,mBAAmB,OAAO;AACzC,QAAI,OAAO,KAAM;AAEjB,QAAI,KAAK,mBAAmB,SAAS;AACnC,YAAM,IAAI,iBAAiB,OAAO,MAAO;AAAA,IAC3C;AAEA,YAAQ,KAAK,qCAAqC,OAAO,MAAM,EAAE;AAAA,EACnE;AAAA;AAAA;AAAA;AAAA,EAMA,MAAc,eACZ,QACA,OAC4B;AAC5B,UAAM,WAAW,KAAK,oBAAoB;AAC1C,QAAI,CAAC,SAAU,QAAO;AAEtB,UAAM,SAAS,MAAM,cAAc,QAAQ,UAAU,KAAK,eAAe;AACzE,QAAI,OAAO,UAAU;AACnB,WAAK,gBAAgB,QAAQ,KAAK;AAAA,IACpC;AACA,WAAO,OAAO;AAAA,EAChB;AAAA,EAEA,MAAc,gBAAgB,SAA4D;AACxF,UAAM,WAAW,KAAK,oBAAoB;AAC1C,QAAI,CAAC,YAAY,QAAQ,WAAW,EAAG,QAAO;AAE9C,UAAM,UAAU,MAAM,eAAe,SAAS,UAAU,KAAK,eAAe;AAC5E,eAAW,UAAU,SAAS;AAC5B,UAAI,OAAO,UAAU;AACnB,cAAM,QACJ,OAAO,OAAO,YAAY,gBACtB,iBAAiB,OAAO,OAAO,IAAI,IACnC,iBAAiB,OAAO,OAAO,MAAM,OAAO,OAAO,SAAS,OAAO,OAAO,IAAI;AACpF,aAAK,gBAAgB,QAAQ,KAAK;AAAA,MACpC;AAAA,IACF;AACA,WAAO,QAAQ,IAAI,CAAC,MAAM,EAAE,MAAM;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,gBAAgB,QAAyB,OAAqB;AACpE,QAAI,OAAO,cAAc,MAAO;AAEhC,UAAM,cAAc,YAAY;AAC9B,UAAI;AACF,cAAM,KAAK,QAAQ,UAAU,OAAO;AAAA,UAClC,aAAa,OAAO,OAAO;AAAA,UAC3B,GAAG,OAAO,OAAO;AA
AA,QACnB,CAAC;AAAA,MACH,SAAS,KAAc;AACrB,cAAM,MAAM,+CAA+C,KAAK,KAAM,IAAc,OAAO;AAC3F,YAAI,OAAO,cAAc,SAAS;AAChC,kBAAQ,MAAM,GAAG;AAAA,QACnB,OAAO;AACL,kBAAQ,KAAK,GAAG;AAAA,QAClB;AAAA,MACF;AAAA,IACF;AAEA,SAAK,YAAY;AAAA,EACnB;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,QAAQ,KAAgD;AAC5D,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK;AAC9C,QAAI,CAAC,OAAQ,QAAO;AACpB,WAAO,KAAK,eAAe,QAAQ,KAAK;AAAA,EAC1C;AAAA,EAEA,MAAM,QAAQ,MAAc,SAAiB,MAAiD;AAC5F,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK;AAC9C,QAAI,CAAC,OAAQ,QAAO;AACpB,WAAO,KAAK,eAAe,QAAQ,KAAK;AAAA,EAC1C;AAAA,EAEA,MAAM,WAAW,MAAc,SAAiB,MAAgC;AAC9E,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK;AAC9C,WAAO,WAAW;AAAA,EACpB;AAAA,EAEA,MAAM,UAAU,QAAuD;AACrE,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI;AACJ,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK,KAAK;AACnD,gBAAU,SAAS,CAAC,MAAM,IAAI,CAAC;AAAA,IACjC,OAAO;AACL,WAAK,iBAAiB,KAAK,SAAS,OAAO,mBAAmB;AAC9D,gBAAU,MAAM,KAAK,QAAQ,MAAM,KAAK,SAAS,KAAK,OAAO;AAAA,IAC/D;AACA,WAAO,KAAK,gBAAgB,OAAO;AAAA,EACrC;AAAA,EAEA,MAAM,UAAU,QAAuD;AACrE,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI;AACJ,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,SAAS,MAAM,KAAK,QAAQ,OAAO,KAAK,KAAK;AACnD,gBAAU,SAAS,CAAC,MAAM,IAAI,CAAC;AAAA,IACjC,OAAO;AACL,WAAK,iBAAiB,KAAK,SAAS,OAAO,mBAAmB;AAC9D,gBAAU,MAAM,KAAK,QAAQ,MAAM,KAAK,SAAS,KAAK,OAAO;AAAA,IAC/D;AACA,WAAO,KAAK,gBAAgB,OAAO;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,QAAQ,OAAe,KAAa,MAA8C;AACtF,UAAM,KAAK,UAAU,OAAO,KAAK,MAAM,OAAO;AAAA,EAChD;AAAA,EAEA,MAAM,QACJ,OACA,MACA,SACA,OACA,MACA,MACe;AACf,UAAM,KAAK,UAAU,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,OAAO;AAAA,EACvE;AAAA,EAEA,MAAM,YAAY,OAAe,KAAa,MAA8C;AAC1F,UAAM,KAAK,UAAU,OAAO,KAAK,MAAM,SAAS;AAAA,EAClD;AAAA,EAEA,MAAM,YACJ,OACA,MACA,SACA,OACA,MACA,MACe;AACf,UAAM,KAAK,UAAU,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,SAAS;AAAA,EACzE;AAAA,EAEA,MAAc,UACZ,OACA,KACA,MACA,MACe;AACf,4BAAwB,MAAM,SAAS,YAAY,gBAAgB,SAAS;AAC5E,UAAM,WAAW,KAAK,mBAAmB,KAAK;AAC9C,QAAI,UAAU;AACZ,eAAS,SAAS,OAAO,eAAe,OAAO,MAAM,KAAK,QAAQ,SAAS;AAAA,IAC7E;AACA,UAAM,UAAU,KAAK,kBAAkB,KAAK;AAC5C,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,SAASD,yBAAwB,OAAO,KAAK,IAAI;AACvD,QAAI,UAAU;AACZ,YAAM,QAAQ,SAAS,OAAO,OAAO,eAAe,KAAK;AACzD,UAAI,OAAO,iBAAiB,MAAM,gBAAgB,GAAG;AACnD,eAAO,IAAI,MAAM;AAAA,MACnB;AAAA,IACF;AACA,UAAM,QAAQ,OAAO,OAAO,QAAQ,IAAI;AAAA,EAC1C;AAAA,EAEA,MAAc,UACZ,OACA,MACA,SACA,OACA,MACA,MACA,MACe;AACf,4BAAwB,MAAM,SAAS,YAAY,gBAAgB,SAAS;AAC5E,UAAM,WAAW,KAAK,mBAAmB,KAAK;AAC9C,QAAI,UAAU;AACZ,eAAS,SAAS,OAAO,SAAS,OAAO,MAAM,KAAK,QAAQ,SAAS;AAAA,IACvE;AACA,UAAM,UAAU,KAAK,kBAAkB,KAAK;AAC5C,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,SAASC,yBAAwB,OAAO,MAAM,SAAS,OAAO,MAAM,IAAI;AAC9E,QAAI,UAAU;AACZ,YAAM,QAAQ,SAAS,OAAO,OAAO,SAAS,KAAK;AACnD,UAAI,OAAO,iBAAiB,MAAM,gBAAgB,GAAG;AACnD,eAAO,IAAI,MAAM;AAAA,MACnB;AAAA,IACF;AACA,UAAM,QAAQ,OAAO,OAAO,QAAQ,IAAI;AAAA,EAC1C;AAAA,EAEA,MAAM,WAAW,KAAa,MAA8C;AAC1E,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,KAAK,QAAQ,UAAU,OAAO,EAAE,SAAS,aAAa,IAAI,EAAE,CAAC;AAAA,EACrE;AAAA,EAEA,MAAM,WACJ,MACA,SACA,MACA,MACe;AACf,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,KAAK,QAAQ,UAAU,OAAO,EAAE,SAAS,aAAa,IAAI,EAAE,CAAC;AAAA,EACrE;AAAA,EAEA,MAAM,WAAW,KAA4B;AAC3C,UAAM,QAAQ,iBAAiB,GAAG;AAClC,UAAM,KAAK,QAAQ,UAAU,KAAK;AAAA,EACpC;AAAA,EAEA,MAAM,WAAW,MAAc,SAAiB,MAA6B;AAC3E,UAAM,QAAQ,iBAAiB,MAAM,SAAS,IAAI;AAClD,UAAM,KAAK,QAAQ,UAAU,KAAK;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,eAAkB,IAAsD;AAC5E,WAAO,KAAK,QAAQ,eAAe,OAAO,cAAc;AACtD,YAAM,UAAU,IAAI;AAAA,QAClB;AAAA,QACA,KAAK,oBAAoB;AAAA,QACzB,KAAK;AAAA,QACL,KAAK,QAAQ;AAAA,QACb,KAAK;AAAA,MACP;AACA,aAAO,GAAG,OAAO;AAAA,IACnB,CAAC;AAAA,EACH;AAAA,EAEA,QAAoB;AAClB,WAAO,IAAI;AA
AA,MACT,KAAK,QAAQ,YAAY;AAAA,MACzB,KAAK,oBAAoB;AAAA,MACzB,KAAK,QAAQ;AAAA,IACf;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAMA,SAAS,eAAuB,OAAe,SAAsB;AACnE,QAAI,CAAC,iBAAiB,cAAc,SAAS,GAAG,GAAG;AACjD,YAAM,IAAI;AAAA,QACR,wCAAwC,aAAa;AAAA,QAErD;AAAA,MACF;AAAA,IACF;AACA,QAAI,KAAK,SAAS,GAAG,GAAG;AACtB,YAAM,IAAI;AAAA,QACR,4CAA4C,IAAI;AAAA,QAEhD;AAAA,MACF;AAAA,IACF;AAEA,UAAM,eAAe,KAAK,QAAQ,SAAS,eAAe,IAAI;AAE9D,WAAO,IAAI;AAAA,MACT;AAAA,MACA;AAAA,QACE,UAAU,KAAK,oBAAoB;AAAA,QACnC,gBAAgB,KAAK;AAAA,QACrB,oBAAoB,KAAK;AAAA,QACzB,kBAAkB,KAAK;AAAA,MACzB;AAAA;AAAA,IAEF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,gBACJ,QACA,gBAC8B;AAC9B,QAAI,CAAC,KAAK,QAAQ,iBAAiB;AACjC,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MACF;AAAA,IACF;AACA,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,IAAI;AAAA,QACR;AAAA,QAEA;AAAA,MACF;AAAA,IACF;AACA,SAAK,iBAAiB,KAAK,SAAS,OAAO,mBAAmB;AAC9D,UAAM,UAAU,MAAM,KAAK,QAAQ,gBAAgB,QAAQ,cAAc;AACzE,WAAO,KAAK,gBAAgB,OAAO;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,UACJ,QAC6B;AAC7B,QAAI,CAAC,KAAK,QAAQ,WAAW;AAC3B,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAMA,UAAM,eACJ,OAAO,SACP,OAAO,QACP,OAAO,WACP,OAAO,SACP,OAAO,QACN,OAAO,SAAS,OAAO,MAAM,SAAS;AAEzC,QAAI,CAAC,cAAc;AACjB,WAAK,iBAAiB,CAAC,GAAG,OAAO,mBAAmB;AACpD,YAAMC,UAAS,MAAM,KAAK,QAAQ,UAAU,OAAO,YAAY,CAAC,CAAC;AACjE,aAAOA;AAAA,IACT;AAEA,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,IAAI;AAAA,QACR;AAAA,QAEA;AAAA,MACF;AAAA,IACF;AACA,SAAK,iBAAiB,KAAK,SAAS,OAAO,mBAAmB;AAC9D,UAAM,SAAS,MAAM,KAAK,QAAQ,UAAU,OAAO,YAAY,KAAK,OAAO;AAC3E,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,kBAAkB,KAAa,SAA+C;AAClF,WAAO,KAAK,QAAQ,kBAAkB,KAAK,MAAM,OAAO;AAAA,EAC1D;AAAA,EAEA,MAAM,gBAAgB,QAAyB,SAA4C;AACzF,WAAO,KAAK,QAAQ,gBAAgB,QAAQ,MAAM,OAAO;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBA,MAAM,WAAW,QAAyB,SAA4C;AACpF,QAAI,CAAC,KAAK,QAAQ,YAAY;AAC5B,YAAM,IAAI;AAAA,QACR;AAAA,QAGA;AAAA,MACF;AAAA,IACF;AACA,UAAM,UAAU,KAAK,gBAAgB,MAAM;AAC3C,WAAO,KAAK,QAAQ,WAAW,SAAS,OAAO;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,WACJ,QACA,OACA,SACqB;AACrB,QAAI,CAAC,KAAK,QAAQ,YAAY;AAC5B,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MACF;AAAA,IACF;AACA,UAAM,UAAU,KAAK,gBAAgB,MAAM;AAC3C,WAAO,KAAK,QAAQ,WAAW,SAAS,OAAO,OAAO;AAAA,EACxD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,MAAM,OAAO,QAA6C;AACxD,QAAI,CAAC,KAAK,QAAQ,QAAQ;AACxB,YAAM,IAAI;AAAA,QACR;AAAA,QAGA;AAAA,MACF;AAAA,IACF;AACA,QAAI,OAAO,QAAQ,WAAW,GAAG;AAC/B,aAAO,OAAO,UAAU,EAAE,OAAO,CAAC,GAAG,SAAS,CAAC,EAAE,IAAI,EAAE,OAAO,CAAC,EAAE;AAAA,IACnE;AACA,WAAO,KAAK,QAAQ,OAAO,MAAM;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA2BA,MAAM,mBAAmB,QAA+D;AACtF,QAAI,CAAC,KAAK,QAAQ,oBAAoB;AACpC,YAAM,IAAI;AAAA,QACR;AAAA,QAIA;AAAA,MACF;AAAA,IACF;AACA,QAAI,OAAO,QAAQ,WAAW,GAAG;AAC/B,aAAO;AAAA,QACL,MAAM,OAAO,KAAK,IAAI,OAAO,EAAE,OAAO,CAAC,GAAG,aAAa,EAAE,EAAE;AAAA,QAC3D,YAAY;AAAA,MACd;AAAA,IACF;AACA,WAAO,KAAK,QAAQ,mBAAmB,MAAM;AAAA,EAC/C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuCA,MAAM,mBACJ,QACiC;AACjC,QAAI,CAAC,KAAK,QAAQ,oBAAoB;AACpC,YAAM,IAAI;AAAA,QACR;AAAA,QAIA;AAAA,MACF;AAAA,IACF;AACA,QAAI,OAAO,OAAO,WAAW,GAAG;AAC9B,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAQA,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI;AACJ,QAAI;AACJ,QAAI,KAAK,aAAa,OAAO;AAK3B,gBAAU;AAAA,QACR,EAAE,OAAO,QAAQ,IAAI,MAAM,OAAO,OAAO,KAAM;AAA
A,QAC/C,EAAE,OAAO,WAAW,IAAI,MAAM,OAAO,OAAO,QAAS;AAAA,QACrD,EAAE,OAAO,QAAQ,IAAI,MAAM,OAAO,OAAO,KAAM;AAAA,MACjD;AACA,UAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,UAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,gBAAU;AAAA,IACZ,OAAO;AACL,gBAAU,KAAK;AACf,gBAAU,KAAK;AAAA,IACjB;AACA,SAAK,iBAAiB,SAAS,OAAO,mBAAmB;AACzD,UAAM,OAAO,MAAM,KAAK,QAAQ,mBAAmB,OAAO,QAAQ,SAAS,OAAO;AAClF,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiCA,MAAM,YAAY,QAAyD;AACzE,QAAI,CAAC,KAAK,QAAQ,aAAa;AAC7B,YAAM,IAAI;AAAA,QACR;AAAA,QAKA;AAAA,MACF;AAAA,IACF;AAQA,UAAM,UAAyB,CAAC;AAChC,QAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,QAAI,OAAO,QAAS,SAAQ,KAAK,EAAE,OAAO,WAAW,IAAI,MAAM,OAAO,OAAO,QAAQ,CAAC;AACtF,QAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,QAAI,OAAO,MAAO,SAAQ,KAAK,GAAG,OAAO,KAAK;AAC9C,SAAK,iBAAiB,SAAS,OAAO,mBAAmB;AAEzD,WAAO,KAAK,QAAQ,YAAY,MAAM;AAAA,EACxC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,MAAM,eAAe,QAA4D;AAC/E,QAAI,CAAC,KAAK,QAAQ,gBAAgB;AAChC,YAAM,IAAI;AAAA,QACR;AAAA,QAKA;AAAA,MACF;AAAA,IACF;AACA,UAAM,UAAyB,CAAC;AAChC,QAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,QAAI,OAAO,QAAS,SAAQ,KAAK,EAAE,OAAO,WAAW,IAAI,MAAM,OAAO,OAAO,QAAQ,CAAC;AACtF,QAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,SAAK,iBAAiB,SAAS,OAAO,mBAAmB;AACzD,WAAO,KAAK,QAAQ,eAAe,MAAM;AAAA,EAC3C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBA,MAAM,UAAU,QAAuD;AACrE,QAAI,CAAC,KAAK,QAAQ,WAAW;AAC3B,YAAM,IAAI;AAAA,QACR;AAAA,QAMA;AAAA,MACF;AAAA,IACF;AACA,UAAM,UAAyB,CAAC;AAChC,QAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,QAAI,OAAO,QAAS,SAAQ,KAAK,EAAE,OAAO,WAAW,IAAI,MAAM,OAAO,OAAO,QAAQ,CAAC;AACtF,QAAI,OAAO,MAAO,SAAQ,KAAK,EAAE,OAAO,SAAS,IAAI,MAAM,OAAO,OAAO,MAAM,CAAC;AAChF,SAAK,iBAAiB,SAAS,OAAO,mBAAmB;AACzD,WAAO,KAAK,QAAQ,UAAU,MAAM;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWQ,gBAAgB,QAAwC;AAC9D,UAAM,eACJ,OAAO,SACP,OAAO,QACP,OAAO,WACP,OAAO,SACP,OAAO,QACN,OAAO,SAAS,OAAO,MAAM,SAAS;AAEzC,QAAI,CAAC,cAAc;AACjB,WAAK,iBAAiB,CAAC,GAAG,OAAO,mBAAmB;AACpD,aAAO,CAAC;AAAA,IACV;AAEA,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,IAAI;AAAA,QACR;AAAA,QAGA;AAAA,MACF;AAAA,IACF;AACA,SAAK,iBAAiB,KAAK,SAAS,OAAO,mBAAmB;AAC9D,WAAO,KAAK;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAMA,MAAM,eACJ,MACA,YACA,aACA,SACe;AACf,QAAI,CAAC,KAAK,eAAe;AACvB,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AAEA,QAAI,oBAAoB,IAAI,IAAI,GAAG;AACjC,YAAM,IAAI;AAAA,QACR,uBAAuB,IAAI;AAAA,MAC7B;AAAA,IACF;AAEA,QAAI,KAAK,gBAAgB,OAAO,MAAM,eAAe,IAAI,GAAG;AAC1D,YAAM,IAAI;AAAA,QACR,4BAA4B,IAAI;AAAA,MAClC;AAAA,IACF;AAEA,UAAM,MAAM,yBAAyB,gBAAgB,IAAI;AACzD,UAAM,OAAgC,EAAE,MAAM,WAAW;AACzD,QAAI,gBAAgB,OAAW,MAAK,cAAc;AAClD,QAAI,SAAS,eAAe,OAAW,MAAK,aAAa,QAAQ;AACjE,QAAI,SAAS,kBAAkB,OAAW,MAAK,gBAAgB,QAAQ;AACvE,QAAI,SAAS,iBAAiB,OAAW,MAAK,eAAe,QAAQ;AACrE,QAAI,SAAS,YAAY,OAAW,MAAK,UAAU,QAAQ;AAC3D,QAAI,SAAS,cAAc,OAAW,MAAK,YAAY,QAAQ;AAC/D,QAAI,SAAS,uBAAuB;AAClC,WAAK,qBAAqB,QAAQ;AACpC,QAAI,SAAS,eAAe,QAAW;AACrC,WAAK,aAAa,MAAM,KAAK,oBAAoB,QAAQ,UAAU;AAAA,IACrE;AAEA,UAAM,KAAK,QAAQ,gBAAgB,KAAK,IAAI;AAAA,EAC9C;AAAA,EAEA,MAAM,eACJ,MACA,UACA,YACA,aACA,SACe;AACf,QAAI,CAAC,KAAK,eAAe;AACvB,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AAEA,QAAI,oBAAoB,IAAI,IAAI,GAAG;AACjC,YAAM,IAAI;AAAA,QACR,uB
AAuB,IAAI;AAAA,MAC7B;AAAA,IACF;AAEA,QAAI,KAAK,gBAAgB;AACvB,YAAM,YAAY,MAAM,QAAQ,SAAS,IAAI,IAAI,SAAS,OAAO,CAAC,SAAS,IAAI;AAC/E,YAAM,UAAU,MAAM,QAAQ,SAAS,EAAE,IAAI,SAAS,KAAK,CAAC,SAAS,EAAE;AACvE,iBAAW,SAAS,WAAW;AAC7B,mBAAW,SAAS,SAAS;AAC3B,cAAI,KAAK,eAAe,OAAO,OAAO,MAAM,KAAK,GAAG;AAClD,kBAAM,IAAI;AAAA,cACR,4BAA4B,IAAI,UAAU,KAAK,SAAS,KAAK;AAAA,YAC/D;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAEA,UAAM,MAAM,yBAAyB,gBAAgB,IAAI;AACzD,UAAM,OAAgC;AAAA,MACpC;AAAA,MACA,MAAM,SAAS;AAAA,MACf,IAAI,SAAS;AAAA,IACf;AACA,QAAI,eAAe,OAAW,MAAK,aAAa;AAChD,QAAI,SAAS,iBAAiB,OAAW,MAAK,eAAe,SAAS;AACtE,QAAI,SAAS,gBAAgB,OAAW,MAAK,cAAc,SAAS;AACpE,QAAI,gBAAgB,OAAW,MAAK,cAAc;AAClD,QAAI,SAAS,eAAe,OAAW,MAAK,aAAa,QAAQ;AACjE,QAAI,SAAS,kBAAkB,OAAW,MAAK,gBAAgB,QAAQ;AACvE,QAAI,SAAS,iBAAiB,OAAW,MAAK,eAAe,QAAQ;AACrE,QAAI,SAAS,YAAY,OAAW,MAAK,UAAU,QAAQ;AAC3D,QAAI,SAAS,cAAc,OAAW,MAAK,YAAY,QAAQ;AAC/D,QAAI,SAAS,uBAAuB;AAClC,WAAK,qBAAqB,QAAQ;AACpC,QAAI,SAAS,eAAe,QAAW;AACrC,WAAK,aAAa,MAAM,KAAK,oBAAoB,QAAQ,UAAU;AAAA,IACrE;AAEA,UAAM,KAAK,QAAQ,gBAAgB,KAAK,IAAI;AAAA,EAC9C;AAAA,EAEA,MAAM,iBAAgC;AACpC,QAAI,CAAC,KAAK,eAAe;AACvB,YAAM,IAAI;AAAA,QACR;AAAA,MAEF;AAAA,IACF;AAEA,UAAM,SAAS,KAAK,iBAAiB;AACrC,UAAM,cAAc,MAAM,wBAAwB,QAAQ,KAAK,gBAAgB;AAE/E,QAAI,KAAK,gBAAgB;AACvB,WAAK,kBAAkB,qBAAqB,KAAK,gBAAgB,WAAW;AAAA,IAC9E,OAAO;AACL,WAAK,kBAAkB;AAAA,IACzB;AAAA,EACF;AAAA,EAEA,MAAc,oBACZ,YACwE;AACxE,UAAM,SAAS,WAAW,IAAI,CAAC,MAAM;AACnC,YAAM,SAAS,OAAO,EAAE,OAAO,aAAa,EAAE,GAAG,SAAS,IAAI,EAAE;AAChE,aAAO,EAAE,aAAa,EAAE,aAAa,WAAW,EAAE,WAAW,IAAI,OAAO;AAAA,IAC1E,CAAC;AACD,UAAM,QAAQ,IAAI,OAAO,IAAI,CAAC,MAAM,iBAAiB,EAAE,IAAI,KAAK,gBAAgB,CAAC,CAAC;AAClF,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,mBAAgC;AACtC,QAAI,CAAC,KAAK,YAAa,QAAO;AAE9B,UAAM,UAAU,KAAK;AAErB,UAAM,mBAAmB,CACvB,SACA,YACiC,QAAQ,MAAM,SAAS,OAAO;AAEjE,WAAO;AAAA,MACL,MAAM,QAAQ,KAAgD;AAC5D,eAAO,QAAQ,OAAO,iBAAiB,GAAG,CAAC;AAAA,MAC7C;AAAA,MACA,MAAM,QACJ,MACA,SACA,MACmC;AACnC,eAAO,QAAQ,OAAO,iBAAiB,MAAM,SAAS,IAAI,CAAC;AAAA,MAC7D;AAAA,MACA,MAAM,WAAW,MAAc,SAAiB,MAAgC;AAC9E,cAAM,SAAS,MAAM,QAAQ,OAAO,iBAAiB,MAAM,SAAS,IAAI,CAAC;AACzE,eAAO,WAAW;AAAA,MACpB;AAAA,MACA,MAAM,UAAU,QAAuD;AACrE,cAAM,OAAO,mBAAmB,MAAM;AACtC,YAAI,KAAK,aAAa,OAAO;AAC3B,gBAAM,SAAS,MAAM,QAAQ,OAAO,KAAK,KAAK;AAC9C,iBAAO,SAAS,CAAC,MAAM,IAAI,CAAC;AAAA,QAC9B;AACA,eAAO,iBAAiB,KAAK,SAAS,KAAK,OAAO;AAAA,MACpD;AAAA,MACA,MAAM,UAAU,QAAuD;AACrE,cAAM,OAAO,mBAAmB,MAAM;AACtC,YAAI,KAAK,aAAa,OAAO;AAC3B,gBAAM,SAAS,MAAM,QAAQ,OAAO,KAAK,KAAK;AAC9C,iBAAO,SAAS,CAAC,MAAM,IAAI,CAAC;AAAA,QAC9B;AACA,eAAO,iBAAiB,KAAK,SAAS,KAAK,OAAO;AAAA,MACpD;AAAA,IACF;AAAA,EACF;AACF;AAgCO,SAAS,kBACd,SACA,SACA,aACwC;AAYxC,SAAO,IAAI,gBAAgB,SAAS,SAAS,WAAW;AAG1D;;;ACjqCA,oBAAuB;AAEhB,SAAS,aAAqB;AACnC,aAAO,sBAAO;AAChB;;;ACsDO,SAAS,mBACd,MACwB;AACxB,SAAO;AAAA,IACL,KAAK,CAAC,eAAoC,KAAK,IAAI,UAAe;AAAA,IAClE,QAAQ,MAAM,KAAK,OAAO;AAAA,EAC5B;AACF;;;ACnCO,IAAM,uBAAuB,oBAAI,IAAI;AAAA,EAC1C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAEM,SAAS,uBAAuB,OAA8B;AACnE,QAAM,WAAY,MAA8C,aAAa;AAC7E,MAAI,YAAY,qBAAqB,IAAI,QAAQ,EAAG,QAAO;AAC3D,SAAO;AACT;AAUO,IAAM,mBAAmB;AAEzB,SAAS,oBAAoB,KAAa,cAA4B;AAC3E,MAAI,IAAI,WAAW,GAAG;AACpB,UAAM,IAAI;AAAA,MACR,GAAG,YAAY;AAAA,MACf;AAAA,IACF;AAAA,EACF;AACA,MAAI,CAAC,iBAAiB,KAAK,GAAG,GAAG;AAC/B,UAAM,IAAI;AAAA,MACR,GAAG,YAAY,gCAAgC,GAAG;AAAA,MAGlD;AAAA,IACF;AAAA,EACF;AACF;AASO,SAAS,SAAS,OAAgB,cAA8B;AACrE,MAAI,UAAU,OAAW,QAAO;AAChC,MAAI,UAAU,QAAQ,OAAO,UAAU,UAAU;AAC/C,UAAM,gBAAgB,uBAAuB,KAAK;AAClD,QAAI,eAAe;AACjB,YAAM,IAAI;AAAA,QACR,GAAG,YAAY,+BAA+B,aAAa;AAAA,QAE3D;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO,KAAK,UAAU,KAAK;AAC7B;AAgBO,SAAS,mBACd,KACA,MACA,QACA,cACe;AACf,MAAI,IAAI,WAAW,EAAG,Q
AAO;AAE7B,QAAM,UAAwB,CAAC;AAC/B,QAAM,OAAqB,CAAC;AAC5B,aAAW,MAAM,IAAK,EAAC,GAAG,SAAS,UAAU,MAAM,KAAK,EAAE;AAE1D,MAAI,OAAO;AAEX,MAAI,QAAQ,SAAS,GAAG;AACtB,UAAM,eAAe,QAAQ,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AACrD,WAAO,eAAe,IAAI,KAAK,YAAY;AAC3C,eAAW,MAAM,SAAS;AACxB,iBAAW,OAAO,GAAG,KAAM,qBAAoB,KAAK,YAAY;AAChE,aAAO,KAAK,KAAK,GAAG,KAAK,KAAK,GAAG,CAAC,EAAE;AAAA,IACtC;AAAA,EACF;AAEA,MAAI,KAAK,SAAS,GAAG;AACnB,UAAM,SAAS,KAAK,IAAI,MAAM,YAAY,EAAE,KAAK,IAAI;AACrD,WAAO,YAAY,IAAI,KAAK,MAAM;AAClC,eAAW,MAAM,MAAM;AACrB,iBAAW,OAAO,GAAG,KAAM,qBAAoB,KAAK,YAAY;AAChE,aAAO,KAAK,KAAK,GAAG,KAAK,KAAK,GAAG,CAAC,EAAE;AACpC,aAAO,KAAK,SAAS,GAAG,OAAO,YAAY,CAAC;AAAA,IAC9C;AAAA,EACF;AAEA,SAAO;AACT;;;AC3GA;AAGA,IAAMC,wBAAuB,oBAAI,IAAI;AAAA,EACnC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAWM,SAAS,sBAAsB,MAAe,OAAqB;AACxE,EAAAC,MAAK,MAAM,CAAC,GAAG,KAAK;AACtB;AAEA,SAASA,MAAK,MAAe,MAAyB,OAAqB;AACzE,MAAI,SAAS,QAAQ,SAAS,OAAW;AACzC,MAAI,iBAAiB,IAAI,GAAG;AAC1B,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,0MAGG,WAAW,IAAI,CAAC;AAAA,MAC3B;AAAA,IACF;AAAA,EACF;AACA,QAAM,IAAI,OAAO;AACjB,MAAI,MAAM,YAAY,MAAM,YAAY;AACtC,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,2CAA2C,CAAC,6CACP,WAAW,IAAI,CAAC;AAAA,MAC7D;AAAA,IACF;AAAA,EACF;AACA,MAAI,MAAM,UAAU;AAClB,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,uHAEG,WAAW,IAAI,CAAC;AAAA,MAC3B;AAAA,IACF;AAAA,EACF;AACA,MAAI,MAAM,SAAU;AACpB,MAAI,MAAM,QAAQ,IAAI,GAAG;AACvB,aAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,MAAAA,MAAK,KAAK,CAAC,GAAG,CAAC,GAAG,MAAM,OAAO,CAAC,CAAC,GAAG,KAAK;AAAA,IAC3C;AACA;AAAA,EACF;AAOA,QAAM,MAAM;AACZ,MAAI,OAAO,UAAU,eAAe,KAAK,KAAK,iBAAiB,GAAG;AAChE,UAAM,WAAW,IAAI,iBAAiB;AACtC,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,8CAA8C,iBAAiB,kBACtD,eAAe,QAAQ,CAAC,sGAElC,iBAAiB,4DACb,WAAW,IAAI,CAAC;AAAA,MAC3B;AAAA,IACF;AAAA,EACF;AAMA,QAAM,QAAQ,OAAO,eAAe,IAAI;AACxC,MAAI,UAAU,QAAQ,UAAU,OAAO,WAAW;AAChD,UAAM,OAAQ,KAA6C;AAC3D,UAAM,WAAW,QAAQ,OAAO,KAAK,SAAS,WAAW,KAAK,OAAO;AACrE,QAAID,sBAAqB,IAAI,QAAQ,GAAG;AACtC,YAAM,IAAI;AAAA,QACR,GAAG,KAAK,uCAAuC,QAAQ,2HAEJ,WAAW,IAAI,CAAC;AAAA,QACnE;AAAA,MACF;AAAA,IACF;AAKA,QAAI,gBAAgB,KAAM;AAC1B,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,oDAAoD,QAAQ,8FAE3C,WAAW,IAAI,CAAC;AAAA,MACzC;AAAA,IACF;AAAA,EACF;AACA,aAAW,OAAO,OAAO,KAAK,GAAG,GAAG;AAClC,IAAAC,MAAK,IAAI,GAAG,GAAG,CAAC,GAAG,MAAM,GAAG,GAAG,KAAK;AAAA,EACtC;AACF;AAEA,SAAS,WAAW,MAAiC;AACnD,SAAO,KAAK,WAAW,IAAI,WAAW,KAAK,IAAI,CAAC,MAAM,KAAK,UAAU,CAAC,CAAC,EAAE,KAAK,KAAK;AACrF;AAEA,SAAS,eAAe,OAAwB;AAC9C,MAAI,UAAU,KAAM,QAAO;AAC3B,MAAI,UAAU,OAAW,QAAO;AAChC,MAAI,OAAO,UAAU,SAAU,QAAO,KAAK,UAAU,KAAK;AAC1D,MAAI,OAAO,UAAU,YAAY,OAAO,UAAU,aAAa,OAAO,UAAU,UAAU;AACxF,WAAO,OAAO,KAAK;AAAA,EACrB;AACA,SAAO,OAAO;AAChB;;;AC5GO,IAAM,uBAAiD,OAAO,OAAO;AAAA,EAC1E,EAAE,QAAQ,CAAC,MAAM,EAAE;AAAA,EACnB,EAAE,QAAQ,CAAC,MAAM,EAAE;AAAA,EACnB,EAAE,QAAQ,CAAC,OAAO,EAAE;AAAA,EACpB,EAAE,QAAQ,CAAC,OAAO,EAAE;AAAA,EACpB,EAAE,QAAQ,CAAC,QAAQ,SAAS,EAAE;AAAA,EAC9B,EAAE,QAAQ,CAAC,WAAW,MAAM,EAAE;AAAA,EAC9B,EAAE,QAAQ,CAAC,SAAS,SAAS,EAAE;AAAA,EAC/B,EAAE,QAAQ,CAAC,WAAW,OAAO,EAAE;AACjC,CAAC;;;ACTM,IAAM,kBAAgD;AAAA,EAC3D,OAAO;AAAA,EACP,MAAM;AAAA,EACN,SAAS;AAAA,EACT,OAAO;AAAA,EACP,MAAM;AAAA,EACN,GAAG;AAAA,EACH,WAAW;AAAA,EACX,WAAW;AACb;AA8DO,SAAS,WAAW,MAAsB;AAC/C,oBAAkB,IAAI;AACtB,SAAO,IAAI,IAAI;AACjB;AAOO,SAAS,kBAAkB,MAAoB;AACpD,MAAI,CAAC,2BAA2B,KAAK,IAAI,GAAG;AAC1C,UAAM,IAAI,MAAM,2BAA2B,IAAI,0CAA0C;AAAA,EAC3F;AACF;AAqBO,SAAS,iBAAiB,OAAuB;AACtD,SAAO,IAAI,MAAM,QAAQ,MAAM,IAAI,CAAC;AACtC;;;ACrIO,IAAM,qBAAN,MAAM,oBAA6C;AAAA,EACxD,YACkB,SACA,aAChB;AAFgB;AACA;AAAA,EACf;AAAA,EAEH,SAAe;AACb,WAAO,IAAI,KAAK,KAAK,SAAS,CAAC;AAAA,EACjC;AAAA,EAEA,WAAmB;AACjB,WAAO,KAAK,UAAU,MAAO,KAAK,MAAM,KAAK,cAAc,GAAG;AAAA,EAChE;AAAA,EAEA,SAAmD;AACjD,WAAO,EAAE,SAAS,KAAK,SAAS,
aAAa,KAAK,YAAY;AAAA,EAChE;AAAA,EAEA,OAAO,WAAW,IAAgC;AAChD,UAAM,UAAU,KAAK,MAAM,KAAK,GAAI;AACpC,UAAM,eAAe,KAAK,UAAU,OAAQ;AAC5C,WAAO,IAAI,oBAAmB,SAAS,WAAW;AAAA,EACpD;AAAA,EAEA,OAAO,MAA0B;AAC/B,WAAO,oBAAmB,WAAW,KAAK,IAAI,CAAC;AAAA,EACjD;AACF;;;ACpBA,IAAM,uBAAuB;AAC7B,IAAM,2BAA2B;AAyBjC,SAAS,gBAAgB,OAAiC;AACxD,QAAM,SAAS,gBAAgB,KAAK;AACpC,MAAI,QAAQ;AACV,WAAO,EAAE,MAAM,WAAW,MAAM,EAAE;AAAA,EACpC;AACA,MAAI,MAAM,WAAW,OAAO,GAAG;AAC7B,UAAM,SAAS,MAAM,MAAM,CAAC;AAC5B,eAAW,QAAQ,OAAO,MAAM,GAAG,GAAG;AACpC,0BAAoB,MAAM,wBAAwB;AAAA,IACpD;AACA,WAAO,EAAE,MAAM,2BAA2B,MAAM,KAAK;AAAA,EACvD;AACA,MAAI,UAAU,QAAQ;AACpB,WAAO,EAAE,MAAM,4BAA4B;AAAA,EAC7C;AACA,QAAM,IAAI,eAAe,+CAA+C,KAAK,IAAI,eAAe;AAClG;AAaA,SAAS,UAAU,OAAyB;AAC1C,MAAI,UAAU,QAAQ,UAAU,OAAW,QAAO;AAClD,MACE,OAAO,UAAU,YACjB,OAAO,UAAU,YACjB,OAAO,UAAU,YACjB,OAAO,UAAU,WACjB;AACA,WAAO;AAAA,EACT;AACA,MAAI,iBAAiB,KAAM,QAAO,MAAM,QAAQ;AAChD,MAAI,OAAO,UAAU,UAAU;AAC7B,UAAM,gBAAgB,uBAAuB,KAAK;AAClD,QAAI,eAAe;AACjB,YAAM,IAAI;AAAA,QACR,0CAA0C,aAAa;AAAA,QAIvD;AAAA,MACF;AAAA,IACF;AACA,WAAO,KAAK,UAAU,KAAK;AAAA,EAC7B;AACA,SAAO,OAAO,KAAK;AACrB;AAEA,SAAS,cAAc,QAAqB,QAA2B;AACrE,QAAM,EAAE,KAAK,IAAI,gBAAgB,OAAO,KAAK;AAE7C,UAAQ,OAAO,IAAI;AAAA,IACjB,KAAK;AACH,aAAO,KAAK,UAAU,OAAO,KAAK,CAAC;AACnC,aAAO,GAAG,IAAI;AAAA,IAChB,KAAK;AACH,aAAO,KAAK,UAAU,OAAO,KAAK,CAAC;AACnC,aAAO,GAAG,IAAI;AAAA,IAChB,KAAK;AACH,aAAO,KAAK,UAAU,OAAO,KAAK,CAAC;AACnC,aAAO,GAAG,IAAI;AAAA,IAChB,KAAK;AACH,aAAO,KAAK,UAAU,OAAO,KAAK,CAAC;AACnC,aAAO,GAAG,IAAI;AAAA,IAChB,KAAK;AACH,aAAO,KAAK,UAAU,OAAO,KAAK,CAAC;AACnC,aAAO,GAAG,IAAI;AAAA,IAChB,KAAK;AACH,aAAO,KAAK,UAAU,OAAO,KAAK,CAAC;AACnC,aAAO,GAAG,IAAI;AAAA,IAChB,KAAK,MAAM;AACT,YAAM,SAAS,QAAQ,OAAO,OAAO,IAAI;AACzC,YAAM,eAAe,OAAO,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AACpD,iBAAW,KAAK,OAAQ,QAAO,KAAK,UAAU,CAAC,CAAC;AAChD,aAAO,GAAG,IAAI,QAAQ,YAAY;AAAA,IACpC;AAAA,IACA,KAAK,UAAU;AACb,YAAM,SAAS,QAAQ,OAAO,OAAO,QAAQ;AAC7C,YAAM,eAAe,OAAO,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AACpD,iBAAW,KAAK,OAAQ,QAAO,KAAK,UAAU,CAAC,CAAC;AAChD,aAAO,GAAG,IAAI,YAAY,YAAY;AAAA,IACxC;AAAA,IACA,KAAK,kBAAkB;AACrB,aAAO,KAAK,UAAU,OAAO,KAAK,CAAC;AACnC,aAAO,mCAAmC,IAAI;AAAA,IAChD;AAAA,IACA,KAAK,sBAAsB;AACzB,YAAM,SAAS,QAAQ,OAAO,OAAO,oBAAoB;AACzD,YAAM,eAAe,OAAO,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AACpD,iBAAW,KAAK,OAAQ,QAAO,KAAK,UAAU,CAAC,CAAC;AAChD,aAAO,mCAAmC,IAAI,qBAAqB,YAAY;AAAA,IACjF;AAAA,IACA;AACE,YAAM,IAAI;AAAA,QACR,oDAAoD,OAAO,OAAO,EAAE,CAAC;AAAA,QACrE;AAAA,MACF;AAAA,EACJ;AACF;AAEA,SAAS,QAAQ,OAAgB,IAAuB;AACtD,MAAI,CAAC,MAAM,QAAQ,KAAK,KAAK,MAAM,WAAW,GAAG;AAC/C,UAAM,IAAI,eAAe,aAAa,EAAE,sCAAsC,eAAe;AAAA,EAC/F;AACA,SAAO;AACT;AAEA,SAAS,eAAe,SAAmC,SAA4B;AACrF,MAAI,CAAC,SAAS,QAAS,QAAO;AAC9B,QAAM,EAAE,OAAO,UAAU,IAAI,QAAQ;AACrC,QAAM,EAAE,KAAK,IAAI,gBAAgB,KAAK;AACtC,QAAM,MAAM,cAAc,SAAS,SAAS;AAC5C,SAAO,aAAa,IAAI,IAAI,GAAG;AACjC;AAEA,SAAS,aAAa,SAAmC,QAA2B;AAClF,MAAI,SAAS,UAAU,OAAW,QAAO;AACzC,SAAO,KAAK,QAAQ,KAAK;AACzB,SAAO;AACT;AAMO,SAAS,cACd,OACA,OACA,SACA,SACmB;AACnB,QAAM,SAAoB,CAAC;AAC3B,QAAM,aAAuB,CAAC,aAAa;AAC3C,SAAO,KAAK,KAAK;AAEjB,aAAW,KAAK,SAAS;AACvB,eAAW,KAAK,cAAc,GAAG,MAAM,CAAC;AAAA,EAC1C;AAEA,MAAI,MAAM,iBAAiB,WAAW,KAAK,CAAC,UAAU,WAAW,KAAK,OAAO,CAAC;AAE9E,QAAM,cAAc,eAAe,SAAS,MAAM;AAClD,SAAO;AACP,SAAO,aAAa,SAAS,MAAM;AAEnC,SAAO,EAAE,KAAK,OAAO;AACvB;AAuBO,SAAS,iBACd,OACA,OACA,MACA,SACgD;AAChD,QAAM,UAAU,OAAO,KAAK,IAAI;AAChC,MAAI,QAAQ,WAAW,GAAG;AACxB,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,QAAM,cAAwB,CAAC;AAC/B,aAAW,SAAS,SAAS;AAC3B,UAAM,EAAE,IAAI,MAAM,IAAI,KAAK,KAAK;AAKhC,wBAAoB,OAAO,wBAAwB;AACnD,QAAI,OAAO,SAAS;AAElB,UAAI,UAAU,QAAW;AACvB,cAAM,IAAI;AAAA,UACR,cAAc,KAAK;AAAA,UAEnB;AAAA,QACF;AAAA,MACF;AACA,kBAAY,KAAK,eAAe,WAAW,
KAAK,CAAC,EAAE;AACnD;AAAA,IACF;AACA,QAAI,CAAC,OAAO;AACV,YAAM,IAAI;AAAA,QACR,cAAc,KAAK,SAAS,EAAE;AAAA,QAC9B;AAAA,MACF;AAAA,IACF;AACA,UAAM,EAAE,KAAK,IAAI,gBAAgB,KAAK;AACtC,UAAM,UAAU,QAAQ,IAAI;AAC5B,QAAI,OAAO,MAAO,aAAY,KAAK,OAAO,OAAO,QAAQ,WAAW,KAAK,CAAC,EAAE;AAAA,aACnE,OAAO,MAAO,aAAY,KAAK,OAAO,OAAO,QAAQ,WAAW,KAAK,CAAC,EAAE;AAAA,aACxE,OAAO,MAAO,aAAY,KAAK,OAAO,OAAO,QAAQ,WAAW,KAAK,CAAC,EAAE;AAAA,aACxE,OAAO,MAAO,aAAY,KAAK,OAAO,OAAO,QAAQ,WAAW,KAAK,CAAC,EAAE;AAAA;AAE/E,YAAM,IAAI;AAAA,QACR,iDAAiD,OAAO,EAAE,CAAC;AAAA,QAC3D;AAAA,MACF;AAAA,EACJ;AAEA,QAAM,SAAoB,CAAC,KAAK;AAChC,QAAM,aAAuB,CAAC,aAAa;AAC3C,aAAW,KAAK,SAAS;AACvB,eAAW,KAAK,cAAc,GAAG,MAAM,CAAC;AAAA,EAC1C;AAEA,QAAM,MACJ,UAAU,YAAY,KAAK,IAAI,CAAC,SACxB,WAAW,KAAK,CAAC,UAChB,WAAW,KAAK,OAAO,CAAC;AACnC,SAAO,EAAE,MAAM,EAAE,KAAK,OAAO,GAAG,QAAQ;AAC1C;AAWO,SAAS,oBACd,OACA,SACA,SACA,iBACmB;AACnB,MAAI,QAAQ,WAAW,GAAG;AACxB,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAEA,QAAM,SAAoB,CAAC;AAC3B,QAAM,aAAuB,CAAC;AAE9B,MAAI,iBAAiB;AACnB,QAAI,gBAAgB,QAAQ;AAC1B,iBAAW,KAAK,aAAa;AAC7B,aAAO,KAAK,EAAE;AAAA,IAChB,OAAO;AACL,iBAAW,KAAK,4BAA4B;AAC5C,aAAO,KAAK,KAAK,WAAW,gBAAgB,IAAI,CAAC,EAAE;AAAA,IACrD;AAAA,EACF;AAEA,aAAW,KAAK,SAAS;AACvB,eAAW,KAAK,cAAc,GAAG,MAAM,CAAC;AAAA,EAC1C;AAEA,QAAM,MACJ,iBAAiB,WAAW,KAAK,CAAC,UAAU,WAAW,KAAK,OAAO,CAAC,KACpE,eAAe,SAAS,MAAM,IAC9B,aAAa,SAAS,MAAM;AAC9B,SAAO,EAAE,KAAK,OAAO;AACvB;AAEO,SAAS,qBACd,OACA,OACA,OACmB;AACnB,SAAO;AAAA,IACL,KAAK,iBAAiB,WAAW,KAAK,CAAC;AAAA,IACvC,QAAQ,CAAC,OAAO,KAAK;AAAA,EACvB;AACF;AAoDA,SAAS,yBAAyB,OAAuB;AACvD,MAAI,SAAS,gBAAiB,QAAO;AACrC,MAAI,UAAU,UAAU,MAAM,WAAW,OAAO,EAAG,QAAO;AAC1D,SAAO,QAAQ,KAAK;AACtB;AAiCO,SAAS,0BACd,OACA,OACA,QACA,SACA,SAC6D;AAC7D,MAAI,OAAO,WAAW,GAAG;AACvB,UAAM,IAAI;AAAA,MACR;AAAA,MAEA;AAAA,IACF;AAAA,EACF;AAMA,QAAM,OAAO,oBAAI,IAAY;AAC7B,QAAM,eAAyB,CAAC;AAChC,aAAW,KAAK,QAAQ;AACtB,QAAI,CAAC,KAAK,IAAI,CAAC,GAAG;AAChB,WAAK,IAAI,CAAC;AACV,mBAAa,KAAK,CAAC;AAAA,IACrB;AAAA,EACF;AAEA,QAAM,cAAwB,CAAC;AAC/B,QAAM,UAAiC,CAAC;AACxC,WAAS,MAAM,GAAG,MAAM,aAAa,QAAQ,OAAO;AAClD,UAAM,QAAQ,aAAa,GAAG;AAC9B,UAAM,YAAY,yBAAyB,KAAK;AAChD,UAAM,EAAE,KAAK,IAAI,gBAAgB,SAAS;AAM1C,UAAM,QAAQ,iBAAiB,KAAK;AACpC,gBAAY,KAAK,GAAG,IAAI,OAAO,KAAK,EAAE;AAEtC,QAAI;AACJ,QAAI;AACJ,QAAI,cAAc,QAAQ;AACxB,aAAO;AAAA,IACT,WAAW,UAAU,WAAW,OAAO,GAAG;AACxC,aAAO;AAQP,sBAAgB,UAAU,GAAG;AAC7B,YAAM,YAAY,iBAAiB,aAAa;AAChD,kBAAY,KAAK,wBAAwB,UAAU,MAAM,CAAC,CAAC,SAAS,SAAS,EAAE;AAAA,IACjF,OAAO;AAGL,UAAI,cAAc,IAAK,QAAO;AAAA,eACrB,cAAc,eAAe,cAAc,YAAa,QAAO;AAAA,UACnE,QAAO;AAAA,IACd;AACA,YAAQ,KAAK,EAAE,OAAO,MAAM,WAAW,cAAc,CAAC;AAAA,EACxD;AAEA,QAAM,SAAoB,CAAC,KAAK;AAChC,QAAM,aAAuB,CAAC,aAAa;AAC3C,aAAW,KAAK,SAAS;AACvB,eAAW,KAAK,cAAc,GAAG,MAAM,CAAC;AAAA,EAC1C;AAEA,MAAI,MACF,UAAU,YAAY,KAAK,IAAI,CAAC,SACxB,WAAW,KAAK,CAAC,UAChB,WAAW,KAAK,OAAO,CAAC;AACnC,SAAO,eAAe,SAAS,MAAM;AACrC,SAAO,aAAa,SAAS,MAAM;AAEnC,SAAO,EAAE,MAAM,EAAE,KAAK,OAAO,GAAG,QAAQ;AAC1C;AAeO,SAAS,mBACd,KACA,SACyB;AACzB,QAAM,MAA+B,CAAC;AACtC,aAAW,KAAK,SAAS;AACvB,UAAM,MAAM,IAAI,EAAE,KAAK;AACvB,YAAQ,EAAE,MAAM;AAAA,MACd,KAAK;AACH,YAAI,EAAE,KAAK,IAAI,QAAQ,QAAQ,QAAQ,SAAY,OAAO,OAAO,GAAG;AACpE;AAAA,MACF,KAAK;AACH,YAAI,QAAQ,QAAQ,QAAQ,QAAW;AAGrC,cAAI,EAAE,KAAK,IAAI;AAAA,QACjB,WAAW,OAAO,QAAQ,UAAU;AAClC,cAAI,EAAE,KAAK,IAAI,OAAO,GAAG;AAAA,QAC3B,WAAW,OAAO,QAAQ,UAAU;AAClC,cAAI,EAAE,KAAK,IAAI;AAAA,QACjB,OAAO;AACL,cAAI,EAAE,KAAK,IAAI,OAAO,GAAG;AAAA,QAC3B;AACA;AAAA,MACF,KAAK,qBAAqB;AACxB,cAAM,KAAK,SAAS,GAAG;AACvB,YAAI,EAAE,KAAK,IAAI,mBAAmB,WAAW,EAAE;AAC/C;AAAA,MACF;AAAA,MACA,KAAK;AAGH,YAAI,QAAQ,QAAQ,QAAQ,UAAa,QAAQ,IAAI;AACnD,cAAI,EAAE,KAAK,IAAI,CAAC;AAAA,QAClB,OAAO;AACL,cAAI,EAAE,KAAK,IAAI,KAAK,MAAM,GAAa;AAAA,QACzC;AACA;
AAAA,MACF,KAAK,QAAQ;AAKX,cAAM,IAAI,IAAI,EAAE,SAAU;AAC1B,YAAI,QAAQ,QAAQ,QAAQ,QAAW;AACrC,cAAI,EAAE,KAAK,IAAI;AAAA,QACjB,WAAW,MAAM,YAAY,MAAM,SAAS;AAG1C,cAAI,EAAE,KAAK,IAAI,OAAO,QAAQ,WAAW,KAAK,MAAM,GAAG,IAAI;AAAA,QAC7D,WAAW,MAAM,aAAa,OAAO,QAAQ,UAAU;AACrD,cAAI,EAAE,KAAK,IAAI,OAAO,GAAG;AAAA,QAC3B,OAAO;AAGL,cAAI,EAAE,KAAK,IAAI;AAAA,QACjB;AACA;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO;AACT;AAkCO,SAAS,cACd,OACA,OACA,QACmB;AACnB,MAAI,OAAO,QAAQ,WAAW,GAAG;AAC/B,UAAM,IAAI;AAAA,MACR;AAAA,MAEA;AAAA,IACF;AAAA,EACF;AACA,QAAM,YAAY,OAAO,aAAa;AAMtC,QAAM,UAAU,gBAAgB,MAAM,EAAE;AACxC,QAAM,UAAU,gBAAgB,MAAM,EAAE;AACxC,QAAM,WAAW,gBAAgB,OAAO,EAAE;AAC1C,QAAM,WAAW,gBAAgB,OAAO,EAAE;AAC1C,QAAM,aAAa,gBAAgB,SAAS,EAAE;AAC9C,QAAM,eAAe,cAAc,YAAY,UAAU;AAEzD,QAAM,YAAuB,CAAC,OAAO,OAAO,OAAO;AACnD,QAAM,aAAuB,CAAC,eAAe,GAAG,UAAU,MAAM;AAQhE,QAAM,eAAe,OAAO,QAAQ,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AAC5D,aAAW,KAAK,GAAG,YAAY,QAAQ,YAAY,GAAG;AACtD,aAAW,OAAO,OAAO,QAAS,WAAU,KAAK,GAAG;AAEpD,MAAI,OAAO,UAAU,QAAW;AAC9B,eAAW,KAAK,GAAG,QAAQ,MAAM;AACjC,cAAU,KAAK,OAAO,KAAK;AAAA,EAC7B;AACA,MAAI,OAAO,UAAU,QAAW;AAC9B,eAAW,KAAK,GAAG,QAAQ,MAAM;AACjC,cAAU,KAAK,OAAO,KAAK;AAAA,EAC7B;AAQA,MAAI,OAAO,YAAY,eAAe;AACpC,eAAW,KAAK,GAAG,OAAO,OAAO,OAAO,EAAE;AAAA,EAC5C;AAEA,MAAI,MAAM,iBAAiB,WAAW,KAAK,CAAC,UAAU,WAAW,KAAK,OAAO,CAAC;AAO9E,MAAI,OAAO,SAAS;AAClB,WAAO,eAAe,EAAE,SAAS,OAAO,QAAQ,GAAG,SAAS;AAAA,EAC9D;AAEA,MAAI,OAAO,mBAAmB,QAAW;AAIvC,UAAM,aAAa,OAAO,QAAQ,SAAS,OAAO;AAClD,WAAO;AACP,cAAU,KAAK,UAAU;AAAA,EAC3B;AAEA,SAAO,EAAE,KAAK,QAAQ,UAAU;AAClC;AAUO,SAAS,qBACd,OACA,OACA,YACmB;AACnB,MAAI,WAAW,WAAW,GAAG;AAC3B,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,IACF;AAAA,EACF;AACA,QAAM,eAAe,WAAW,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AACxD,QAAM,YAAuB,CAAC,OAAO,aAAa;AAClD,aAAW,OAAO,WAAY,WAAU,KAAK,GAAG;AAIhD,QAAM,UAAU,gBAAgB,MAAM,EAAE;AACxC,QAAM,UAAU,gBAAgB,MAAM,EAAE;AACxC,QAAM,aAAa,gBAAgB,SAAS,EAAE;AAM9C,SAAO;AAAA,IACL,KACE,iBAAiB,WAAW,KAAK,CAAC,0BACT,UAAU,YAAY,OAAO,MAAM,OAAO,QAAQ,OAAO,QAAQ,YAAY;AAAA,IACxG,QAAQ;AAAA,EACV;AACF;AAoBO,SAAS,WACd,OACA,OACA,OACA,QACA,WACA,MACmB;AAOnB,wBAAsB,OAAO,MAAM,oBAAoB;AACvD,MAAI,SAAS,WAAW;AACtB,UAAMC,OAAM,0BAA0B,WAAW,KAAK,CAAC;AAAA;AAAA;AAGvD,UAAM,SAAoB;AAAA,MACxB;AAAA,MACA;AAAA,MACA,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,OAAO;AAAA,MACP,KAAK,UAAU,OAAO,QAAQ,CAAC,CAAC;AAAA,MAChC,OAAO,KAAK;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AACA,WAAO,EAAE,KAAAA,MAAK,OAAO;AAAA,EACvB;AAGA,QAAM,eAA0B;AAAA,IAC9B;AAAA,IACA;AAAA,IACA,OAAO;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,KAAK,UAAU,OAAO,QAAQ,CAAC,CAAC;AAAA,IAChC,OAAO,KAAK;AAAA,IACZ;AAAA,IACA;AAAA,EACF;AAGA,QAAM,MAAM,aAAa,OAAO,QAAQ,CAAC,CAAC;AAC1C,QAAM,eAA0B,CAAC;AACjC,QAAM,WACJ,mBAAmB,KAAK,0BAA0B,cAAc,wBAAwB,KACxF;AASF,QAAM,MAAM,eAAe,WAAW,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iBAS7B,QAAQ;AAAA;AAAA;AAAA;AAKvB,SAAO,EAAE,KAAK,QAAQ,CAAC,GAAG,cAAc,GAAG,YAAY,EAAE;AAC3D;AAaO,SAAS,cACd,OACA,OACA,OACA,QACA,WACmB;AACnB,+BAA6B,MAAM;AACnC,QAAM,aAAuB,CAAC;AAC9B,QAAM,SAAoB,CAAC;AAE3B,MAAI,OAAO,aAAa;AACtB,0BAAsB,OAAO,aAAa,oBAAoB;AAC9D,eAAW,KAAK,YAAY;AAC5B,WAAO,KAAK,KAAK,UAAU,OAAO,WAAW,CAAC;AAAA,EAChD,WAAW,OAAO,WAAW,OAAO,QAAQ,SAAS,GAAG;AACtD,eAAW,MAAM,OAAO,SAAS;AAC/B,UAAI,CAAC,GAAG,OAAQ,uBAAsB,GAAG,OAAO,oBAAoB;AAAA,IACtE;AACA,UAAM,OAAO;AAAA,MACX,OAAO;AAAA,MACP;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,QAAI,SAAS,MAAM;AACjB,iBAAW,KAAK,YAAY,IAAI,EAAE;AAAA,IACpC;AAAA,EACF;AAEA,MAAI,OAAO,MAAM,QAAW;AAC1B,eAAW,KAAK,SAAS;AACzB,WAAO,KAAK,OAAO,CAAC;AAAA,EACtB;AAEA,aAAW,KAAK,kBAAkB;AAClC,SAAO,KAAK,SAAS;AAGrB,SAAO,KAAK,OAAO,KAAK;AAExB,SAAO;AAAA,IACL,KAAK,UAAU,WAAW,KAAK,CAAC,QAAQ,WAAW,KAAK,IAAI,CAAC;AAAA,IAC7D;AAA
A,EACF;AACF;AAEO,SAAS,cAAc,OAAe,OAAe,OAAkC;AAC5F,SAAO;AAAA,IACL,KAAK,eAAe,WAAW,KAAK,CAAC;AAAA,IACrC,QAAQ,CAAC,OAAO,KAAK;AAAA,EACvB;AACF;AAYO,SAAS,kBACd,OACA,OACA,SACmB;AACnB,QAAM,SAAoB,CAAC,KAAK;AAChC,QAAM,aAAuB,CAAC,aAAa;AAC3C,aAAW,KAAK,SAAS;AACvB,eAAW,KAAK,cAAc,GAAG,MAAM,CAAC;AAAA,EAC1C;AACA,SAAO;AAAA,IACL,KAAK,eAAe,WAAW,KAAK,CAAC,UAAU,WAAW,KAAK,OAAO,CAAC;AAAA,IACvE;AAAA,EACF;AACF;AAgBO,SAAS,kBACd,OACA,OACA,SACA,WACA,WACmB;AACnB,QAAM,UAAU,aAAa,SAAS;AACtC,MAAI,QAAQ,WAAW,GAAG;AACxB,UAAM,IAAI;AAAA,MACR;AAAA,MAGA;AAAA,IACF;AAAA,EACF;AACA,aAAW,MAAM,SAAS;AACxB,QAAI,CAAC,GAAG,OAAQ,uBAAsB,GAAG,OAAO,oBAAoB;AAAA,EACtE;AACA,QAAM,YAAuB,CAAC;AAC9B,QAAM,OAAO;AAAA,IACX;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACA,MAAI,SAAS,MAAM;AAIjB,UAAM,IAAI;AAAA,MACR;AAAA,MACA;AAAA,IACF;AAAA,EACF;AACA,QAAM,aAAuB,CAAC,YAAY,IAAI,IAAI,kBAAkB;AACpE,YAAU,KAAK,SAAS;AAKxB,QAAM,cAAyB,CAAC,KAAK;AACrC,QAAM,aAAuB,CAAC,aAAa;AAC3C,aAAW,KAAK,SAAS;AACvB,eAAW,KAAK,cAAc,GAAG,WAAW,CAAC;AAAA,EAC/C;AAEA,SAAO;AAAA,IACL,KACE,UAAU,WAAW,KAAK,CAAC,QAAQ,WAAW,KAAK,IAAI,CAAC,UAC/C,WAAW,KAAK,OAAO,CAAC;AAAA,IACnC,QAAQ,CAAC,GAAG,WAAW,GAAG,WAAW;AAAA,EACvC;AACF;AAaO,SAAS,yBAAyB,OAAe,aAAwC;AAE9F,QAAM,UAAU,WAAW,WAAW;AACtC,SAAO;AAAA,IACL,KAAK,eAAe,WAAW,KAAK,CAAC;AAAA,IACrC,QAAQ,CAAC,GAAG,OAAO,IAAI;AAAA,EACzB;AACF;AASO,SAAS,wBAAwB,OAAe,aAAwC;AAC7F,QAAM,UAAU,WAAW,WAAW;AACtC,SAAO;AAAA,IACL,KAAK,6BAA6B,WAAW,KAAK,CAAC;AAAA,IACnD,QAAQ,CAAC,GAAG,OAAO,IAAI;AAAA,EACzB;AACF;AAEA,SAAS,WAAW,OAAuB;AACzC,SAAO,MAAM,QAAQ,OAAO,MAAM,EAAE,QAAQ,MAAM,KAAK,EAAE,QAAQ,MAAM,KAAK;AAC9E;AAOO,SAAS,YAAY,KAAiD;AAC3E,QAAM,aAAa,IAAI;AACvB,QAAM,OAAO,aAAc,KAAK,MAAM,UAAU,IAAgC,CAAC;AAEjF,QAAM,YAAY,SAAS,IAAI,UAAU;AACzC,QAAM,YAAY,SAAS,IAAI,UAAU;AAEzC,QAAM,SAAkC;AAAA,IACtC,OAAO,IAAI;AAAA,IACX,MAAM,IAAI;AAAA,IACV,SAAS,IAAI;AAAA,IACb,OAAO,IAAI;AAAA,IACX,MAAM,IAAI;AAAA,IACV;AAAA,IACA,WAAW,mBAAmB,WAAW,SAAS;AAAA,IAClD,WAAW,mBAAmB,WAAW,SAAS;AAAA,EACpD;AAEA,MAAI,IAAI,MAAM,QAAQ,IAAI,MAAM,QAAW;AACzC,WAAO,IAAI,OAAO,IAAI,CAAC;AAAA,EACzB;AACA,SAAO;AACT;AAEA,SAAS,SAAS,OAAwB;AACxC,MAAI,OAAO,UAAU,SAAU,QAAO;AACtC,MAAI,OAAO,UAAU,SAAU,QAAO,OAAO,KAAK;AAClD,MAAI,OAAO,UAAU,SAAU,QAAO,OAAO,KAAK;AAClD,SAAO;AACT;;;ACx/BA,IAAM,sBAAsB;AAC5B,IAAM,sBAAsB;AAO5B,IAAM,qBAAqB;AAE3B,SAAS,MAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAOA,SAAS,WAAW,GAAuB,GAA2C;AACpF,MAAI,MAAM,OAAW,QAAO;AAC5B,MAAI,MAAM,OAAW,QAAO;AAC5B,SAAO,KAAK,IAAI,GAAG,CAAC;AACtB;AAcA,SAAS,gBACP,YACA,eACA,WACO;AACP,QAAM,UACJ,iBAAiB,gBAAgB,KAAK,OAAO,SAAS,aAAa,IAC/D,KAAK,MAAM,aAAa,IACxB;AACN,QAAM,WACJ,aAAa,YAAY,KAAK,OAAO,SAAS,SAAS,IAAI,KAAK,MAAM,SAAS,IAAI;AAErF,MAAI,YAAY,YAAY,aAAa,UAAU;AACjD,WAAO,CAAC,UAAU;AAAA,EACpB;AAEA,QAAM,SAAgB,CAAC;AACvB,MAAI,UAAe,CAAC;AACpB,MAAI,oBAAoB;AACxB,aAAW,QAAQ,YAAY;AAC7B,UAAM,aAAa,KAAK,OAAO;AAC/B,UAAM,kBAAkB,QAAQ,SAAS,IAAI;AAC7C,UAAM,mBAAmB,oBAAoB,aAAa;AAC1D,QAAI,QAAQ,SAAS,MAAM,mBAAmB,mBAAmB;AAC/D,aAAO,KAAK,OAAO;AACnB,gBAAU,CAAC;AACX,0BAAoB;AAAA,IACtB;AACA,YAAQ,KAAK,IAAI;AACjB,yBAAqB;AAAA,EACvB;AACA,MAAI,QAAQ,SAAS,EAAG,QAAO,KAAK,OAAO;AAC3C,SAAO;AACT;AAEA,IAAM,+BAAN,MAAiE;AAAA,EAC/D,YACmB,IACA,WACA,cACjB;AAHiB;AACA;AACA;AAAA,EAChB;AAAA,EAEH,MAAM,OAAO,OAAkD;AAC7D,UAAM,OAAO,qBAAqB,KAAK,WAAW,KAAK,cAAc,KAAK;AAC1E,UAAM,OAAO,MAAM,KAAK,GAAG,IAAI,KAAK,KAAK,KAAK,MAAM;AACpD,WAAO,KAAK,WAAW,IAAI,OAAO,YAAY,KAAK,CAAC,CAAC;AAAA,EACvD;AAAA,EAEA,MAAM,MAAM,SAAwB,SAAsD;AACxF,UAAM,OAAO,cAAc,KAAK,WAAW,KAAK,cAAc,SAAS,OAAO;AAC9E,UAAM,OAAO,MAAM,KAAK,GAAG,IAAI,KAAK,KAAK,KAAK,MAAM;AACpD,WAAO,KAAK,IAAI,WAAW;AAAA,EAC7B;AAAA,EAEA,MAAM,OAAO,OAAe,QAAwB,MAAgC;AAClF,UAAM,OAAO,WAAW,KAAK,WAAW,KAAK,cAAc,OAAO,QAAQ,KAAK,IAAI,GAAG,IAAI;AAC1F,UA
AM,KAAK,GAAG,IAAI,KAAK,KAAK,KAAK,MAAM;AAAA,EACzC;AAAA,EAEA,MAAM,UAAU,OAAe,QAAsC;AACnE,UAAM,OAAO,cAAc,KAAK,WAAW,KAAK,cAAc,OAAO,QAAQ,KAAK,IAAI,CAAC;AAEvF,UAAM,mBAAmB,GAAG,KAAK,GAAG;AACpC,UAAM,OAAO,MAAM,KAAK,GAAG,IAAI,kBAAkB,KAAK,MAAM;AAC5D,QAAI,KAAK,WAAW,GAAG;AACrB,YAAM,IAAI;AAAA,QACR,2CAA2C,KAAK,WAAW,KAAK,YAAY;AAAA,QAC5E;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,UAAU,OAA8B;AAC5C,UAAM,OAAO,cAAc,KAAK,WAAW,KAAK,cAAc,KAAK;AACnE,UAAM,KAAK,GAAG,IAAI,KAAK,KAAK,KAAK,MAAM;AAAA,EACzC;AACF;AAEA,IAAM,yBAAN,MAAqD;AAAA,EAGnD,YACmB,UACA,WACA,cACjB;AAHiB;AACA;AACA;AAAA,EAChB;AAAA,EANc,aAAkC,CAAC;AAAA,EAQpD,OAAO,OAAe,QAAwB,MAAuB;AACnE,SAAK,WAAW;AAAA,MACd,WAAW,KAAK,WAAW,KAAK,cAAc,OAAO,QAAQ,KAAK,IAAI,GAAG,IAAI;AAAA,IAC/E;AAAA,EACF;AAAA,EAEA,UAAU,OAAe,QAA6B;AACpD,SAAK,WAAW;AAAA,MACd,cAAc,KAAK,WAAW,KAAK,cAAc,OAAO,QAAQ,KAAK,IAAI,CAAC;AAAA,IAC5E;AAAA,EACF;AAAA,EAEA,UAAU,OAAqB;AAC7B,SAAK,WAAW,KAAK,cAAc,KAAK,WAAW,KAAK,cAAc,KAAK,CAAC;AAAA,EAC9E;AAAA,EAEA,MAAM,SAAwB;AAC5B,QAAI,KAAK,WAAW,WAAW,EAAG;AAClC,UAAM,KAAK,SAAS,MAAM,KAAK,UAAU;AACzC,SAAK,WAAW,SAAS;AAAA,EAC3B;AACF;AAkCA,IAAM,mBAAoD;AAAA,EACxD;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,IAAM,oBAAN,MAAM,mBAA8D;AAAA,EAQlE,YACmB,UACjB,WACA,cACA,WACA;AAJiB;AAKjB,SAAK,iBAAiB;AACtB,SAAK,eAAe;AACpB,SAAK,YAAY;AACjB,UAAM,OAAO,IAAI,IAAsB,gBAAgB;AACvD,QAAI,OAAO,SAAS,gBAAgB,YAAY;AAC9C,WAAK,IAAI,mBAAmB;AAAA,IAC9B;AACA,SAAK,eAAe,mBAAmB,IAAI;AAAA,EAC7C;AAAA,EArBS;AAAA;AAAA,EAEA;AAAA,EACA;AAAA;AAAA,EAEQ;AAAA;AAAA,EAoBjB,MAAM,OAAO,OAAkD;AAC7D,UAAM,OAAO,qBAAqB,KAAK,gBAAgB,KAAK,cAAc,KAAK;AAC/E,UAAM,OAAO,MAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAC1D,WAAO,KAAK,WAAW,IAAI,OAAO,YAAY,KAAK,CAAC,CAAC;AAAA,EACvD;AAAA,EAEA,MAAM,MAAM,SAAwB,SAAsD;AACxF,UAAM,OAAO,cAAc,KAAK,gBAAgB,KAAK,cAAc,SAAS,OAAO;AACnF,UAAM,OAAO,MAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAC1D,WAAO,KAAK,IAAI,WAAW;AAAA,EAC7B;AAAA;AAAA,EAIA,MAAM,OAAO,OAAe,QAAwB,MAAgC;AAClF,UAAM,OAAO;AAAA,MACX,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA,KAAK,IAAI;AAAA,MACT;AAAA,IACF;AACA,UAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAAA,EAC/C;AAAA,EAEA,MAAM,UAAU,OAAe,QAAsC;AACnE,UAAM,OAAO,cAAc,KAAK,gBAAgB,KAAK,cAAc,OAAO,QAAQ,KAAK,IAAI,CAAC;AAK5F,UAAM,mBAAmB,GAAG,KAAK,GAAG;AACpC,UAAM,OAAO,MAAM,KAAK,SAAS,IAAI,kBAAkB,KAAK,MAAM;AAClE,QAAI,KAAK,WAAW,GAAG;AACrB,YAAM,IAAI;AAAA,QACR,2CAA2C,KAAK,WAAW,KAAK,YAAY;AAAA,QAC5E;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,UAAU,OAA8B;AAC5C,UAAM,OAAO,cAAc,KAAK,gBAAgB,KAAK,cAAc,KAAK;AACxE,UAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAAA,EAC/C;AAAA;AAAA,EAIA,MAAM,eAAkB,IAAwD;AAC9E,QAAI,CAAC,KAAK,SAAS,aAAa;AAC9B,YAAM,IAAI;AAAA,QACR;AAAA,QAIA;AAAA,MACF;AAAA,IACF;AACA,WAAO,KAAK,SAAS,YAAY,OAAO,OAAO;AAC7C,YAAM,YAAY,IAAI;AAAA,QACpB;AAAA,QACA,KAAK;AAAA,QACL,KAAK;AAAA,MACP;AACA,aAAO,GAAG,SAAS;AAAA,IACrB,CAAC;AAAA,EACH;AAAA,EAEA,cAA4B;AAC1B,WAAO,IAAI,uBAAuB,KAAK,UAAU,KAAK,gBAAgB,KAAK,YAAY;AAAA,EACzF;AAAA;AAAA,EAIA,SAAS,eAAuB,MAA8B;AAK5D,QAAI,CAAC,iBAAiB,cAAc,SAAS,GAAG,GAAG;AACjD,YAAM,IAAI;AAAA,QACR,wCAAwC,aAAa;AAAA,QAErD;AAAA,MACF;AAAA,IACF;AACA,QAAI,CAAC,QAAQ,KAAK,SAAS,GAAG,GAAG;AAC/B,YAAM,IAAI;AAAA,QACR,kEAAkE,IAAI;AAAA,QAEtE;AAAA,MACF;AAAA,IACF;AACA,UAAM,kBAAkB,KAAK,eACzB,GAAG,KAAK,YAAY,IAAI,aAAa,IAAI,IAAI,KAC7C,GAAG,aAAa,IAAI,IAAI;AAC5B,UAAM,WAAW,KAAK,YAAY,GAAG,KAAK,SAAS,IAAI,IAAI,KAAK;AAChE,WAAO,IAAI,mBAAkB,KAAK,UAAU,KAAK,gBAAgB,iBAAiB,QAAQ;AAAA,EAC5F;AAAA;AAAA,EAIA,MAAM,kBACJ,KACA,QACA,SACwB;AAExB,UAAM,CAAC,aAAa,WAAW,IAAI,MAAM,QAAQ,IAAI;AAAA,MACnD,OAAO,UAAU,EAAE,MAAM,KAAK,qBAAqB,MAAM,OAAO,EAAE,CAAC;AAAA,MACnE,OAAO,UAAU,EAAE,MAAM,KAAK,qBAAqB,MAAM,OAAO,EAAE,CAAC;AAAA,IACrE,CAAC;
AAED,UAAM,OAAO,oBAAI,IAAY;AAC7B,UAAM,aAAuB,CAAC;AAC9B,eAAW,QAAQ,CAAC,GAAG,aAAa,GAAG,WAAW,GAAG;AACnD,UAAI,KAAK,YAAY,cAAe;AACpC,YAAM,QAAQ,iBAAiB,KAAK,MAAM,KAAK,SAAS,KAAK,IAAI;AACjE,UAAI,CAAC,KAAK,IAAI,KAAK,GAAG;AACpB,aAAK,IAAI,KAAK;AACd,mBAAW,KAAK,KAAK;AAAA,MACvB;AAAA,IACF;AAEA,UAAM,YAAY,iBAAiB,GAAG;AACtC,UAAM,wBAAwB,SAAS,yBAAyB;AAMhE,QAAI,mBAAmB;AACvB,QAAI,uBAAuB;AACzB,YAAM,SAAS,KAAK,eAAe,GAAG,KAAK,YAAY,IAAI,GAAG,KAAK;AACnE,YAAM,YAAY,wBAAwB,KAAK,gBAAgB,MAAM;AACrE,YAAM,YAAY,MAAM,KAAK,SAAS,IAAI,UAAU,KAAK,UAAU,MAAM;AACzE,YAAM,QAAQ,UAAU,CAAC;AACzB,YAAM,IAAI,OAAO;AACjB,yBAAmB,OAAO,MAAM,WAAW,OAAO,CAAC,IAAI,OAAO,KAAK,CAAC;AAAA,IACtE;AAQA,UAAM,kBAAuC,WAAW;AAAA,MAAI,CAAC,OAC3D,cAAc,KAAK,gBAAgB,KAAK,cAAc,EAAE;AAAA,IAC1D;AACA,oBAAgB,KAAK,cAAc,KAAK,gBAAgB,KAAK,cAAc,SAAS,CAAC;AACrF,QAAI,uBAAuB;AACzB,YAAM,SAAS,KAAK,eAAe,GAAG,KAAK,YAAY,IAAI,GAAG,KAAK;AACnE,sBAAgB,KAAK,yBAAyB,KAAK,gBAAgB,MAAM,CAAC;AAAA,IAC5E;AAEA,UAAM;AAAA,MACJ,SAAS;AAAA,MACT;AAAA,MACA;AAAA,IACF,IAAI,MAAM,KAAK,sBAAsB,iBAAiB,OAAO;AAM7D,UAAM,QAAQ,OAAO,WAAW;AAChC,UAAM,eAAe,QAAQ,WAAW,SAAS;AACjD,UAAM,cAAc;AAOpB,UAAM,8BAA8B,yBAAyB,QAAQ,IAAI;AACzE,UAAM,UAAU,cAAc,+BAA+B,QAAQ,mBAAmB;AAExF,WAAO,EAAE,SAAS,SAAS,QAAQ,cAAc,YAAY;AAAA,EAC/D;AAAA,EAEA,MAAM,gBACJ,QACA,QACA,SACqB;AAIrB,UAAM,kBACJ,OAAO,UAAU,SACb,EAAE,GAAG,QAAQ,qBAAqB,OAAO,uBAAuB,KAAK,IACrE,EAAE,GAAG,QAAQ,OAAO,GAAG,qBAAqB,OAAO,uBAAuB,KAAK;AACrF,UAAM,QAAQ,MAAM,OAAO,UAAU,eAAe;AACpD,UAAM,SAAS,MAAM,IAAI,CAAC,MAAM,iBAAiB,EAAE,MAAM,EAAE,SAAS,EAAE,IAAI,CAAC;AAE3E,QAAI,OAAO,WAAW,GAAG;AACvB,aAAO,EAAE,SAAS,GAAG,SAAS,GAAG,QAAQ,CAAC,EAAE;AAAA,IAC9C;AAEA,UAAM,aAAa,OAAO;AAAA,MAAI,CAAC,OAC7B,cAAc,KAAK,gBAAgB,KAAK,cAAc,EAAE;AAAA,IAC1D;AAEA,WAAO,KAAK,sBAAsB,YAAY,OAAO;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA2BA,MAAc,sBACZ,YACA,SACyE;AACzE,QAAI,WAAW,WAAW,GAAG;AAC3B,aAAO,EAAE,SAAS,GAAG,SAAS,GAAG,QAAQ,CAAC,EAAE;AAAA,IAC9C;AACA,UAAM,aAAa,SAAS,cAAc;AAU1C,UAAM,kBAAkB,SAAS;AACjC,UAAM,UAAU,WAAW,iBAAiB,KAAK,SAAS,YAAY;AACtE,UAAM,SAAS,gBAAgB,YAAY,SAAS,KAAK,SAAS,cAAc;AAEhF,UAAM,SAA2B,CAAC;AAClC,QAAI,UAAU;AACd,QAAI,UAAU;AACd,UAAM,eAAe,OAAO;AAE5B,UAAM,iBAAiB,KAAK,SAAS;AAErC,aAAS,aAAa,GAAG,aAAa,OAAO,QAAQ,cAAc;AACjE,YAAM,QAAQ,OAAO,UAAU;AAO/B,YAAM,wBACJ,MAAM,WAAW,KACjB,mBAAmB,UACnB,MAAM,CAAC,EAAE,OAAO,SAAS;AAE3B,UAAI,YAAY;AAChB,UAAI,YAA0B;AAC9B,YAAM,mBAAmB,wBAAwB,IAAI;AACrD,eAAS,UAAU,GAAG,WAAW,kBAAkB,WAAW;AAC5D,YAAI;AACF,gBAAM,KAAK,SAAS,MAAM,KAAK;AAC/B,sBAAY;AACZ;AAAA,QACF,SAAS,KAAK;AACZ,sBAAY,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC;AAC9D,cAAI,UAAU,kBAAkB;AAC9B,kBAAM,QAAQ,KAAK,IAAI,sBAAsB,KAAK,IAAI,GAAG,OAAO,GAAG,kBAAkB;AACrF,kBAAM,MAAM,KAAK;AAAA,UACnB;AAAA,QACF;AAAA,MACF;AAEA,UAAI,WAAW;AACb,mBAAW,MAAM;AACjB,mBAAW;AAAA,MACb,WAAW,WAAW;AACpB,eAAO,KAAK;AAAA,UACV;AAAA,UACA,OAAO;AAAA,UACP,gBAAgB,MAAM;AAAA,QACxB,CAAC;AAAA,MACH;AAEA,UAAI,SAAS,YAAY;AACvB,gBAAQ,WAAW;AAAA,UACjB,kBAAkB;AAAA,UAClB;AAAA,UACA,cAAc;AAAA,QAChB,CAAC;AAAA,MACH;AAAA,IACF;AAEA,WAAO,EAAE,SAAS,SAAS,OAAO;AAAA,EACpC;AAAA;AAAA,EAIA,MAAM,gBACJ,QACA,gBAC8B;AAC9B,UAAM,OAAO,mBAAmB,MAAM;AACtC,QAAI,KAAK,aAAa,OAAO;AAC3B,YAAM,IAAI;AAAA,QACR;AAAA,QAEA;AAAA,MACF;AAAA,IACF;AAKA,UAAM,OAAO,kBAAkB,KAAK;AACpC,UAAM,kBAAkB;AAAA,MACtB;AAAA,MACA,QAAQ,SAAS,KAAK;AAAA,IACxB;AACA,UAAM,OAAO;AAAA,MACX,KAAK;AAAA,MACL,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,IACF;AACA,UAAM,OAAO,MAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAC1D,WAAO,KAAK,IAAI,WAAW;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,UAAU,MAAqB,SAAyD;AAC5F,UAAM,EAAE,MAAM,QAAQ,IAAI;AAAA,MACxB,KAAK;AAAA,MACL,KAAK;AAAA,M
ACL;AAAA,MACA;AAAA,IACF;AACA,UAAM,OAAO,MAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAC1D,UAAM,MAAM,KAAK,CAAC,KAAK,CAAC;AACxB,UAAM,MAA8B,CAAC;AACrC,eAAW,SAAS,SAAS;AAC3B,YAAM,IAAI,IAAI,KAAK;AACnB,UAAI,MAAM,QAAQ,MAAM,QAAW;AAIjC,cAAM,KAAK,KAAK,KAAK,EAAE;AACvB,YAAI,KAAK,IAAI,OAAO,QAAQ,OAAO,MAAM;AAAA,MAC3C,WAAW,OAAO,MAAM,UAAU;AAChC,YAAI,KAAK,IAAI,OAAO,CAAC;AAAA,MACvB,WAAW,OAAO,MAAM,UAAU;AAChC,YAAI,KAAK,IAAI;AAAA,MACf,OAAO;AAGL,YAAI,KAAK,IAAI,OAAO,CAAC;AAAA,MACvB;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuBA,MAAM,WAAW,SAAwB,SAA4C;AACnF,UAAM,OAAO,kBAAkB,KAAK,gBAAgB,KAAK,cAAc,OAAO;AAC9E,WAAO,KAAK,wBAAwB,MAAM,OAAO;AAAA,EACnD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAM,WACJ,SACA,OACA,SACqB;AACrB,UAAM,OAAO;AAAA,MACX,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA,MAAM;AAAA,MACN,KAAK,IAAI;AAAA,IACX;AACA,WAAO,KAAK,wBAAwB,MAAM,OAAO;AAAA,EACnD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBA,MAAM,OAAO,QAA6C;AACxD,QAAI,OAAO,QAAQ,WAAW,GAAG;AAC/B,aAAO,OAAO,UAAU,EAAE,OAAO,CAAC,GAAG,SAAS,CAAC,EAAE,IAAI,EAAE,OAAO,CAAC,EAAE;AAAA,IACnE;AACA,UAAM,OAAO,cAAc,KAAK,gBAAgB,KAAK,cAAc,MAAM;AACzE,UAAM,OAAO,MAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAC1D,UAAM,QAAQ,KAAK,IAAI,WAAW;AAClC,QAAI,CAAC,OAAO,SAAS;AACnB,aAAO,EAAE,MAAM;AAAA,IACjB;AAIA,UAAM,YAAY,OAAO,aAAa;AACtC,UAAM,aAAa,MAAM,IAAI,CAAC,MAAO,cAAc,YAAY,EAAE,OAAO,EAAE,IAAK;AAC/E,UAAM,gBAAgB,CAAC,GAAG,IAAI,IAAI,UAAU,CAAC;AAC7C,QAAI,cAAc,WAAW,GAAG;AAC9B,aAAO,EAAE,OAAO,SAAS,CAAC,EAAE;AAAA,IAC9B;AACA,UAAM,cAAc,qBAAqB,KAAK,gBAAgB,KAAK,cAAc,aAAa;AAC9F,UAAM,cAAc,MAAM,KAAK,SAAS,IAAI,YAAY,KAAK,YAAY,MAAM;AAC/E,UAAM,QAAQ,oBAAI,IAA+B;AACjD,eAAW,OAAO,aAAa;AAC7B,YAAM,OAAO,YAAY,GAAG;AAG5B,YAAM,IAAI,KAAK,MAAM,IAAI;AAAA,IAC3B;AACA,UAAM,UAAU,WAAW,IAAI,CAAC,QAAQ,MAAM,IAAI,GAAG,KAAK,IAAI;AAC9D,WAAO,EAAE,OAAO,QAAQ;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBA,MAAM,mBACJ,QACA,SACA,SACyC;AACzC,UAAM,EAAE,MAAM,QAAQ,IAAI;AAAA,MACxB,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,UAAM,OAAO,MAAM,KAAK,SAAS,IAAI,KAAK,KAAK,KAAK,MAAM;AAC1D,WAAO,KAAK,IAAI,CAAC,QAAQ,mBAAmB,KAAK,OAAO,CAAC;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAc,wBACZ,MACA,SACqB;AACrB,UAAM,mBAAmB,GAAG,KAAK,GAAG;AACpC,UAAM,aAAa,SAAS,cAAc;AAC1C,QAAI,YAA0B;AAC9B,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAI;AACF,cAAM,OAAO,MAAM,KAAK,SAAS,IAAI,kBAAkB,KAAK,MAAM;AAClE,cAAM,UAAU,KAAK;AACrB,YAAI,SAAS,YAAY;AACvB,kBAAQ,WAAW;AAAA,YACjB,kBAAkB;AAAA,YAClB,cAAc;AAAA,YACd,cAAc;AAAA,UAChB,CAAC;AAAA,QACH;AACA,eAAO,EAAE,SAAS,SAAS,GAAG,QAAQ,CAAC,EAAE;AAAA,MAC3C,SAAS,KAAK;AACZ,oBAAY,eAAe,QAAQ,MAAM,IAAI,MAAM,OAAO,GAAG,CAAC;AAC9D,YAAI,UAAU,YAAY;AACxB,gBAAM,QAAQ,KAAK,IAAI,sBAAsB,KAAK,IAAI,GAAG,OAAO,GAAG,kBAAkB;AACrF,gBAAM,MAAM,KAAK;AAAA,QACnB;AAAA,MACF;AAAA,IACF;AAKA,WAAO;AAAA,MACL,SAAS;AAAA,MACT,SAAS;AAAA,MACT,QAAQ;AAAA,QACN;AAAA,UACE,YAAY;AAAA,UACZ,OAAO,aAAa,IAAI,MAAM,oCAAoC;AAAA,UAClE,gBAAgB;AAAA,QAClB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAUO,SAAS,oBACd,UACA,WACA,UAAgC,CAAC,GACC;AAClC,QAAM,eAAe,QAAQ,gBAAgB;AAC7C,QAAM,YAAY,QAAQ,aAAa;AACvC,SAAO,IAAI,kBAAkB,UAAU,WAAW,cAAc,SAAS;AAC3E;","names":["import_node_crypto","import_node_crypto","serializeFirestoreTypes","deserializeFirestoreTypes","buildWritableNodeRecord","buildWritableEdgeRecord","buildWritableNodeRecord","buildWritableEdgeRecord","result","FIRESTORE_TYPE_NAMES","walk","sql"]}