@typicalday/firegraph 0.11.2 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78) hide show
  1. package/README.md +355 -78
  2. package/dist/backend-DuvHGgK1.d.cts +1897 -0
  3. package/dist/backend-DuvHGgK1.d.ts +1897 -0
  4. package/dist/backend.cjs +365 -5
  5. package/dist/backend.cjs.map +1 -1
  6. package/dist/backend.d.cts +25 -5
  7. package/dist/backend.d.ts +25 -5
  8. package/dist/backend.js +209 -7
  9. package/dist/backend.js.map +1 -1
  10. package/dist/chunk-2DHMNTV6.js +16 -0
  11. package/dist/chunk-2DHMNTV6.js.map +1 -0
  12. package/dist/chunk-4MMQ5W74.js +288 -0
  13. package/dist/chunk-4MMQ5W74.js.map +1 -0
  14. package/dist/{chunk-5753Y42M.js → chunk-C2QMD7RY.js} +6 -10
  15. package/dist/chunk-C2QMD7RY.js.map +1 -0
  16. package/dist/chunk-D4J7Z4FE.js +67 -0
  17. package/dist/chunk-D4J7Z4FE.js.map +1 -0
  18. package/dist/chunk-EQJUUVFG.js +14 -0
  19. package/dist/chunk-EQJUUVFG.js.map +1 -0
  20. package/dist/chunk-N5HFDWQX.js +23 -0
  21. package/dist/chunk-N5HFDWQX.js.map +1 -0
  22. package/dist/chunk-PAD7WFFU.js +573 -0
  23. package/dist/chunk-PAD7WFFU.js.map +1 -0
  24. package/dist/chunk-TK64DNVK.js +256 -0
  25. package/dist/chunk-TK64DNVK.js.map +1 -0
  26. package/dist/{chunk-NJSOD64C.js → chunk-WRTFC5NG.js} +438 -30
  27. package/dist/chunk-WRTFC5NG.js.map +1 -0
  28. package/dist/client-BKi3vk0Q.d.ts +34 -0
  29. package/dist/client-BrsaXtDV.d.cts +34 -0
  30. package/dist/cloudflare/index.cjs +1386 -74
  31. package/dist/cloudflare/index.cjs.map +1 -1
  32. package/dist/cloudflare/index.d.cts +217 -13
  33. package/dist/cloudflare/index.d.ts +217 -13
  34. package/dist/cloudflare/index.js +639 -180
  35. package/dist/cloudflare/index.js.map +1 -1
  36. package/dist/codegen/index.d.cts +1 -1
  37. package/dist/codegen/index.d.ts +1 -1
  38. package/dist/errors-BRc3I_eH.d.cts +73 -0
  39. package/dist/errors-BRc3I_eH.d.ts +73 -0
  40. package/dist/firestore-enterprise/index.cjs +3877 -0
  41. package/dist/firestore-enterprise/index.cjs.map +1 -0
  42. package/dist/firestore-enterprise/index.d.cts +141 -0
  43. package/dist/firestore-enterprise/index.d.ts +141 -0
  44. package/dist/firestore-enterprise/index.js +985 -0
  45. package/dist/firestore-enterprise/index.js.map +1 -0
  46. package/dist/firestore-standard/index.cjs +3117 -0
  47. package/dist/firestore-standard/index.cjs.map +1 -0
  48. package/dist/firestore-standard/index.d.cts +49 -0
  49. package/dist/firestore-standard/index.d.ts +49 -0
  50. package/dist/firestore-standard/index.js +283 -0
  51. package/dist/firestore-standard/index.js.map +1 -0
  52. package/dist/index.cjs +809 -534
  53. package/dist/index.cjs.map +1 -1
  54. package/dist/index.d.cts +24 -100
  55. package/dist/index.d.ts +24 -100
  56. package/dist/index.js +184 -531
  57. package/dist/index.js.map +1 -1
  58. package/dist/registry-Bc7h6WTM.d.cts +64 -0
  59. package/dist/registry-C2KUPVZj.d.ts +64 -0
  60. package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.cts} +1 -56
  61. package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.ts} +1 -56
  62. package/dist/{serialization-ZZ7RSDRX.js → serialization-OE2PFZMY.js} +6 -4
  63. package/dist/sqlite/index.cjs +3631 -0
  64. package/dist/sqlite/index.cjs.map +1 -0
  65. package/dist/sqlite/index.d.cts +111 -0
  66. package/dist/sqlite/index.d.ts +111 -0
  67. package/dist/sqlite/index.js +1164 -0
  68. package/dist/sqlite/index.js.map +1 -0
  69. package/package.json +33 -3
  70. package/dist/backend-U-MLShlg.d.ts +0 -97
  71. package/dist/backend-np4gEVhB.d.cts +0 -97
  72. package/dist/chunk-5753Y42M.js.map +0 -1
  73. package/dist/chunk-NJSOD64C.js.map +0 -1
  74. package/dist/chunk-R7CRGYY4.js +0 -94
  75. package/dist/chunk-R7CRGYY4.js.map +0 -1
  76. package/dist/types-BGWxcpI_.d.cts +0 -736
  77. package/dist/types-BGWxcpI_.d.ts +0 -736
  78. /package/dist/{serialization-ZZ7RSDRX.js.map → serialization-OE2PFZMY.js.map} +0 -0
@@ -0,0 +1,1897 @@
1
+ import { WhereFilterOp, Timestamp, FieldValue } from '@google-cloud/firestore';
2
+
3
+ /**
4
+ * Firegraph Configuration — project-level config file support.
5
+ *
6
+ * Projects create a `firegraph.config.ts` (or `.js`/`.mjs`) in their root:
7
+ *
8
+ * @example
9
+ * ```ts
10
+ * import { defineConfig } from 'firegraph';
11
+ *
12
+ * export default defineConfig({
13
+ * entities: './entities',
14
+ * project: 'my-project',
15
+ * collection: 'graph',
16
+ * });
17
+ * ```
18
+ */
19
+
20
+ /** Display contexts where views can appear. */
21
+ type ViewContext = 'listing' | 'detail' | 'inline';
22
+ /** View resolution configuration for a single entity type. */
23
+ interface ViewResolverConfig {
24
+ /** Default view name (e.g. 'card'). Falls back to 'json' if unset. */
25
+ default?: string;
26
+ /** View to use in NodeBrowser listing rows. */
27
+ listing?: string;
28
+ /** View to use on the NodeDetail page. */
29
+ detail?: string;
30
+ /** View to use for inline/embedded previews (edge rows, traversal). */
31
+ inline?: string;
32
+ }
33
+ /** Declarative view defaults, keyed by entity type. */
34
+ interface ViewDefaultsConfig {
35
+ /** Node view defaults keyed by aType (e.g. 'user', 'task'). */
36
+ nodes?: Record<string, ViewResolverConfig>;
37
+ /** Edge view defaults keyed by axbType (e.g. 'hasDeparture'). */
38
+ edges?: Record<string, ViewResolverConfig>;
39
+ }
40
+ /** Project-level firegraph configuration. */
41
+ interface FiregraphConfig {
42
+ /** Path to entities directory (per-entity folder convention). */
43
+ entities?: string;
44
+ /** GCP project ID. */
45
+ project?: string;
46
+ /** Firestore collection path (default: 'graph'). */
47
+ collection?: string;
48
+ /** Firestore emulator address (e.g. '127.0.0.1:8080'). */
49
+ emulator?: string;
50
+ /**
51
+ * Query execution backend.
52
+ *
53
+ * - `'pipeline'` (default) — Uses Firestore Pipeline API. Requires Enterprise
54
+ * Firestore. Enables indexless queries on `data.*` fields.
55
+ * - `'standard'` — Uses standard Firestore `.where().get()` queries. Not
56
+ * recommended for production. See README for risk details.
57
+ *
58
+ * When the emulator is active, always falls back to `'standard'`.
59
+ */
60
+ queryMode?: QueryMode;
61
+ /**
62
+ * AI chat configuration. Auto-detects `claude` CLI on PATH by default.
63
+ * Set to `false` to disable chat even if claude is available.
64
+ */
65
+ chat?: false | {
66
+ /** Claude model to use (default: 'sonnet'). */
67
+ model?: string;
68
+ /** Maximum concurrent claude processes (default: 2). */
69
+ maxConcurrency?: number;
70
+ };
71
+ /** Editor-specific settings. */
72
+ editor?: {
73
+ /** Server port (default: 3883). */
74
+ port?: number;
75
+ /** Force read-only mode. */
76
+ readonly?: boolean;
77
+ };
78
+ /** Declarative view defaults per entity type (overrides per-entity meta.json). */
79
+ viewDefaults?: ViewDefaultsConfig;
80
+ /**
81
+ * Dynamic registry mode. When set, the editor loads type definitions
82
+ * from Firestore meta-nodes in addition to filesystem entities.
83
+ * Filesystem types take precedence on name conflicts.
84
+ */
85
+ registryMode?: DynamicRegistryConfig;
86
+ }
87
+ /**
88
+ * Identity function providing type-checking and autocomplete for config files.
89
+ *
90
+ * @example
91
+ * ```ts
92
+ * import { defineConfig } from 'firegraph';
93
+ * export default defineConfig({ entities: './entities' });
94
+ * ```
95
+ */
96
+ declare function defineConfig(config: FiregraphConfig): FiregraphConfig;
97
+ /**
98
+ * Resolve which view to show for a given entity.
99
+ *
100
+ * 1. If `context` is provided and a context-specific default exists, use it.
101
+ * 2. Falls back to `resolverConfig.default`.
102
+ * 3. Ultimate fallback: `'json'`.
103
+ *
104
+ * Only returns view names that exist in `availableViewNames`.
105
+ */
106
+ declare function resolveView(resolverConfig: ViewResolverConfig | undefined, availableViewNames: string[], context?: ViewContext): string;
107
+
108
+ /**
109
+ * Backend-agnostic timestamp.
110
+ *
111
+ * Structurally compatible with `@google-cloud/firestore`'s `Timestamp` so
112
+ * that records returned by either the Firestore or SQLite backend can be
113
+ * consumed through the same `StoredGraphRecord` shape.
114
+ *
115
+ * Firestore's native `Timestamp` already satisfies this interface, so
116
+ * existing Firestore consumers see no behavior change. The SQLite backend
117
+ * returns instances of `GraphTimestampImpl` which also satisfies it.
118
+ */
119
+ interface GraphTimestamp {
120
+ readonly seconds: number;
121
+ readonly nanoseconds: number;
122
+ toDate(): Date;
123
+ toMillis(): number;
124
+ }
125
+
126
+ interface GraphRecord {
127
+ aType: string;
128
+ aUid: string;
129
+ axbType: string;
130
+ bType: string;
131
+ bUid: string;
132
+ data: Record<string, unknown>;
133
+ createdAt: Timestamp | FieldValue;
134
+ updatedAt: Timestamp | FieldValue;
135
+ /** Schema version — set automatically when the registry entry has migrations. */
136
+ v?: number;
137
+ }
138
+ interface StoredGraphRecord {
139
+ aType: string;
140
+ aUid: string;
141
+ axbType: string;
142
+ bType: string;
143
+ bUid: string;
144
+ data: Record<string, unknown>;
145
+ /**
146
+ * Backend-agnostic timestamp. Firestore returns its native `Timestamp`
147
+ * (which structurally satisfies `GraphTimestamp`); the SQLite backends
148
+ * return a `GraphTimestampImpl` instance.
149
+ */
150
+ createdAt: GraphTimestamp;
151
+ updatedAt: GraphTimestamp;
152
+ /** Schema version — set automatically when the registry entry has migrations. */
153
+ v?: number;
154
+ }
155
+ interface WhereClause {
156
+ field: string;
157
+ op: '==' | '!=' | '<' | '<=' | '>' | '>=';
158
+ value: unknown;
159
+ }
160
+ interface FindEdgesParams {
161
+ aType?: string;
162
+ aUid?: string;
163
+ axbType?: string;
164
+ bType?: string;
165
+ bUid?: string;
166
+ limit?: number;
167
+ orderBy?: {
168
+ field: string;
169
+ direction?: 'asc' | 'desc';
170
+ };
171
+ where?: WhereClause[];
172
+ /** Set to true to allow queries that may cause full collection scans. */
173
+ allowCollectionScan?: boolean;
174
+ }
175
+ interface FindNodesParams {
176
+ aType: string;
177
+ limit?: number;
178
+ orderBy?: {
179
+ field: string;
180
+ direction?: 'asc' | 'desc';
181
+ };
182
+ where?: WhereClause[];
183
+ /** Set to true to allow queries that may cause full collection scans. */
184
+ allowCollectionScan?: boolean;
185
+ }
186
+ interface QueryOptions {
187
+ limit?: number;
188
+ orderBy?: {
189
+ field: string;
190
+ direction?: 'asc' | 'desc';
191
+ };
192
+ }
193
+ type QueryPlan = {
194
+ strategy: 'get';
195
+ docId: string;
196
+ } | {
197
+ strategy: 'query';
198
+ filters: QueryFilter[];
199
+ options?: QueryOptions;
200
+ };
201
+ interface QueryFilter {
202
+ field: string;
203
+ op: WhereFilterOp;
204
+ value: unknown;
205
+ }
206
+ /**
207
+ * Closed string-literal union of every logical capability a storage backend
208
+ * may declare. Capabilities express user-facing query features, not SDK
209
+ * details — the same logical capability may map to different SDK calls per
210
+ * backend (e.g. `query.aggregate` is `runAggregationQuery` on Firestore
211
+ * Standard, `pipeline().aggregate()` on Firestore Enterprise, `GROUP BY` on
212
+ * SQL).
213
+ *
214
+ * See `.claude/backend-capabilities.md` for the design rationale and the
215
+ * capability matrix per backend.
216
+ */
217
+ type Capability = 'core.read' | 'core.write' | 'core.transactions' | 'core.batch' | 'core.subgraph' | 'query.aggregate' | 'query.select' | 'query.join' | 'query.dml' | 'traversal.serverSide' | 'search.fullText' | 'search.geo' | 'search.vector' | 'realtime.listen' | 'raw.firestore' | 'raw.sql';
218
+ /**
219
+ * An executable migration function that transforms data from one schema
220
+ * version to the next. Can be synchronous or asynchronous.
221
+ */
222
+ type MigrationFn = (data: Record<string, unknown>) => Record<string, unknown> | Promise<Record<string, unknown>>;
223
+ /**
224
+ * A single migration step in a registry entry.
225
+ * Transforms data from `fromVersion` to `toVersion`.
226
+ */
227
+ interface MigrationStep {
228
+ fromVersion: number;
229
+ toVersion: number;
230
+ up: MigrationFn;
231
+ }
232
+ /**
233
+ * A stored migration step for dynamic registry types.
234
+ * The `up` field is a source code string that will be compiled at runtime.
235
+ *
236
+ * @example
237
+ * ```ts
238
+ * { fromVersion: 0, toVersion: 1, up: "(data) => ({ ...data, status: 'draft' })" }
239
+ * ```
240
+ */
241
+ interface StoredMigrationStep {
242
+ fromVersion: number;
243
+ toVersion: number;
244
+ up: string;
245
+ }
246
+ /**
247
+ * Pluggable executor interface for compiling migration function source
248
+ * strings into executable functions. Used for dynamic registry migrations.
249
+ *
250
+ * The default executor uses SES (Secure ECMAScript) Compartments with
251
+ * JSON marshaling for isolation. Users can supply an alternative via
252
+ * `GraphClientOptions.migrationSandbox`.
253
+ */
254
+ type MigrationExecutor = (source: string) => MigrationFn;
255
+ /** Write-back mode for auto-migrated records. */
256
+ type MigrationWriteBack = 'off' | 'eager' | 'background';
257
+ /**
258
+ * One field in a composite index. The string shorthand form defaults to
259
+ * ascending order; use the object form when a field needs to be indexed
260
+ * descending (e.g., pagination by `{ path: 'updatedAt', desc: true }`).
261
+ */
262
+ interface IndexFieldSpec {
263
+ /**
264
+ * Field path. Top-level firegraph fields (`aType`, `aUid`, `axbType`,
265
+ * `bType`, `bUid`, `createdAt`, `updatedAt`, `v`) resolve to their
266
+ * underlying columns. Dotted paths like `'data.status'` or
267
+ * `'data.author.name'` index into the JSON data payload.
268
+ *
269
+ * Each dotted component must match `/^[A-Za-z_][A-Za-z0-9_-]*$/` — keys
270
+ * with dots, quotes, brackets, spaces, or other syntax characters are
271
+ * rejected at DDL build time. Indexes on exotic keys are not supported
272
+ * because SQLite expression indexes must match the query compiler's
273
+ * output verbatim, and inlining quoted path components into DDL would
274
+ * desynchronize the two compilers. If you need to filter by an exotic
275
+ * key, use `replaceNode` / `replaceEdge` writes rather than an indexed
276
+ * field.
277
+ */
278
+ path: string;
279
+ /** Descending order; defaults to ascending. */
280
+ desc?: boolean;
281
+ }
282
+ /**
283
+ * Declarative secondary index. Translators emit a `CREATE INDEX` statement
284
+ * (SQLite) or a `FirestoreIndex` composite (Firestore) per spec.
285
+ *
286
+ * Composite indexes support the prefix of their `fields` list — a spec
287
+ * `{ fields: ['aType', 'axbType'] }` also covers queries filtering on
288
+ * `aType` alone.
289
+ *
290
+ * @example
291
+ * ```ts
292
+ * // Plain composite on top-level fields
293
+ * { fields: ['aType', 'axbType'] }
294
+ *
295
+ * // Mixed string + object form; `updatedAt` descending
296
+ * { fields: ['aType', 'aUid', 'axbType', { path: 'updatedAt', desc: true }] }
297
+ *
298
+ * // JSON data-field index (SQLite: expression index on json_extract)
299
+ * { fields: ['aType', 'axbType', 'data.status'] }
300
+ *
301
+ * // Partial index (SQLite only — Firestore ignores the `where` clause)
302
+ * { fields: ['aType'], where: "json_extract(data, '$.archived') = 0" }
303
+ * ```
304
+ */
305
+ interface IndexSpec {
306
+ /**
307
+ * Ordered field list. String shorthand = ascending. Use `IndexFieldSpec`
308
+ * form to mark individual fields descending.
309
+ */
310
+ fields: Array<string | IndexFieldSpec>;
311
+ /**
312
+ * Partial-index predicate. Applied verbatim after `WHERE` in the emitted
313
+ * SQLite DDL. Ignored (with a one-time warning) by the Firestore
314
+ * generator — Firestore composite indexes do not support predicates.
315
+ */
316
+ where?: string;
317
+ }
318
+ interface RegistryEntry {
319
+ aType: string;
320
+ axbType: string;
321
+ bType: string;
322
+ /** JSON Schema object for the data payload. */
323
+ jsonSchema?: object;
324
+ description?: string;
325
+ inverseLabel?: string;
326
+ /** Data field to use as the display title (e.g. 'name', 'date'). */
327
+ titleField?: string;
328
+ /** Data field to use as the display subtitle (e.g. 'status', 'difficulty'). */
329
+ subtitleField?: string;
330
+ /**
331
+ * Scope patterns constraining where this type can exist.
332
+ * Omit or leave empty to allow everywhere (backwards compatible).
333
+ *
334
+ * Patterns:
335
+ * - `'root'` — top-level collection only
336
+ * - `'agents'` — exact subgraph name match
337
+ * - `'workflow/agents'` — exact path match
338
+ * - `'*​/agents'` — `*` matches one segment
339
+ * - `'**​/agents'` — `**` matches zero or more segments
340
+ */
341
+ allowedIn?: string[];
342
+ /**
343
+ * Subgraph name where cross-graph edges of this type live.
344
+ *
345
+ * When set, forward traversal queries the named subgraph under each
346
+ * source node (e.g., `{collection}/{sourceUid}/{targetGraph}`) instead
347
+ * of the current collection. The subgraph contains both the edge
348
+ * documents and the target nodes they reference.
349
+ *
350
+ * Reverse traversal is unaffected — if you're already in the subgraph,
351
+ * the edges are local.
352
+ *
353
+ * Only applies to edge entries (not node self-loop entries).
354
+ * Must be a single segment (no `/`).
355
+ *
356
+ * @example
357
+ * ```ts
358
+ * { aType: 'task', axbType: 'assignedTo', bType: 'agent', targetGraph: 'workflow' }
359
+ * // Forward traversal from task1: queries {collection}/task1/workflow
360
+ * ```
361
+ */
362
+ targetGraph?: string;
363
+ /**
364
+ * Schema version for this type's data payload.
365
+ * **Computed automatically** from `migrations` as `max(toVersion)`.
366
+ * Do not set directly — provide migrations instead.
367
+ */
368
+ schemaVersion?: number;
369
+ /**
370
+ * Ordered list of migrations to transform data from older versions
371
+ * to the current version. The schema version is derived as the highest
372
+ * `toVersion` in this array.
373
+ */
374
+ migrations?: MigrationStep[];
375
+ /**
376
+ * Per-entry write-back override for auto-migrated records.
377
+ * Takes precedence over `GraphClientOptions.migrationWriteBack`.
378
+ * Omit to inherit the global setting.
379
+ */
380
+ migrationWriteBack?: MigrationWriteBack;
381
+ /**
382
+ * Secondary indexes tied to this triple. Each spec becomes a single
383
+ * backend-native composite index scoped to rows matching
384
+ * `(aType, axbType, bType)` — though the DDL does not currently restrict
385
+ * by triple, so authors should think of these as globally-applied indexes
386
+ * declared on the triple's behalf.
387
+ *
388
+ * Use this to accelerate `findNodes` / `findEdges` queries that filter
389
+ * on `data.*` fields or compose with firegraph's top-level fields in ways
390
+ * the default preset doesn't cover.
391
+ */
392
+ indexes?: IndexSpec[];
393
+ }
394
+ /** Topology declaration for an edge (from edge.json). */
395
+ interface EdgeTopology {
396
+ from: string | string[];
397
+ to: string | string[];
398
+ inverseLabel?: string;
399
+ /**
400
+ * Subgraph name where cross-graph edges of this type live.
401
+ * See `RegistryEntry.targetGraph` for full documentation.
402
+ */
403
+ targetGraph?: string;
404
+ }
405
+ /** A discovered entity from the per-entity folder convention. */
406
+ interface DiscoveredEntity {
407
+ kind: 'node' | 'edge';
408
+ name: string;
409
+ /** Parsed JSON Schema for the data payload. */
410
+ schema: object;
411
+ /** Edge topology (only for edges). */
412
+ topology?: EdgeTopology;
413
+ description?: string;
414
+ /** Data field to use as the display title (e.g. 'name', 'date'). */
415
+ titleField?: string;
416
+ /** Data field to use as the display subtitle (e.g. 'status', 'difficulty'). */
417
+ subtitleField?: string;
418
+ /** View defaults from meta.json. */
419
+ viewDefaults?: ViewResolverConfig;
420
+ /** Absolute path to views.ts if present. */
421
+ viewsPath?: string;
422
+ /** Sample data from sample.json. */
423
+ sampleData?: Record<string, unknown>;
424
+ /** Scope patterns constraining where this type can exist in subgraphs. */
425
+ allowedIn?: string[];
426
+ /** Subgraph name where cross-graph edges of this type live. */
427
+ targetGraph?: string;
428
+ /** Migration steps loaded from migrations.ts. */
429
+ migrations?: MigrationStep[];
430
+ /** Per-entity write-back override from meta.json. */
431
+ migrationWriteBack?: MigrationWriteBack;
432
+ /** Secondary indexes loaded from meta.json (`indexes` field). */
433
+ indexes?: IndexSpec[];
434
+ }
435
+ /** Result of scanning an entities directory. */
436
+ interface DiscoveryResult {
437
+ nodes: Map<string, DiscoveredEntity>;
438
+ edges: Map<string, DiscoveredEntity>;
439
+ }
440
+ /** Controls which Firestore query backend is used. */
441
+ type QueryMode = 'pipeline' | 'standard';
442
+ /**
443
+ * Configuration for dynamic registry mode where type definitions
444
+ * are stored as graph data (meta-nodes) rather than in code.
445
+ */
446
+ interface DynamicRegistryConfig {
447
+ mode: 'dynamic';
448
+ /**
449
+ * Collection path for meta-type nodes (`nodeType`, `edgeType`).
450
+ * Defaults to the main `collectionPath` if omitted.
451
+ */
452
+ collection?: string;
453
+ }
454
+ /** Options for defineNodeType / defineEdgeType beyond the core fields. */
455
+ interface DefineTypeOptions {
456
+ /** Data field to use as the display title (e.g. 'name', 'date'). */
457
+ titleField?: string;
458
+ /** Data field to use as the display subtitle (e.g. 'status', 'difficulty'). */
459
+ subtitleField?: string;
460
+ /** Mustache HTML template for rendering this type in the editor. */
461
+ viewTemplate?: string;
462
+ /** Scoped CSS for the view template (injected via Shadow DOM). */
463
+ viewCss?: string;
464
+ /** Scope patterns constraining where this type can exist in subgraphs. */
465
+ allowedIn?: string[];
466
+ /**
467
+ * Migration steps. Accepts function objects (auto-serialized via .toString())
468
+ * or strings (stored as-is). The schema version is derived as the highest
469
+ * `toVersion` in this array.
470
+ */
471
+ migrations?: Array<{
472
+ fromVersion: number;
473
+ toVersion: number;
474
+ up: MigrationFn | string;
475
+ }>;
476
+ /** Per-type write-back override for auto-migrated records. */
477
+ migrationWriteBack?: MigrationWriteBack;
478
+ }
479
+ /** Data shape stored in a `nodeType` meta-node. */
480
+ interface NodeTypeData {
481
+ name: string;
482
+ jsonSchema: object;
483
+ description?: string;
484
+ titleField?: string;
485
+ subtitleField?: string;
486
+ viewTemplate?: string;
487
+ viewCss?: string;
488
+ allowedIn?: string[];
489
+ migrations?: StoredMigrationStep[];
490
+ migrationWriteBack?: MigrationWriteBack;
491
+ }
492
+ /** Data shape stored in an `edgeType` meta-node. */
493
+ interface EdgeTypeData {
494
+ name: string;
495
+ from: string | string[];
496
+ to: string | string[];
497
+ jsonSchema?: object;
498
+ inverseLabel?: string;
499
+ description?: string;
500
+ titleField?: string;
501
+ subtitleField?: string;
502
+ viewTemplate?: string;
503
+ viewCss?: string;
504
+ allowedIn?: string[];
505
+ targetGraph?: string;
506
+ migrations?: StoredMigrationStep[];
507
+ migrationWriteBack?: MigrationWriteBack;
508
+ }
509
+ type ScanProtection = 'error' | 'warn' | 'off';
510
+ interface GraphClientOptions {
511
+ /**
512
+ * Static registry built from code/discovery.
513
+ *
514
+ * When provided alone, all writes are validated against this registry.
515
+ *
516
+ * When provided together with `registryMode`, operates in **merged mode**:
517
+ * static entries take priority and cannot be overridden by dynamic
518
+ * definitions. Dynamic definitions can only add new types. The merged
519
+ * client is returned as a `DynamicGraphClient`.
520
+ */
521
+ registry?: GraphRegistry;
522
+ /** Dynamic registry mode — type definitions stored as graph data. */
523
+ registryMode?: DynamicRegistryConfig;
524
+ /**
525
+ * Query execution backend.
526
+ *
527
+ * - `'pipeline'` (default) — Uses Firestore Pipeline API. Requires Enterprise
528
+ * Firestore. Enables indexless queries on `data.*` fields.
529
+ * - `'standard'` — Uses standard Firestore `.where().get()` queries. Requires
530
+ * composite indexes for `data.*` filters or risks full collection scans
531
+ * (Enterprise) / query failures (Standard Firestore).
532
+ *
533
+ * When `FIRESTORE_EMULATOR_HOST` is set, the client auto-falls back to
534
+ * `'standard'` regardless of this setting (emulator doesn't support pipelines).
535
+ */
536
+ queryMode?: QueryMode;
537
+ /**
538
+ * Controls query safety behavior for full collection scan prevention.
539
+ *
540
+ * - `'error'` (default) — Throws `QuerySafetyError` for queries that would
541
+ * likely cause a full collection scan. Override per-query with
542
+ * `allowCollectionScan: true`.
543
+ * - `'warn'` — Logs a warning but executes the query.
544
+ * - `'off'` — No scan protection.
545
+ */
546
+ scanProtection?: ScanProtection;
547
+ /**
548
+ * Global default for write-back of auto-migrated records on read.
549
+ *
550
+ * - `'off'` (default) — Migrated data is returned but NOT written back.
551
+ * - `'eager'` — Migrated data is written back immediately after migration.
552
+ * - `'background'` — Write-back happens asynchronously; errors are logged.
553
+ *
554
+ * Per-entry `migrationWriteBack` on `RegistryEntry` overrides this setting.
555
+ */
556
+ migrationWriteBack?: MigrationWriteBack;
557
+ /**
558
+ * Custom executor for compiling dynamic registry migration source strings.
559
+ * Defaults to SES Compartments with JSON marshaling. Supply an
560
+ * alternative for custom sandboxing.
561
+ *
562
+ * Only used for dynamic registry migrations — static registry migrations
563
+ * are already in-memory functions and never go through this executor.
564
+ */
565
+ migrationSandbox?: MigrationExecutor;
566
+ }
567
+ interface GraphRegistry {
568
+ validate(aType: string, axbType: string, bType: string, data: unknown, scopePath?: string): void;
569
+ lookup(aType: string, axbType: string, bType: string): RegistryEntry | undefined;
570
+ /** Return all entries matching the given axbType (edge relation name). */
571
+ lookupByAxbType(axbType: string): ReadonlyArray<RegistryEntry>;
572
+ /**
573
+ * Return every edge entry originating from `aType` that has `targetGraph`
574
+ * set — i.e. the direct subgraph children of nodes of this type.
575
+ *
576
+ * Used by backends that need to enumerate a node's subgraph DOs without
577
+ * walking the graph. Each returned entry carries both `axbType` (the edge
578
+ * label that introduces the subgraph) and `targetGraph` (the subgraph
579
+ * segment name).
580
+ *
581
+ * Entries are deduplicated by `targetGraph` alone — the physical subgraph
582
+ * store is addressed by `(parentUid, targetGraph)`, so multiple edge
583
+ * relations (distinct `axbType` or `bType`) pointing into the same segment
584
+ * collapse to a single representative entry. The first-declared entry
585
+ * wins the collision. Callers only care about the subgraph name, not the
586
+ * originating relation or target node type.
587
+ */
588
+ getSubgraphTopology(aType: string): ReadonlyArray<RegistryEntry>;
589
+ entries(): ReadonlyArray<RegistryEntry>;
590
+ }
591
+ interface GraphReader {
592
+ getNode(uid: string): Promise<StoredGraphRecord | null>;
593
+ getEdge(aUid: string, axbType: string, bUid: string): Promise<StoredGraphRecord | null>;
594
+ edgeExists(aUid: string, axbType: string, bUid: string): Promise<boolean>;
595
+ findEdges(params: FindEdgesParams): Promise<StoredGraphRecord[]>;
596
+ findNodes(params: FindNodesParams): Promise<StoredGraphRecord[]>;
597
+ }
598
+ interface GraphWriter {
599
+ /**
600
+ * Write a node, deep-merging into any existing record.
601
+ *
602
+ * Nested objects are merged recursively — sibling keys at any depth
603
+ * survive. Arrays are terminal (replaced as a unit, not element-merged).
604
+ * `undefined` values are omitted; `null` is preserved. To delete a field,
605
+ * pass the `deleteField()` sentinel as its value.
606
+ *
607
+ * Use {@link replaceNode} when you want full-document replacement.
608
+ */
609
+ putNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void>;
610
+ /**
611
+ * Write an edge, deep-merging into any existing record. See
612
+ * {@link putNode} for the merge contract.
613
+ */
614
+ putEdge(aType: string, aUid: string, axbType: string, bType: string, bUid: string, data: Record<string, unknown>): Promise<void>;
615
+ /**
616
+ * Replace a node's `data` payload entirely. Any field absent from
617
+ * `data` is dropped. Use sparingly — prefer {@link putNode} unless you
618
+ * specifically need to drop unknown fields.
619
+ */
620
+ replaceNode(aType: string, uid: string, data: Record<string, unknown>): Promise<void>;
621
+ /**
622
+ * Replace an edge's `data` payload entirely. See {@link replaceNode}.
623
+ */
624
+ replaceEdge(aType: string, aUid: string, axbType: string, bType: string, bUid: string, data: Record<string, unknown>): Promise<void>;
625
+ /**
626
+ * Patch a node's `data` payload. Like {@link putNode} this is a deep
627
+ * merge — nested objects are walked, only leaves are written. Use the
628
+ * `deleteField()` sentinel to remove a field.
629
+ */
630
+ updateNode(uid: string, data: Record<string, unknown>): Promise<void>;
631
+ /**
632
+ * Patch an edge's `data` payload. See {@link updateNode}.
633
+ */
634
+ updateEdge(aUid: string, axbType: string, bUid: string, data: Record<string, unknown>): Promise<void>;
635
+ removeNode(uid: string): Promise<void>;
636
+ removeEdge(aUid: string, axbType: string, bUid: string): Promise<void>;
637
+ }
638
/**
 * Portable graph client surface.
 *
 * Every backend supports these methods unconditionally — they are the
 * "graph as a graph" operations: read/write nodes and edges, run
 * transactions, scope into subgraphs, etc. Edition-specific extensions
 * (aggregate, full-text search, vector search, raw escape hatches, etc.)
 * are added by intersection in `GraphClient<C>` only when the backend
 * declares the matching capability — see the `*Extension` interfaces and
 * `GraphClient<C>` below.
 */
interface CoreGraphClient extends GraphReader, GraphWriter {
    /**
     * Capability set of the underlying storage backend. Mirrors
     * `StorageBackend.capabilities` so callers can do portability checks
     * without reaching for the backend handle:
     *
     * ```ts
     * if (client.capabilities.has('query.join')) {
     *     await client.expand({ sources, axbType: 'wrote' });
     * } else {
     *     // fall back to the per-source loop in createTraversal()
     * }
     * ```
     *
     * The set is static for the lifetime of the client (invariant 3 from
     * `.claude/backend-capabilities.md`). Subgraph clients return the cap
     * set of their wrapped backend — typically identical to the parent's,
     * but the routing wrapper may return a narrowed set when crossing into
     * a routed child of a different backend type.
     */
    readonly capabilities: BackendCapabilities;
    /**
     * Run `fn` with a `GraphTransaction` handle; resolves with `fn`'s
     * result.
     */
    runTransaction<T>(fn: (tx: GraphTransaction) => Promise<T>): Promise<T>;
    /** Create a `GraphBatch` for grouping multiple writes. */
    batch(): GraphBatch;
    /** Delete a node and all its outgoing/incoming edges in chunked batches. */
    removeNodeCascade(uid: string, options?: BulkOptions): Promise<CascadeResult>;
    /** Find all edges matching `params` and delete them in chunked batches. */
    bulkRemoveEdges(params: FindEdgesParams, options?: BulkOptions): Promise<BulkResult>;
    /**
     * Create a scoped client for a Firestore subcollection under the given
     * parent node's document.
     *
     * The returned client shares a snapshot of the parent's registry at
     * the time of this call. If the parent is a `DynamicGraphClient` and
     * `reloadRegistry()` is called later, existing subgraph clients will
     * NOT see the updated types — create a new subgraph client after
     * reloading to pick up changes.
     *
     * @param parentNodeUid - UID of the parent node whose document owns the subcollection
     * @param name - Subcollection name (defaults to `'graph'`). Must not contain `/`.
     * @returns A `GraphClient` scoped to `{collectionPath}/{parentNodeUid}/{name}`
     */
    subgraph(parentNodeUid: string, name?: string): GraphClient;
    /**
     * Find edges across all subgraphs using a Firestore collection group query.
     *
     * Queries all collections with the given name (defaults to `'graph'`) across
     * the entire database. This is useful for cross-cutting reads that span
     * multiple subgraphs.
     *
     * **Requires** a Firestore collection group index for the query pattern.
     *
     * @param params - Edge filter parameters (same as `findEdges`)
     * @param collectionName - Collection name to query across (defaults to last segment of this client's collection path)
     */
    findEdgesGlobal(params: FindEdgesParams, collectionName?: string): Promise<StoredGraphRecord[]>;
}
705
/** Supported aggregation operations.
 *
 * Firestore Standard supports `count`, `sum`, `avg` only. SQLite/DO additionally
 * support `min` and `max` via SQL. Backends that cannot satisfy a requested op
 * throw `FiregraphError` with code `UNSUPPORTED_AGGREGATE`.
 */
type AggregateOp = 'count' | 'sum' | 'avg' | 'min' | 'max';
/** A single aggregation request.
 *
 * `field` is required for `sum`/`avg`/`min`/`max` and follows the same dotted
 * path convention as `QueryFilter.field` (e.g. `'data.price'`). For `count`
 * the field is forbidden — every backend rejects `count` with a stray field
 * via `INVALID_QUERY`. We reject (rather than silently ignore) so a typo like
 * `{ n: { op: 'count', field: 'data.price' } }` — easy to introduce when
 * cribbing a sum spec and changing only the op — surfaces as a clear error
 * instead of producing misleading row counts. */
interface AggregateField {
    /** Aggregation operation to run. */
    op: AggregateOp;
    /** Dotted field path (e.g. `'data.price'`). Required for
     * `sum`/`avg`/`min`/`max`; forbidden for `count` (see type-level docs). */
    field?: string;
}
/** Map of result alias -> aggregation request. */
type AggregateSpec = Record<string, AggregateField>;
/** Result shape derived from an `AggregateSpec` — one number per alias. */
type AggregateResult<A extends AggregateSpec> = {
    [K in keyof A]: number;
};
/** Aggregate query surface — count/sum/avg/min/max over a filter set. */
interface AggregateExtension {
    /** Run every aggregation in `params.aggregates` over the rows matched by
     * the `FindEdgesParams` filter set; resolves with one number per alias. */
    aggregate<A extends AggregateSpec>(params: FindEdgesParams & {
        aggregates: A;
    }): Promise<AggregateResult<A>>;
}
737
/**
 * One row in the result of a `findEdgesProjected()` call.
 *
 * The shape is `{ [F]: unknown }` over the caller-supplied field list `F`.
 * Each value is whatever the backend extracted from the underlying record:
 *
 * - Top-level firegraph fields (`aType`, `aUid`, `axbType`, `bType`,
 *   `bUid`, `createdAt`, `updatedAt`, `v`) come back as the same JS shape
 *   `findEdges` produces — strings for the identifying fields, `number |
 *   null` for `v` (`null` when the record predates schema versioning),
 *   and the backend's native timestamp instance (Firestore's `Timestamp`
 *   or `GraphTimestampImpl`) for `createdAt` / `updatedAt`.
 * - A bare name (e.g. `'title'`) is interpreted as `data.<name>`. SQL
 *   backends extract via `json_extract` and the value comes back as the
 *   JSON-decoded primitive / object. Firestore returns the field's stored
 *   type unchanged.
 * - A dotted `data.x.y` path is the explicit form for nested fields.
 * - Absent paths surface as `null` (not `undefined`) across all backends
 *   so that `JSON.stringify(row)` round-trips the requested shape.
 *
 * Why `unknown` rather than a stricter type: per-entity codegen integration
 * (Phase 7 plan note) is the right place to surface concrete value types.
 * Until that lands, the projection layer doesn't know whether `data.title`
 * is a string, a number, or an object — a stricter type would lie. Use
 * a registry-aware wrapper (or a per-call cast) to narrow.
 */
type ProjectedRow<F extends ReadonlyArray<string>> = {
    [K in F[number]]: unknown;
};
/**
 * Parameters for `findEdgesProjected` — `FindEdgesParams` plus a `select`
 * field list. Field names follow the same rules as `WhereClause.field`:
 *
 * - Built-in record fields (`aType`, `aUid`, etc.) resolve to their typed
 *   column / Firestore field directly.
 * - A bare name resolves to `data.<name>` (the most common shape — most
 *   callers project a few keys out of the JSON payload).
 * - A dotted `data.x.y` path is explicit.
 *
 * Empty `select: []` is rejected at the client level. The backend never
 * sees an empty projection list because `SELECT FROM …` (no projection
 * clause) is a syntactically different query and `SELECT * FROM …` is what
 * `findEdges` already does.
 *
 * Duplicate entries in `select` are collapsed at compile time — the
 * resulting row carries one slot per unique field. This keeps the
 * SQL projection list minimal and matches Firestore's `Query.select(...)`
 * de-duplication behaviour.
 */
interface FindEdgesProjectedParams<F extends ReadonlyArray<string>> extends FindEdgesParams {
    /** Non-empty list of field paths to return. See type-level docs for the
     * dotted-path convention. */
    select: F;
}
/**
 * Server-side field projection — return only the requested fields.
 *
 * Backends declaring `query.select` translate the call into a projecting
 * server-side query (`SELECT json_extract(data, '$.f1'), …` on SQLite,
 * `Query.select(...)` on Firestore Standard, pipeline `select()` on
 * Firestore Enterprise). Backends without the cap throw
 * `UNSUPPORTED_OPERATION` from the client wrapper — there is no
 * client-side fallback that materialises full rows and then drops fields,
 * because the wire-payload reduction is the entire point of the API.
 */
interface SelectExtension {
    /** Fetch only the requested field paths from each matching edge.
     *
     * Returns one `ProjectedRow<F>` per matching edge, in the same order
     * `findEdges` would have produced. Migrations are not applied — the
     * projection bypasses the read-path migration pipeline because the
     * caller asked for a specific shape, not a full record. If you need the
     * migrated shape, use `findEdges` and project in JS.
     */
    findEdgesProjected<F extends ReadonlyArray<string>>(params: FindEdgesProjectedParams<F>): Promise<Array<ProjectedRow<F>>>;
}
813
/**
 * Parameters for one expansion hop — fan out from a set of source UIDs over
 * a single edge type in one server-side round trip.
 *
 * The shape mirrors `HopDefinition` (see `traverse.ts`) but is flat instead
 * of chained: a multi-hop traversal calls `expand()` once per depth, and
 * the traversal layer drives the hop-to-hop loop. We keep `expand()` per-
 * depth (not per-traversal) for two reasons:
 *
 * 1. **Backend symmetry.** SQL `JOIN`s and Firestore Pipeline subqueries
 *    both express N→1-source fan-out cleanly, but neither expresses
 *    arbitrary-depth chained joins as a single statement (CTEs are
 *    possible, but the `IN (?, …)` cap on Firestore makes a multi-depth
 *    pipeline brittle). Per-depth fan-out is the largest constant-factor
 *    win that's portable across all backends declaring `query.join`.
 *
 * 2. **Result shaping.** Per-hop edges feed cross-graph hops and
 *    `targetGraph` re-routing, which traversal already owns. Pushing the
 *    whole chain into one backend call would re-implement that logic at
 *    the storage layer.
 */
interface ExpandParams {
    /** Source UIDs from which to expand. The hop matches every row whose
     * `aUid` (forward) or `bUid` (reverse) is in this list. May be empty —
     * empty input yields empty output without touching the backend. */
    sources: string[];
    /** Edge relation name. Required. */
    axbType: string;
    /** Hop direction. `'forward'` (default) follows `aUid → bUid`; `'reverse'`
     * follows `bUid → aUid`. */
    direction?: 'forward' | 'reverse';
    /** Optional `aType` predicate on the matched edge. */
    aType?: string;
    /** Optional `bType` predicate on the matched edge. */
    bType?: string;
    /** Per-source soft fan-out cap. The backend translates this to an upper
     * bound on the total result count (`sources.length * limitPerSource`); it
     * does **not** enforce strict per-source limits — a SQL `LIMIT N` over an
     * `IN (…)` query may return all N rows from a single source if that's
     * where the matches concentrate. Callers needing strict per-source caps
     * should fall back to the per-hop loop. */
    limitPerSource?: number;
    /** Order edges by field; applied before limit. */
    orderBy?: {
        field: string;
        direction?: 'asc' | 'desc';
    };
    /** Hydrate target nodes alongside edges. When `true`, the returned
     * `ExpandResult.targets` array is index-aligned with `edges` and contains
     * the corresponding target-side node record (the b-side for forward, the
     * a-side for reverse) or `null` when the node row is missing. */
    hydrate?: boolean;
}
/** Result shape for one `expand()` call.
 *
 * `edges` is the list of edge rows that matched the hop, in the order the
 * backend returned them (subject to `orderBy`). `targets`, when present,
 * is the same length as `edges` — one slot per edge — and holds the
 * corresponding target-side node record (or `null` when the target node
 * does not exist). */
interface ExpandResult {
    /** Edge rows matched by the hop, in backend return order. */
    edges: StoredGraphRecord[];
    /** Present iff the request set `hydrate: true`. Index-aligned with
     * `edges`; entries are `null` for edges whose target node row is
     * missing. */
    targets?: Array<StoredGraphRecord | null>;
}
/** Multi-hop fan-out with target-node hydration in one round trip per hop.
 *
 * Backends declaring `query.join` translate one `expand()` call into one
 * server-side query (SQL `IN (…)`, Firestore Pipeline batched fan-out).
 * That collapses the per-source `findEdges` loop in `traverse.ts` into a
 * single round trip per hop, regardless of source-set size.
 *
 * Backends without `query.join` are not required to expose `expand()` at
 * all — `traverse.ts` keeps the per-source loop for them. The capability
 * gate is the single source of truth on whether the optimization runs. */
interface JoinExtension {
    /** Fan out from `params.sources` over `params.axbType` in one round
     * trip. See `ExpandParams` for shape and `ExpandResult` for return value.
     *
     * Cross-graph hops (those with `targetGraph`) are not eligible for
     * `expand()` because each source UID would resolve to a distinct
     * subgraph location — there's no single collection to fan out over.
     * Callers (notably `traverse.ts`) detect cross-graph hops and stay on
     * the per-source loop. */
    expand(params: ExpandParams): Promise<ExpandResult>;
}
901
/**
 * Patch shape for `bulkUpdate`.
 *
 * - `data`: a deep partial of the row's `data` field. Applied via
 *   deep-merge semantics (the same `flattenPatch` pipeline that
 *   `updateNode` / `updateEdge` use). Use `deleteField()` sentinels to
 *   remove individual leaves; arrays are replaced as a unit, never
 *   concatenated.
 *
 * Backends with `query.dml` translate this to a single server-side UPDATE
 * statement. The patch is applied to every row that matches the filter
 * list; there is no per-row callback or read-modify-write loop. Identifying
 * fields (`aType`, `axbType`, `bType`, `aUid`, `bUid`, `v`) are owned by
 * firegraph and cannot be mutated through `bulkUpdate` — pass them in the
 * filter list to scope the update, not in the patch body.
 */
interface BulkUpdatePatch {
    /** Deep-partial patch applied to each matching row's `data` field. */
    data: Record<string, unknown>;
}
/** Server-side conditional bulk DML — bulkDelete / bulkUpdate.
 *
 * Backends declaring `query.dml` translate each call to one server-side
 * statement (Firestore Pipeline `remove`/`update` stage, SQL `DELETE`/
 * `UPDATE`). Standard Firestore omits this capability and the
 * Phase 5 code falls back to the existing fetch-then-write loop in
 * `src/bulk.ts`.
 *
 * Both methods scope to the **current backend** only — they do not fan
 * out to routed children or subcollections. Use `removeNodeCascade` for
 * the cascade-aware cousin of `bulkDelete`. */
interface DmlExtension {
    /**
     * Delete every row matching `params` in one server-side statement.
     * Subject to the same scan-protection rules as `findEdges`: pass
     * `allowCollectionScan: true` to bypass.
     */
    bulkDelete(params: FindEdgesParams, options?: BulkOptions): Promise<BulkResult>;
    /**
     * Update every row matching `params` with `patch` in one server-side
     * statement. The patch is deep-merged into each row's `data` field.
     * Identifying columns (`aType`, `axbType`, etc.) are immutable through
     * this path — to relabel rows, delete and re-insert.
     */
    bulkUpdate(params: FindEdgesParams, patch: BulkUpdatePatch, options?: BulkOptions): Promise<BulkResult>;
}
947
/**
 * One hop in an engine-level traversal spec.
 *
 * Strict subset of `HopDefinition` — engine traversal cannot honour
 * arbitrary client-side filter callbacks (the predicate runs in JS, not
 * server-side) and cannot compose across `targetGraph` boundaries (each
 * subgraph lives at a distinct collection path; nested pipelines need
 * one root collection). The compiler rejects specs that include either,
 * falling back to the per-hop loop in `traverse.ts`.
 *
 * `limitPerSource` is REQUIRED on every engine hop — without it the
 * compiler can't bound the response-size product against `maxReads`.
 * Missing it is a compile-time error.
 */
interface EngineHopSpec {
    /** Edge relation name for this hop. Mirrors `ExpandParams.axbType`. */
    axbType: string;
    /** Hop direction. Mirrors `ExpandParams.direction`. */
    direction?: 'forward' | 'reverse';
    /** Optional `aType` predicate on the matched edge. */
    aType?: string;
    /** Optional `bType` predicate on the matched edge. */
    bType?: string;
    /** Required for engine traversal — bounds the worst-case response size. */
    limitPerSource: number;
    /** Order edges by field; applied before the per-hop limit. */
    orderBy?: {
        field: string;
        direction?: 'asc' | 'desc';
    };
}
/**
 * Parameters for one engine-level traversal call. The traversal layer
 * compiles a multi-hop spec into a single nested Pipeline and dispatches
 * one round trip; the executor decodes the tree result into per-hop
 * `StoredGraphRecord[][]` arrays index-aligned with the source set.
 *
 * Cross-graph hops, depth > `MAX_PIPELINE_DEPTH`, or response-size
 * estimates over `maxReads` are caught at compile time by the compiler
 * and signal the traversal layer to fall back to the per-hop loop.
 */
interface EngineTraversalParams {
    /** Initial source UIDs (the "frontier" at depth 0). */
    sources: string[];
    /** Hop chain. Length must be ≥ 1 and ≤ `MAX_PIPELINE_DEPTH`. */
    hops: EngineHopSpec[];
    /** Optional cap on the worst-case response-size product. The compiler
     * estimates `Π(limitPerSource_i × N_i)` and refuses to emit (forcing
     * fallback) if the estimate exceeds this. */
    maxReads?: number;
}
/**
 * Result of one engine-traversal call. `hops[i]` is the edge set
 * returned at depth `i`, after per-hop dedupe on `bUid` (forward) /
 * `aUid` (reverse). The arrays are flat — the tree shape is collapsed
 * by the executor so the traversal layer can splice the result into
 * the same `HopResult[]` shape `traverse.ts` already produces from the
 * per-hop loop.
 */
interface EngineTraversalResult {
    hops: Array<{
        /** Edges returned at this depth, deduped on the target-side UID. */
        edges: StoredGraphRecord[];
        /** Number of distinct source UIDs at this depth. */
        sourceCount: number;
    }>;
    /** Total documents read on the server side (for budget bookkeeping). */
    totalReads: number;
}
/**
 * Engine-level multi-hop traversal — a compiled, single-round-trip
 * traversal for backends that can express it server-side.
 *
 * Backends declaring `traversal.serverSide` translate one
 * `runEngineTraversal()` call into one server-side query (a nested
 * Pipeline using `define` / `addFields` / `toArrayExpression` on
 * Firestore Enterprise). That collapses the per-hop `expand()` loop
 * in `traverse.ts` into a single round trip, regardless of depth.
 *
 * The traversal layer (`src/traverse.ts`) compiles a `TraversalBuilder`
 * spec to `EngineTraversalParams` when:
 *
 * - the backend declares `traversal.serverSide`;
 * - no hop is cross-graph (`targetGraph` unset);
 * - no hop carries a JS `filter` callback;
 * - depth ≤ `MAX_PIPELINE_DEPTH`;
 * - `Π(limitPerSource_i × N_i)` ≤ `maxReads` budget;
 * - every hop sets `limitPerSource`.
 *
 * Specs that fail any condition fall back to the per-hop loop with
 * an optional `console.warn` (only when explicitly forced via the
 * `engineTraversal: 'force'` opt-in in `TraversalOptions`).
 */
interface EngineTraversalExtension {
    /** Execute one nested-Pipeline traversal in a single round trip. */
    runEngineTraversal(params: EngineTraversalParams): Promise<EngineTraversalResult>;
}
1039
/**
 * Parameters for a server-side full-text search query.
 *
 * Translates on Firestore Enterprise into a Pipeline `search({ query: documentMatches(...) })`
 * stage. Field-path conventions match `FindNearestParams.vectorField` and
 * `WhereClause.field` — bare names resolve to `data.<name>`, envelope
 * fields are rejected.
 */
interface FullTextSearchParams {
    /**
     * Optional filter on `aType`. Applied as a `where(aType == …)` stage
     * after the `search()` stage (Firestore requires `search` to be the
     * first stage of a pipeline, so identifying filters cannot be applied
     * before the index walk).
     */
    aType?: string;
    /** Optional filter on `axbType`. Same post-search-stage application as `aType`. */
    axbType?: string;
    /** Optional filter on `bType`. Same post-search-stage application as `aType`. */
    bType?: string;
    /**
     * Free-form query string. The Firestore search index tokenises and
     * ranks; the string accepts the same DSL as `documentMatches(...)` —
     * boolean operators (`AND`, `OR`, `NOT`), phrase quoting, etc.
     */
    query: string;
    /**
     * Indexed text fields the caller wants the search restricted to.
     *
     * **Not yet supported.** Passing a non-empty `fields` array throws
     * `INVALID_QUERY` (`'fields is not yet supported'`). The option is
     * reserved for when `@google-cloud/firestore` exposes a typed per-field
     * text predicate (`matches(field, query)`). Until then, omit `fields` —
     * every search executes document-wide `documentMatches(query)`. For
     * per-`aType` scoping, rely on Firestore's per-collection FTS indexes.
     */
    fields?: string[];
    /** Upper bound on rows returned, sorted by relevance. */
    limit: number;
    /**
     * Bypass scan-protection for unfiltered FTS. A search with no
     * `aType` / `axbType` / `bType` filter walks every row the index
     * scored — opt in explicitly when that's intended (analytics dumps,
     * full-collection rerank).
     */
    allowCollectionScan?: boolean;
}
/**
 * Native full-text search.
 *
 * - **Firestore Enterprise** ✓ — implemented via Pipeline
 *   `search({ query: documentMatches(...) })` (typed stage exposed in
 *   `@google-cloud/firestore@8.5.0`). Identifying filters (`aType` /
 *   `axbType` / `bType`) are applied as a follow-up `where(...)`
 *   stage because the `search` stage must be the first stage of a
 *   pipeline. Requires Enterprise Firestore (the FTS index is an
 *   Enterprise product feature, not a free-tier feature).
 * - **Firestore Standard** — not supported. FTS is an Enterprise-only
 *   product feature; this row will never become "✓".
 * - **SQLite / Cloudflare DO** — not supported. No native FTS index;
 *   emulating it over `json_extract` is not viable for any realistic
 *   dataset.
 *
 * Migrations are NOT applied to the result. The search index walked
 * the raw stored shape; rehydrating each row through the migration
 * pipeline would change the candidate set the index already scored.
 * If you need migrated shape, follow up with `getNode` / `findEdges`
 * on the returned UIDs.
 */
interface FullTextSearchExtension {
    /**
     * Run a full-text search. Returns the top-N records by relevance,
     * ordered by the search index's score.
     *
     * Throws:
     *
     * - `INVALID_QUERY` if `query` is empty, any field path resolves to
     *   a built-in envelope field, or `limit` is non-positive.
     * - `QUERY_SAFETY` if no identifying filters are supplied and
     *   `allowCollectionScan` is not set.
     * - `UNSUPPORTED_OPERATION` if the backend does not declare
     *   `search.fullText`.
     */
    fullTextSearch(params: FullTextSearchParams): Promise<StoredGraphRecord[]>;
}
1124
/**
 * Geographic point — lat/lng in degrees. Mirrors the runtime shape of
 * Firestore's `GeoPoint` so callers can pass either a literal or a
 * `GeoPoint` instance once wiring lands.
 */
interface GeoPointLiteral {
    /** Latitude in degrees. */
    lat: number;
    /** Longitude in degrees. */
    lng: number;
}
/**
 * Parameters for a server-side geospatial distance query.
 *
 * Translates on Firestore Enterprise into a Pipeline
 * `search({ query: geoDistance(field, point).lessThanOrEqual(radius), sort: geoDistance(...).ascending() })`
 * stage. The two `geoDistance(...)` expressions are computed identically
 * server-side; the radius cap goes into the search query and the
 * nearest-first ordering goes into `sort`.
 */
interface GeoSearchParams {
    /**
     * Optional filter on `aType`. Applied as a `where(aType == …)` stage
     * after the `search()` stage (search must be the first stage).
     */
    aType?: string;
    /** Optional filter on `axbType`. Same post-search-stage application as `aType`. */
    axbType?: string;
    /** Optional filter on `bType`. Same post-search-stage application as `aType`. */
    bType?: string;
    /**
     * Field path of the indexed `GeoPoint`. Bare name → `data.<name>` per
     * the same convention as `select` / `where`. Built-in envelope fields
     * are rejected.
     */
    geoField: string;
    /** Centre of the search radius. */
    point: GeoPointLiteral;
    /** Search radius in metres. */
    radiusMeters: number;
    /** Upper bound on rows returned. */
    limit: number;
    /**
     * If true (default), results are sorted nearest-first via a
     * `geoDistance(...).ascending()` ordering inside the `search` stage;
     * if false, ordering is unspecified — the backend returns rows in
     * whatever order the geo index emits.
     */
    orderByDistance?: boolean;
    /**
     * Bypass scan-protection for unfiltered geo searches. A geo query
     * with no `aType` / `axbType` / `bType` filter walks every indexed
     * row inside the radius — opt in explicitly when that's intended.
     */
    allowCollectionScan?: boolean;
}
/**
 * Native geospatial distance search.
 *
 * - **Firestore Enterprise** ✓ — implemented via Pipeline
 *   `search({ query: geoDistance(field, point).lessThanOrEqual(radius), sort: geoDistance(...).ascending() })`
 *   (typed `geoDistance(...)` function exposed in
 *   `@google-cloud/firestore@8.5.0`). Identifying filters
 *   (`aType` / `axbType` / `bType`) are applied as a follow-up
 *   `where(...)` stage because the `search` stage must be the
 *   first stage of a pipeline. Requires Enterprise Firestore (the
 *   geo index is an Enterprise product feature).
 * - **Firestore Standard** — not supported. Geospatial queries are
 *   an Enterprise-only product feature; this row will never become
 *   "✓".
 * - **SQLite / Cloudflare DO** — not supported. No native geo
 *   index; emulating it over `json_extract` and the haversine
 *   formula is viable only for trivial dataset sizes and would give
 *   callers the wrong mental model about cost.
 *
 * Migrations are NOT applied to the result — same rationale as
 * `findNearest` and `fullTextSearch`. The geo index walked the raw
 * stored shape.
 */
interface GeoExtension {
    /**
     * Run a geospatial distance search. Returns rows whose
     * `geoField` lies within `radiusMeters` of `point`, ordered
     * nearest-first by default.
     *
     * Throws:
     *
     * - `INVALID_QUERY` if `geoField` resolves to a built-in envelope
     *   field, `radiusMeters` is non-positive, `limit` is
     *   non-positive, or `point.lat` / `point.lng` are out of range.
     * - `QUERY_SAFETY` if no identifying filters are supplied and
     *   `allowCollectionScan` is not set.
     * - `UNSUPPORTED_OPERATION` if the backend does not declare
     *   `search.geo`.
     */
    geoSearch(params: GeoSearchParams): Promise<StoredGraphRecord[]>;
}
1219
/**
 * Distance metric for vector / nearest-neighbour search. Mirrors
 * Firestore's `VectorQueryOptions.distanceMeasure` enum so the value
 * passes through to the SDK without translation:
 *
 * - `EUCLIDEAN` — straight-line distance in n-dimensional space; lower
 *   is more similar.
 * - `COSINE` — angle between vectors; lower is more similar (1 −
 *   cosine_similarity).
 * - `DOT_PRODUCT` — inner product; *higher* is more similar. The
 *   `distanceThreshold` semantics flip accordingly (see
 *   `FindNearestParams`).
 */
type DistanceMeasure = 'EUCLIDEAN' | 'COSINE' | 'DOT_PRODUCT';
/**
 * Parameters for a server-side vector / nearest-neighbour query.
 *
 * Identifying filters (`aType`, `axbType`, `bType`) and `where` clauses
 * narrow the candidate set *before* the ANN query runs — Firestore folds
 * them into the same `Query` the vector index walks. Combining multiple
 * filters with vector search requires composite indexes on Firestore
 * Standard; the Enterprise edition lifts the index requirement for some
 * shapes (see Firestore docs).
 *
 * `vectorField` follows the same dotted-path / bare-name convention as
 * `select` in `FindEdgesProjectedParams` and `field` in `WhereClause`:
 *
 * - A bare name (e.g. `'embedding'`) resolves to `data.embedding`.
 * - A literal `'data'` or `'data.<x>'` is taken as-is.
 * - Built-in envelope fields are not vector-indexable — passing one
 *   throws `INVALID_QUERY` at the client surface.
 *
 * `queryVector` accepts either a plain `number[]` or a Firestore
 * `VectorValue`. The dimension must match the indexed `vectorField`'s
 * dimension; Firestore filters out rows whose vector dimension differs
 * (rather than throwing) so the result set may be smaller than `limit`.
 *
 * `distanceThreshold` semantics depend on `distanceMeasure`:
 *
 * - `EUCLIDEAN` / `COSINE` → return rows with `distance <=` threshold.
 * - `DOT_PRODUCT` → return rows with `distance >=` threshold (higher
 *   dot product = more similar).
 *
 * If `distanceResultField` is set, every returned record carries the
 * computed distance at that field path inside `data`. Pass a built-in
 * envelope field name (e.g. `'aType'`) and the request fails server-side
 * — the SDK reserves the envelope.
 */
interface FindNearestParams {
    /** Optional filter on `aType`. Resolves to `where('aType', '==', …)`. */
    aType?: string;
    /** Optional filter on `axbType`. */
    axbType?: string;
    /** Optional filter on `bType`. */
    bType?: string;
    /**
     * Field path of the indexed vector. Bare name → `data.<name>`. Built-in
     * envelope fields are rejected — they are not vector-indexable.
     */
    vectorField: string;
    /** Query vector. `number[]` or `VectorValue`; must match the indexed dimension. */
    queryVector: number[] | {
        toArray(): number[];
    };
    /** Upper bound on rows returned. Firestore caps at 1000. */
    limit: number;
    /** Distance metric — see `DistanceMeasure` for the semantics flip on `DOT_PRODUCT`. */
    distanceMeasure: DistanceMeasure;
    /**
     * Optional similarity cutoff. Interpretation depends on `distanceMeasure`
     * — see the type-level docs.
     */
    distanceThreshold?: number;
    /**
     * Optional dotted path that, if set, will be populated on each returned
     * record with the computed distance. Bare name → `data.<name>`. Use this
     * when downstream code needs to rank or threshold the results in JS.
     */
    distanceResultField?: string;
    /**
     * Additional filters applied before the ANN walk. Same shape as
     * `findEdges({ where })`. Field-path rules match `WhereClause.field`.
     */
    where?: WhereClause[];
    /**
     * Bypass scan-protection for unfiltered vector searches. A vector query
     * with no `aType` / `axbType` / `bType` / `where` filters scans every
     * row in the collection before the ANN narrowing — opt in explicitly.
     */
    allowCollectionScan?: boolean;
}
/**
 * Native vector / nearest-neighbour search.
 *
 * Backends declaring `search.vector` translate the call into a single
 * server-side `findNearest` query. The SQLite-shaped backends (shared
 * SQLite, Cloudflare DO) do not declare this capability — they have no
 * native vector index, and emulating ANN on top of `json_extract` is a
 * non-starter for any realistic dataset. Firestore Standard and
 * Enterprise both implement it via the classic `Query.findNearest(...)`
 * API; the pipeline `findNearest` stage is a future optimisation.
 *
 * Migrations are NOT applied to the result. The vector query selects
 * documents by similarity, not by query plan — applying migrations
 * inline would change the candidate set the index already walked. If
 * you need migrated shape, follow up with `getNode` / `findEdges` on the
 * returned UIDs.
 */
interface VectorExtension {
    /**
     * Run a vector / nearest-neighbour search. Returns the top-K records
     * by similarity, sorted nearest-first (or furthest-first for
     * `DOT_PRODUCT` where higher = more similar).
     *
     * Throws:
     *
     * - `INVALID_QUERY` if `vectorField` resolves to a built-in envelope
     *   field, `limit` is non-positive or > 1000, `queryVector` is
     *   empty, or `distanceResultField` collides with a built-in.
     * - `QUERY_SAFETY` if no identifying filters / `where` clauses are
     *   supplied and `allowCollectionScan` is not set.
     * - `UNSUPPORTED_OPERATION` if the backend does not declare
     *   `search.vector`.
     */
    findNearest(params: FindNearestParams): Promise<StoredGraphRecord[]>;
}
1345
+ /** Escape hatch — expose the underlying Firestore handle. */
1346
+ interface RawFirestoreExtension {
1347
+ }
1348
+ /** Escape hatch — expose the underlying SQL executor. */
1349
+ interface RawSqlExtension {
1350
+ }
1351
+ /** Realtime listener API — `onSnapshot`-style live subscriptions. */
1352
+ interface RealtimeListenExtension {
1353
+ }
/**
 * Capability-gated graph client.
 *
 * `C` is the closed union of capabilities the underlying backend
 * declared. Each extension is conditionally intersected: it appears in the
 * resulting type only when the matching capability is in `C`. The default
 * `C = Capability` evaluates every conditional truthy, yielding the full
 * surface — that is the "permissive" shape returned when no capability
 * narrowing is in effect (e.g. legacy callers using
 * `let x: GraphClient = …` without a parameter).
 *
 * Why distributive conditionals work: `'query.aggregate' extends C ? A : B`
 * distributes over the union members of `C`. If any union member is
 * `'query.aggregate'`, the conditional evaluates to `A`; otherwise `B`.
 * Intersection with `object` (the false branch) is a no-op, so omitted
 * extensions contribute nothing to the resulting type.
 */
type GraphClient<C extends Capability = Capability> = CoreGraphClient & ('query.aggregate' extends C ? AggregateExtension : object) & ('query.select' extends C ? SelectExtension : object) & ('query.join' extends C ? JoinExtension : object) & ('query.dml' extends C ? DmlExtension : object) & ('traversal.serverSide' extends C ? EngineTraversalExtension : object) & ('search.fullText' extends C ? FullTextSearchExtension : object) & ('search.geo' extends C ? GeoExtension : object) & ('search.vector' extends C ? VectorExtension : object) & ('realtime.listen' extends C ? RealtimeListenExtension : object) & ('raw.firestore' extends C ? RawFirestoreExtension : object) & ('raw.sql' extends C ? RawSqlExtension : object);
/**
 * Methods present only on dynamic-registry clients. Composed with
 * `GraphClient<C>` to form `DynamicGraphClient<C>` — the type returned
 * by `createGraphClient(...)` when `registryMode` is set on the options.
 */
interface DynamicGraphMethods {
    /**
     * Define or update a node type in the dynamic registry.
     * NOTE(review): presumably persisted as a meta-type node (cf.
     * `reloadRegistry`, which reloads from meta-type nodes) — confirm.
     */
    defineNodeType(name: string, jsonSchema: object, description?: string, options?: DefineTypeOptions): Promise<void>;
    /**
     * Define or update an edge type in the dynamic registry.
     * Same persistence caveat as `defineNodeType`.
     */
    defineEdgeType(name: string, topology: EdgeTopology, jsonSchema?: object, description?: string, options?: DefineTypeOptions): Promise<void>;
    /** Reload the registry from meta-type nodes in the graph. */
    reloadRegistry(): Promise<void>;
}
/**
 * Dynamic-registry graph client. Same conditional capability surface as
 * `GraphClient<C>`, plus the meta-type definition methods.
 */
type DynamicGraphClient<C extends Capability = Capability> = GraphClient<C> & DynamicGraphMethods;
/**
 * Read/write surface available inside `runTransaction`. Combines
 * `GraphReader` and `GraphWriter`; the backing `TransactionBackend`
 * provides snapshot-consistent reads, and a rejected write aborts the
 * surrounding transaction.
 */
interface GraphTransaction extends GraphReader, GraphWriter {
}
/**
 * Batched write surface. Writes are queued locally and applied as one
 * atomic multi-write batch when `commit()` resolves (see `BatchBackend`).
 */
interface GraphBatch extends GraphWriter {
    /** Apply all queued writes atomically. */
    commit(): Promise<void>;
}
/**
 * One hop of a multi-hop traversal — the options accepted by
 * `TraversalBuilder.follow`.
 */
interface HopDefinition {
    /** Edge type to follow on this hop. */
    axbType: string;
    /**
     * Hop direction. NOTE(review): presumably `'forward'` walks a→b and
     * `'reverse'` walks b→a — confirm against the traversal layer.
     */
    direction?: 'forward' | 'reverse';
    /** Optional `aType` narrowing on the edges followed — assumed endpoint-type filter; verify. */
    aType?: string;
    /** Optional `bType` narrowing on the edges followed — assumed endpoint-type filter; verify. */
    bType?: string;
    /**
     * Cap on edges for this hop. NOTE(review): whether this is per-source
     * or total for the hop is not stated here — confirm.
     */
    limit?: number;
    /** Ordering applied to this hop's edge query. */
    orderBy?: {
        field: string;
        direction?: 'asc' | 'desc';
    };
    /**
     * JS-side edge predicate. Hops carrying a JS filter are ineligible for
     * server-side engine traversal (see `TraversalOptions.engineTraversal`).
     */
    filter?: (edge: StoredGraphRecord) => boolean;
    /**
     * Subgraph name to cross into for this hop (forward traversal only).
     *
     * When set, the traversal queries the named subgraph under each source node
     * instead of the current collection (`{collection}/{sourceUid}/{targetGraph}`).
     *
     * If omitted but the registry has a `targetGraph` for this `axbType`,
     * the registry value is used automatically.
     *
     * **Context tracking:** Once a hop crosses into a subgraph, subsequent
     * hops without `targetGraph` stay in that subgraph automatically. To
     * cross into a different subgraph, set `targetGraph` explicitly on the
     * next hop — explicit `targetGraph` always resolves relative to the
     * root client, not the current subgraph. To return to the root graph,
     * create a separate traversal from the root client.
     */
    targetGraph?: string;
}
/** Options accepted by `TraversalBuilder.run`. */
interface TraversalOptions {
    /**
     * Budget on total documents read across the traversal; compared
     * against `TraversalResult.totalReads`, with overruns surfaced via
     * `truncated`. NOTE(review): exact cut-off point (mid-hop vs. hop
     * boundary) is not visible here — confirm.
     */
    maxReads?: number;
    /** Parallelism of the per-hop fan-out — assumed max concurrent source queries; confirm. */
    concurrency?: number;
    /** Presumably include nodes reached at intermediate hops in the result, not just the final frontier — confirm. */
    returnIntermediates?: boolean;
    /**
     * Engine-level traversal mode. Controls whether the traversal layer
     * tries to compile the hop chain into one server-side nested Pipeline
     * (Firestore Enterprise only, gated by `traversal.serverSide`).
     *
     * - `'auto'` (default) — use engine traversal when the backend
     *   declares the capability AND the spec passes the compiler's
     *   eligibility checks (no cross-graph hops, no JS filters, depth
     *   ≤ `MAX_PIPELINE_DEPTH`, response-size product ≤ `maxReads`,
     *   `limitPerSource` set on every hop). Otherwise fall back to
     *   the per-hop loop. No warning fires on fallback.
     *
     * - `'force'` — engine traversal MUST run. If the backend lacks
     *   the capability or the spec is ineligible, the traversal throws
     *   `FiregraphError('UNSUPPORTED_OPERATION')`. Useful for
     *   benchmarking and tests.
     *
     * - `'off'` — never use engine traversal, even when available.
     *   The traversal layer always uses the per-hop loop.
     *
     * Default: `'auto'`.
     */
    engineTraversal?: 'auto' | 'force' | 'off';
}
/** Per-hop outcome within a `TraversalResult`. */
interface HopResult {
    /** Edge type this hop followed. */
    axbType: string;
    /** Position of this hop in the chain (indexing base not visible here — confirm). */
    depth: number;
    /** Edges read for this hop. */
    edges: StoredGraphRecord[];
    /** Number of source nodes this hop fanned out from. */
    sourceCount: number;
    /** True when this hop stopped early (e.g. a read budget or limit was hit). */
    truncated: boolean;
}
/** Aggregate outcome of a traversal run. */
interface TraversalResult {
    /** Nodes reached. NOTE(review): inclusion of intermediate-hop nodes presumably depends on `returnIntermediates` — confirm. */
    nodes: StoredGraphRecord[];
    /** Per-hop breakdown, in hop order. */
    hops: HopResult[];
    /** Total documents read across all hops. */
    totalReads: number;
    /** True when any hop was cut short (cf. `TraversalOptions.maxReads`). */
    truncated: boolean;
}
/**
 * Fluent multi-hop traversal spec: chain `follow(...)` once per hop,
 * then `run(...)` to execute.
 */
interface TraversalBuilder {
    /** Append a hop over `axbType`; remaining hop options mirror `HopDefinition`. */
    follow(axbType: string, options?: Omit<HopDefinition, 'axbType'>): TraversalBuilder;
    /** Execute the traversal and collect per-hop results. */
    run(options?: TraversalOptions): Promise<TraversalResult>;
}
/** Tuning knobs shared by the bulk write helpers (cascade delete, bulk update/delete). */
interface BulkOptions {
    /** Max operations per Firestore batch (default 500, Firestore hard limit). */
    batchSize?: number;
    /** Number of retry attempts per failed batch (default 3). */
    maxRetries?: number;
    /** Called after each batch commits. */
    onProgress?: (progress: BulkProgress) => void;
    /**
     * Recursively delete subcollections (subgraphs) under the node's document.
     * Defaults to `true` for `removeNodeCascade`.
     */
    deleteSubcollections?: boolean;
}
/** Progress snapshot handed to `BulkOptions.onProgress` after each committed batch. */
interface BulkProgress {
    /** Batches committed so far. */
    completedBatches: number;
    /** Total batches planned. */
    totalBatches: number;
    /** Total documents deleted so far. */
    deletedSoFar: number;
}
/** Outcome of a bulk operation (`bulkDelete` / `bulkUpdate`). */
interface BulkResult {
    /**
     * Total documents affected.
     *
     * For `bulkDelete()` this is the count of deleted documents; for
     * `bulkUpdate()` this is the count of updated documents (the field name
     * is a legacy from cascade-delete).
     */
    deleted: number;
    /** Number of batches committed. */
    batches: number;
    /** Errors from batches that failed after all retries. */
    errors: BulkBatchError[];
}
/** One batch that failed permanently (all retries exhausted). */
interface BulkBatchError {
    /** Zero-based index of the failed batch. */
    batchIndex: number;
    /** The underlying error. */
    error: Error;
    /** Number of operations in this batch that were not applied. */
    operationCount: number;
}
/** Outcome of `removeNodeCascade` — bulk stats plus node/edge specifics. */
interface CascadeResult extends BulkResult {
    /** Number of edges deleted. */
    edgesDeleted: number;
    /** Whether the node itself was deleted. */
    nodeDeleted: boolean;
}
1518
+
1519
+ /**
1520
+ * Write-plan helper — flattens partial-update payloads into a list of
1521
+ * deep-path operations every backend can execute identically.
1522
+ *
1523
+ * Background: firegraph used to ship two write semantics that quietly
1524
+ * disagreed about depth.
1525
+ * - `putNode`/`putEdge` did a full document replace.
1526
+ * - `updateNode`/`updateEdge` did a one-level shallow merge: top-level
1527
+ * keys were preserved, but nested objects were replaced wholesale.
1528
+ *
1529
+ * Both behaviours dropped sibling keys silently. The 0.12 contract is that
1530
+ * `put*` and `update*` deep-merge by default (sibling keys at any depth
1531
+ * survive); `replace*` is the explicit escape hatch.
1532
+ *
1533
+ * `flattenPatch` walks a partial-update payload and emits one
1534
+ * {@link DataPathOp} per terminal value. Plain objects recurse; arrays,
1535
+ * primitives, Firestore special types, and tagged firegraph-serialization
1536
+ * objects are terminal (replaced as a unit). `undefined` values are
1537
+ * skipped; `null` is preserved as a real `null` write; the
1538
+ * {@link DELETE_FIELD} sentinel marks a field for removal.
1539
+ *
1540
+ * The output is deliberately backend-agnostic. Each backend translates ops
1541
+ * into its native dialect:
1542
+ * - Firestore: dotted field path → `data.a.b.c` for `update()`.
1543
+ * - SQLite / DO SQLite: `json_set(data, '$.a.b.c', ?)` /
1544
+ * `json_remove(data, '$.a.b.c')`.
1545
+ */
/**
 * Sentinel returned by {@link deleteField}. Treated by all backends as
 * "remove this field from the stored document".
 *
 * Equivalent to Firestore's `FieldValue.delete()`, but works for SQLite
 * backends too. Use inside `updateNode`/`updateEdge` payloads.
 */
declare const DELETE_FIELD: unique symbol;
/** Type of the {@link DELETE_FIELD} sentinel — a unique symbol, so it can never collide with user data. */
type DeleteSentinel = typeof DELETE_FIELD;
/**
 * Returns the firegraph delete sentinel. Place this anywhere in an
 * `updateNode`/`updateEdge` payload to remove the corresponding field.
 *
 * ```ts
 * await client.updateNode('tour', uid, {
 *   attrs: { obsoleteFlag: deleteField() },
 * });
 * ```
 *
 * @returns The {@link DELETE_FIELD} sentinel.
 */
declare function deleteField(): DeleteSentinel;
/** Type guard for the delete sentinel. */
declare function isDeleteSentinel(value: unknown): value is DeleteSentinel;
/**
 * Single terminal write operation produced by {@link flattenPatch}.
 *
 * `path` is a non-empty array of plain object keys. `value` is the value to
 * write; ignored when `delete` is `true`. Arrays / primitives / Firestore
 * special types appear here as whole terminal values.
 */
interface DataPathOp {
    /** Non-empty key path from the `data` root to the terminal value. */
    path: readonly string[];
    /** Value to write; ignored when `delete` is `true`. */
    value: unknown;
    /** True when the field at `path` should be removed instead of written. */
    delete: boolean;
}
/**
 * Flatten a partial-update payload into a list of terminal {@link DataPathOp}s.
 *
 * Rules:
 * - Plain objects (no prototype or `Object.prototype`) recurse — each
 *   key becomes another path segment.
 * - Arrays are terminal: writing `{tags: ['a']}` overwrites the whole
 *   `tags` array. Element-wise array merging is intentionally NOT
 *   supported — it's almost never what callers actually want, and
 *   Firestore `arrayUnion`/`arrayRemove` give precise semantics when
 *   they are.
 * - `undefined` values are skipped (no op generated). Use
 *   {@link deleteField} if you actually want to remove a field.
 * - `null` is preserved verbatim — emits a terminal op with `value: null`.
 * - {@link DELETE_FIELD} produces an op with `delete: true`.
 * - Firestore special types and tagged serialization payloads are terminal.
 * - Class instances are terminal.
 *
 * Throws if any object key on the recursion path is unsafe (see
 * {@link assertSafePath}).
 *
 * @param data Partial-update payload to flatten.
 * @returns One {@link DataPathOp} per terminal value.
 */
declare function flattenPatch(data: Record<string, unknown>): DataPathOp[];
1602
+
1603
+ /**
1604
+ * Backend abstraction for firegraph.
1605
+ *
1606
+ * `StorageBackend` is the single interface every storage driver implements.
1607
+ * The Firestore backend wraps `@google-cloud/firestore`; the SQLite backend
1608
+ * (shared by D1 and Durable Object SQLite) uses a parameterized SQL executor.
1609
+ *
1610
+ * `GraphClientImpl` and friends depend only on this interface — they have
1611
+ * no direct knowledge of Firestore or SQLite.
1612
+ */
1613
+
/**
 * Runtime descriptor of which `Capability`s a `StorageBackend` actually
 * implements. Static for the lifetime of a backend instance; declared at
 * construction. The phantom `_phantom` field is a type-level marker
 * (never read at runtime) that lets the type parameter `C` flow through
 * the descriptor for use by `GraphClient<C>` conditional gating.
 *
 * Use `createCapabilities` to construct one. Use `.has(c)` to check
 * membership at runtime; the type system gates extension methods on the
 * client level (see `.claude/backend-capabilities.md`).
 */
interface BackendCapabilities<C extends Capability = Capability> {
    /** Runtime membership check. Accepts any `Capability`, not just members of `C`, so cap-less probes stay cheap. */
    has(capability: Capability): boolean;
    /** Iterate declared capabilities (diagnostics, error messages). */
    values(): IterableIterator<Capability>;
    /** Type-level marker. Never read at runtime. */
    readonly _phantom?: C;
}
/**
 * Construct a `BackendCapabilities<C>` from an explicit set. The set is
 * captured by reference; callers should treat it as readonly after passing
 * it in. The runtime cost of `has()` is one Set lookup.
 *
 * @param caps Declared capability set — captured by reference, not copied.
 * @returns Descriptor backed directly by `caps`.
 */
declare function createCapabilities<C extends Capability>(caps: ReadonlySet<C>): BackendCapabilities<C>;
/**
 * Intersect multiple capability sets. Used by `RoutingStorageBackend` to
 * derive the capability set of a composite backend: a routed graph can
 * only honour a capability if every wrapped backend honours it.
 *
 * @param parts Capability descriptors of the wrapped backends.
 * @returns Descriptor containing only capabilities present in every part.
 */
declare function intersectCapabilities(parts: ReadonlyArray<BackendCapabilities>): BackendCapabilities;
/**
 * Per-record write payload — backend-agnostic. Timestamps are not present;
 * the backend supplies them via `serverTimestamp()` placeholders that it
 * itself resolves at commit time.
 */
interface WritableRecord {
    /** Envelope field: `a`-endpoint type. */
    aType: string;
    /** Envelope field: `a`-endpoint UID. */
    aUid: string;
    /** Envelope field: edge type. */
    axbType: string;
    /** Envelope field: `b`-endpoint type. */
    bType: string;
    /** Envelope field: `b`-endpoint UID. */
    bUid: string;
    /** User payload, stored under `data` (deep-path rules per `flattenPatch`). */
    data: Record<string, unknown>;
    /** Schema version (set by the writer when registry has migrations). */
    v?: number;
}
/**
 * Write semantics for `setDoc`.
 *
 * - `'merge'` — the new contract (0.12+). Existing fields not mentioned
 *   in the new data survive; nested objects are recursively merged;
 *   arrays are replaced as a unit. This is the default for
 *   `putNode` / `putEdge`.
 * - `'replace'` — the document is replaced wholesale, dropping any
 *   fields not present in the payload. This is the explicit escape
 *   hatch surfaced as `replaceNode` / `replaceEdge` and used by
 *   migration write-back.
 */
type WriteMode = 'merge' | 'replace';
/**
 * Patch shape for `updateDoc`.
 *
 * - `dataOps`: list of deep-path terminal ops produced by
 *   `flattenPatch()` (one op per leaf — arrays / primitives / Firestore
 *   special types are terminal). Used by `updateNode` / `updateEdge`.
 *   Sibling keys at every depth are preserved.
 * - `replaceData`: full `data` replacement. Used only by the migration
 *   write-back path, which has already produced a complete migrated
 *   document.
 * - `v`: optional schema-version stamp.
 *
 * `updatedAt` is always set by the backend.
 */
interface UpdatePayload {
    /** Deep-path terminal ops from `flattenPatch()` — sibling-preserving merge. */
    dataOps?: DataPathOp[];
    /** Full `data` replacement (migration write-back only). */
    replaceData?: Record<string, unknown>;
    /** Optional schema-version stamp. */
    v?: number;
}
/**
 * Read/write transaction adapter. Mirrors Firestore's transaction semantics:
 * reads are snapshot-consistent; writes are issued inside the transaction
 * and a rejection from any write aborts the surrounding `runTransaction`.
 *
 * Writes return `Promise<void>` so SQL drivers can surface row-level errors
 * (constraint violations, malformed JSON paths) rather than swallowing them.
 * Firestore implementations can resolve synchronously since the underlying
 * `Transaction.set/update/delete` calls are themselves synchronous buffers.
 */
interface TransactionBackend {
    /** Transactional read — same shape as `StorageBackend.getDoc`, at the transaction snapshot. */
    getDoc(docId: string): Promise<StoredGraphRecord | null>;
    /** Transactional query — same shape as `StorageBackend.query`, at the transaction snapshot. */
    query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;
    /** Issue a set/replace write (see `WriteMode`); rejection aborts the transaction. */
    setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;
    /** Issue a deep-path patch (see `UpdatePayload`); rejection aborts the transaction. */
    updateDoc(docId: string, update: UpdatePayload): Promise<void>;
    /** Issue a delete; rejection aborts the transaction. */
    deleteDoc(docId: string): Promise<void>;
}
/**
 * Atomic multi-write batch.
 */
interface BatchBackend {
    /** Queue a set/replace write (see `WriteMode`). */
    setDoc(docId: string, record: WritableRecord, mode: WriteMode): void;
    /** Queue a deep-path patch (see `UpdatePayload`). */
    updateDoc(docId: string, update: UpdatePayload): void;
    /** Queue a delete. */
    deleteDoc(docId: string): void;
    /** Apply all queued writes atomically. */
    commit(): Promise<void>;
}
/**
 * The single storage abstraction.
 *
 * Each backend instance is scoped to a "graph location" — for Firestore
 * that's a collection path; for SQLite it's a (table, scopePath) pair.
 * `subgraph()` returns a child backend bound to a nested location.
 */
interface StorageBackend<C extends Capability = Capability> {
    /** Capabilities this backend instance declares. Static for the lifetime of the backend. */
    readonly capabilities: BackendCapabilities<C>;
    /** Backend-internal location identifier (collection path or table name). */
    readonly collectionPath: string;
    /** Subgraph scope (empty string for root). */
    readonly scopePath: string;
    /** Fetch one record by document id; resolves `null` when absent. */
    getDoc(docId: string): Promise<StoredGraphRecord | null>;
    /** Run a filtered query against this backend's location. */
    query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;
    /** Create or overwrite a record; `mode` selects merge vs. replace (see `WriteMode`). */
    setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;
    /** Apply a deep-path patch or full `data` replacement (see `UpdatePayload`). */
    updateDoc(docId: string, update: UpdatePayload): Promise<void>;
    /** Delete one record by document id. */
    deleteDoc(docId: string): Promise<void>;
    /** Run `fn` inside a transaction — consistency contract per `TransactionBackend`. */
    runTransaction<T>(fn: (tx: TransactionBackend) => Promise<T>): Promise<T>;
    /** Start an atomic multi-write batch (see `BatchBackend`). */
    createBatch(): BatchBackend;
    /** Child backend bound to the nested location `name` under `parentNodeUid`. */
    subgraph(parentNodeUid: string, name: string): StorageBackend;
    /**
     * Delete a node plus its incident edges; nested subgraphs are removed
     * too when `BulkOptions.deleteSubcollections` is set (its default for
     * this call). Edge/node counts come back on `CascadeResult`.
     */
    removeNodeCascade(uid: string, reader: GraphReader, options?: BulkOptions): Promise<CascadeResult>;
    /** Bulk-delete all edges matching `params`; batching tuned via `BulkOptions`. */
    bulkRemoveEdges(params: FindEdgesParams, reader: GraphReader, options?: BulkOptions): Promise<BulkResult>;
    /**
     * Find edges across all subgraphs sharing a given collection name.
     * Optional — backends that can't support this should throw a clear error.
     */
    findEdgesGlobal?(params: FindEdgesParams, collectionName?: string): Promise<StoredGraphRecord[]>;
    /**
     * Run an aggregate query (count/sum/avg/min/max). Present only on backends
     * that declare `query.aggregate`. The map's keys are caller-defined aliases
     * matching `AggregateSpec`; values are the resolved numeric results.
     *
     * Backends that can't satisfy a particular op throw `FiregraphError` with
     * code `UNSUPPORTED_AGGREGATE` (e.g. Firestore Standard rejects min/max).
     */
    aggregate?(spec: AggregateSpec, filters: QueryFilter[]): Promise<Record<string, number>>;
    /**
     * Delete every row matching `filters` in one server-side statement.
     * Present only on backends that declare `query.dml`. The default cascade
     * implementation in `bulk.ts` uses this when available; backends without
     * the cap (e.g. Firestore Standard) fall back to a fetch-then-delete
     * loop driven by `findEdges` + per-row `deleteDoc`.
     *
     * The contract matches `findEdges`: scope predicates are honoured
     * automatically by the backend's own internal scope tracking. Callers
     * supply only the filter list — the same shape produced by
     * `buildEdgeQueryPlan`.
     */
    bulkDelete?(filters: QueryFilter[], options?: BulkOptions): Promise<BulkResult>;
    /**
     * Update every row matching `filters` with `patch` in one server-side
     * statement. The patch is deep-merged into each row's `data` field, the
     * same flatten-then-merge pipeline `updateDoc` uses. Identifying columns
     * (`aType`, `axbType`, `aUid`, `bType`, `bUid`, `v`) are not writable
     * through this path.
     */
    bulkUpdate?(filters: QueryFilter[], patch: BulkUpdatePatch, options?: BulkOptions): Promise<BulkResult>;
    /**
     * Fan out from `params.sources` over a single edge type in one server-side
     * round trip. Present only on backends that declare `query.join`. The
     * traversal layer (`traverse.ts`) calls `expand` once per hop when the
     * backend declares the cap; otherwise it falls back to the per-source
     * `findEdges` loop.
     *
     * Cross-graph hops are never dispatched through `expand` — each source
     * UID resolves to a distinct subgraph location, which can't be fanned
     * out as a single statement. The traversal layer enforces that
     * boundary; `expand` itself does not need to inspect `targetGraph`.
     */
    expand?(params: ExpandParams): Promise<ExpandResult>;
    /**
     * Compile a multi-hop traversal spec into one server-side query and
     * dispatch a single round trip. Present only on backends that declare
     * `traversal.serverSide` (Firestore Enterprise today, via nested
     * Pipelines that combine `define`, `addFields`, and
     * `toArrayExpression`).
     *
     * The traversal layer (`traverse.ts`) compiles a `TraversalBuilder`
     * spec into `EngineTraversalParams` only when the spec is eligible
     * (no cross-graph hops, no JS filters, depth ≤ `MAX_PIPELINE_DEPTH`,
     * `Π(limitPerSource_i × N_i) ≤ maxReads`, `limitPerSource` set on
     * every hop). Ineligible specs fall back to the per-hop `expand()`
     * loop without invoking this method.
     *
     * The result collapses the nested-pipeline tree into per-hop edge
     * arrays so the traversal layer can fold the result into the same
     * `HopResult[]` shape it produces from the per-hop loop.
     */
    runEngineTraversal?(params: EngineTraversalParams): Promise<EngineTraversalResult>;
    /**
     * Run a projecting query — return only the listed fields per row. Present
     * only on backends that declare `query.select`. The cap-less fallback is
     * `findEdges` followed by a JS-side projection in user code; firegraph
     * does not auto-fall-back because the wire-payload reduction is the only
     * reason to call this method.
     *
     * `select` is the explicit field list; `filters` and `options` mirror the
     * `query()` shape. The returned rows have one slot per unique entry in
     * `select`. Field-name interpretation is the backend's responsibility:
     * built-in fields resolve to columns / Firestore field names, bare names
     * resolve to `data.<name>`, and dotted paths resolve verbatim. See
     * `FindEdgesProjectedParams` for the user-facing contract.
     *
     * Migrations are not applied to the result — the caller asked for a
     * specific projection shape, and rehydrating a partial record into the
     * migration pipeline would require synthesising every absent field.
     */
    findEdgesProjected?(select: ReadonlyArray<string>, filters: QueryFilter[], options?: QueryOptions): Promise<Array<Record<string, unknown>>>;
    /**
     * Run a vector / nearest-neighbour query. Present only on backends that
     * declare `search.vector`. There is no client-side fallback — the
     * SQLite-shaped backends (shared SQLite, Cloudflare DO) genuinely have
     * no native ANN index, and a JS-side k-NN sweep over `findEdges()` would
     * scale catastrophically. Backends without the cap throw
     * `UNSUPPORTED_OPERATION` from the client wrapper.
     *
     * `params` carries the user-facing shape (vector field path, query
     * vector, distance metric, optional threshold and result-field). The
     * client wrapper has already run scan-protection on the identifying
     * / `where` filter list before dispatching.
     *
     * Path normalisation is the backend's responsibility: rewriting bare
     * `vectorField` / `distanceResultField` names to `data.<name>` and
     * rejecting envelope fields (`aType`, `axbType`, `bType`, `aUid`,
     * `bUid`, `v`, etc.) with `INVALID_QUERY` happens inside the
     * backend, not the client wrapper. The two in-tree Firestore-edition
     * backends share `runFirestoreFindNearest` (see
     * `src/internal/firestore-vector.ts`) for this; third-party backends
     * declaring `search.vector` must apply equivalent normalisation
     * before calling their underlying SDK.
     *
     * The backend is also responsible for translating to the underlying
     * SDK call (`Query.findNearest` on Firestore today) and decoding the
     * result snapshot into `StoredGraphRecord[]`.
     *
     * Migrations are not applied to the result. The vector index walks the
     * raw stored shape; rehydrating into the migration pipeline before
     * returning would change the candidate set the index already chose.
     */
    findNearest?(params: FindNearestParams): Promise<StoredGraphRecord[]>;
    /**
     * Run a full-text search query. Present only on backends that declare
     * `search.fullText`. There is no client-side fallback — the only
     * in-tree backend that supports it is Firestore Enterprise (via
     * Pipeline `search({ query: documentMatches(...) })`); Standard and
     * the SQLite-shaped backends throw `UNSUPPORTED_OPERATION` from the
     * client wrapper.
     *
     * The backend is responsible for path normalisation (rewriting
     * bare `fields` entries to `data.<name>`, rejecting envelope fields
     * with `INVALID_QUERY`), translating to the underlying SDK call,
     * and decoding the result into `StoredGraphRecord[]`.
     *
     * Migrations are not applied to the result. The search index walked
     * the raw stored shape; rehydrating into the migration pipeline
     * would change the candidate set the index already scored.
     */
    fullTextSearch?(params: FullTextSearchParams): Promise<StoredGraphRecord[]>;
    /**
     * Run a geospatial distance search. Present only on backends that
     * declare `search.geo`. There is no client-side fallback — only
     * Firestore Enterprise has a native geo index (translated via
     * Pipeline `search({ query: geoDistance(...).lessThanOrEqual(...) })`).
     * Backends without the cap throw `UNSUPPORTED_OPERATION` from the
     * client wrapper.
     *
     * The backend is responsible for `geoField` path normalisation,
     * translating `point` to a Firestore `GeoPoint`, applying the
     * radius cap inside the search query, and (when
     * `orderByDistance` is true / unset) emitting the
     * `geoDistance(...).ascending()` ordering inside the search stage.
     *
     * Migrations are not applied to the result.
     */
    geoSearch?(params: GeoSearchParams): Promise<StoredGraphRecord[]>;
}
1896
+
1897
// NOTE(review): single-letter aliases appear to be bundler-generated
// (tsup/rollup) chunk re-exports for the shared backend chunk — edit the
// package source and rebuild rather than hand-editing this map.
export { type DynamicRegistryConfig as $, type AggregateExtension as A, type BackendCapabilities as B, type BulkBatchError as C, DELETE_FIELD as D, type ExpandParams as E, type FindEdgesParams as F, type GraphRegistry as G, type BulkOptions as H, type IndexSpec as I, type JoinExtension as J, type BulkProgress as K, type BulkResult as L, type MigrationWriteBack as M, type Capability as N, type CascadeResult as O, type CoreGraphClient as P, type QueryPlan as Q, type RegistryEntry as R, type StorageBackend as S, type TransactionBackend as T, type UpdatePayload as U, type DefineTypeOptions as V, type WritableRecord as W, type DiscoveredEntity as X, type DistanceMeasure as Y, type DynamicGraphClient as Z, type DynamicGraphMethods as _, type BatchBackend as a, type EdgeTopology as a0, type EdgeTypeData as a1, type FindEdgesProjectedParams as a2, type FindNearestParams as a3, type FiregraphConfig as a4, type FullTextSearchExtension as a5, type GeoExtension as a6, type GraphBatch as a7, type GraphClientOptions as a8, type GraphRecord as a9, type GraphTransaction as aa, type GraphWriter as ab, type HopDefinition as ac, type HopResult as ad, type IndexFieldSpec as ae, type NodeTypeData as af, type ProjectedRow as ag, type QueryMode as ah, type QueryOptions as ai, type RawFirestoreExtension as aj, type RawSqlExtension as ak, type RealtimeListenExtension as al, type ScanProtection as am, type SelectExtension as an, type TraversalOptions as ao, type TraversalResult as ap, type VectorExtension as aq, type ViewContext as ar, type ViewDefaultsConfig as as, type ViewResolverConfig as at, type WhereClause as au, defineConfig as av, resolveView as aw, type BulkUpdatePatch as b, type DataPathOp as c, type DmlExtension as d, type ExpandResult as e, type WriteMode as f, createCapabilities as g, deleteField as h, flattenPatch as i, intersectCapabilities as j, isDeleteSentinel as k, type DiscoveryResult as l, type StoredGraphRecord as m, type MigrationStep as n, type FindNodesParams as o, type QueryFilter as p, type MigrationExecutor as q, type MigrationFn as r, type StoredMigrationStep as s, type GraphClient as t, type GraphReader as u, type TraversalBuilder as v, type AggregateField as w, type AggregateOp as x, type AggregateResult as y, type AggregateSpec as z };