@typicalday/firegraph 0.11.2 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +355 -78
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +365 -5
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +209 -7
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/{chunk-5753Y42M.js → chunk-C2QMD7RY.js} +6 -10
- package/dist/chunk-C2QMD7RY.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-EQJUUVFG.js +14 -0
- package/dist/chunk-EQJUUVFG.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/chunk-TK64DNVK.js +256 -0
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-NJSOD64C.js → chunk-WRTFC5NG.js} +438 -30
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +1386 -74
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +217 -13
- package/dist/cloudflare/index.d.ts +217 -13
- package/dist/cloudflare/index.js +639 -180
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +809 -534
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +24 -100
- package/dist/index.d.ts +24 -100
- package/dist/index.js +184 -531
- package/dist/index.js.map +1 -1
- package/dist/registry-Bc7h6WTM.d.cts +64 -0
- package/dist/registry-C2KUPVZj.d.ts +64 -0
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/{serialization-ZZ7RSDRX.js → serialization-OE2PFZMY.js} +6 -4
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-U-MLShlg.d.ts +0 -97
- package/dist/backend-np4gEVhB.d.cts +0 -97
- package/dist/chunk-5753Y42M.js.map +0 -1
- package/dist/chunk-NJSOD64C.js.map +0 -1
- package/dist/chunk-R7CRGYY4.js +0 -94
- package/dist/chunk-R7CRGYY4.js.map +0 -1
- package/dist/types-BGWxcpI_.d.cts +0 -736
- package/dist/types-BGWxcpI_.d.ts +0 -736
- /package/dist/{serialization-ZZ7RSDRX.js.map → serialization-OE2PFZMY.js.map} +0 -0
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
// src/internal/serialization-tag.ts
|
|
2
|
+
var SERIALIZATION_TAG = "__firegraph_ser__";
|
|
3
|
+
var KNOWN_TYPES = /* @__PURE__ */ new Set(["Timestamp", "GeoPoint", "VectorValue", "DocumentReference"]);
|
|
4
|
+
function isTaggedValue(value) {
|
|
5
|
+
if (value === null || typeof value !== "object") return false;
|
|
6
|
+
const tag = value[SERIALIZATION_TAG];
|
|
7
|
+
return typeof tag === "string" && KNOWN_TYPES.has(tag);
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
export {
|
|
11
|
+
SERIALIZATION_TAG,
|
|
12
|
+
isTaggedValue
|
|
13
|
+
};
|
|
14
|
+
//# sourceMappingURL=chunk-EQJUUVFG.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/internal/serialization-tag.ts"],"sourcesContent":["/**\n * Firegraph serialization tag — split from `src/serialization.ts` so it can\n * be imported from Workers-facing code without dragging in\n * `@google-cloud/firestore`.\n *\n * The full serialization module (with Timestamp/GeoPoint round-tripping)\n * lives one folder up because the sandbox migration pipeline needs it; the\n * write-plan helper only needs to recognise tagged objects to keep them\n * terminal during patch flattening, so it imports just the tag from here.\n */\n\n/** Sentinel key used to tag serialized Firestore types. */\nexport const SERIALIZATION_TAG = '__firegraph_ser__' as const;\n\nconst KNOWN_TYPES = new Set(['Timestamp', 'GeoPoint', 'VectorValue', 'DocumentReference']);\n\n/** Check if a value is a tagged serialized Firestore type. */\nexport function isTaggedValue(value: unknown): boolean {\n if (value === null || typeof value !== 'object') return false;\n const tag = (value as Record<string, unknown>)[SERIALIZATION_TAG];\n return typeof tag === 'string' && KNOWN_TYPES.has(tag);\n}\n"],"mappings":";AAYO,IAAM,oBAAoB;AAEjC,IAAM,cAAc,oBAAI,IAAI,CAAC,aAAa,YAAY,eAAe,mBAAmB,CAAC;AAGlF,SAAS,cAAc,OAAyB;AACrD,MAAI,UAAU,QAAQ,OAAO,UAAU,SAAU,QAAO;AACxD,QAAM,MAAO,MAAkC,iBAAiB;AAChE,SAAO,OAAO,QAAQ,YAAY,YAAY,IAAI,GAAG;AACvD;","names":[]}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
// src/internal/backend.ts
|
|
2
|
+
function createCapabilities(caps) {
|
|
3
|
+
return {
|
|
4
|
+
has: (capability) => caps.has(capability),
|
|
5
|
+
values: () => caps.values()
|
|
6
|
+
};
|
|
7
|
+
}
|
|
8
|
+
function intersectCapabilities(parts) {
|
|
9
|
+
if (parts.length === 0) return createCapabilities(/* @__PURE__ */ new Set());
|
|
10
|
+
const sets = parts.map((p) => new Set(p.values()));
|
|
11
|
+
const [first, ...rest] = sets;
|
|
12
|
+
const intersection = /* @__PURE__ */ new Set();
|
|
13
|
+
for (const c of first) {
|
|
14
|
+
if (rest.every((s) => s.has(c))) intersection.add(c);
|
|
15
|
+
}
|
|
16
|
+
return createCapabilities(intersection);
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
export {
|
|
20
|
+
createCapabilities,
|
|
21
|
+
intersectCapabilities
|
|
22
|
+
};
|
|
23
|
+
//# sourceMappingURL=chunk-N5HFDWQX.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/internal/backend.ts"],"sourcesContent":["/**\n * Backend abstraction for firegraph.\n *\n * `StorageBackend` is the single interface every storage driver implements.\n * The Firestore backend wraps `@google-cloud/firestore`; the SQLite backend\n * (shared by D1 and Durable Object SQLite) uses a parameterized SQL executor.\n *\n * `GraphClientImpl` and friends depend only on this interface — they have\n * no direct knowledge of Firestore or SQLite.\n */\n\nimport type {\n AggregateSpec,\n BulkOptions,\n BulkResult,\n BulkUpdatePatch,\n Capability,\n CascadeResult,\n EngineTraversalParams,\n EngineTraversalResult,\n ExpandParams,\n ExpandResult,\n FindEdgesParams,\n FindNearestParams,\n FullTextSearchParams,\n GeoSearchParams,\n GraphReader,\n QueryFilter,\n QueryOptions,\n StoredGraphRecord,\n} from '../types.js';\nimport type { DataPathOp } from './write-plan.js';\n\n/**\n * Runtime descriptor of which `Capability`s a `StorageBackend` actually\n * implements. Static for the lifetime of a backend instance; declared at\n * construction. The phantom `_phantom` field is a type-level marker\n * (never read at runtime) that lets the type parameter `C` flow through\n * the descriptor for use by `GraphClient<C>` conditional gating.\n *\n * Use `createCapabilities` to construct one. Use `.has(c)` to check\n * membership at runtime; the type system gates extension methods on the\n * client level (see `.claude/backend-capabilities.md`).\n */\nexport interface BackendCapabilities<C extends Capability = Capability> {\n /** Runtime membership check. */\n has(capability: Capability): boolean;\n /** Iterate declared capabilities (diagnostics, error messages). */\n values(): IterableIterator<Capability>;\n /** Type-level marker. Never read at runtime. */\n readonly _phantom?: C;\n}\n\n/**\n * Construct a `BackendCapabilities<C>` from an explicit set. The set is\n * captured by reference; callers should treat it as readonly after passing\n * it in. 
The runtime cost of `has()` is one Set lookup.\n */\nexport function createCapabilities<C extends Capability>(\n caps: ReadonlySet<C>,\n): BackendCapabilities<C> {\n return {\n has: (capability: Capability): boolean => caps.has(capability as C),\n values: () => caps.values() as IterableIterator<Capability>,\n };\n}\n\n/**\n * Intersect multiple capability sets. Used by `RoutingStorageBackend` to\n * derive the capability set of a composite backend: a routed graph can\n * only honour a capability if every wrapped backend honours it.\n */\nexport function intersectCapabilities(\n parts: ReadonlyArray<BackendCapabilities>,\n): BackendCapabilities {\n if (parts.length === 0) return createCapabilities(new Set<Capability>());\n const sets = parts.map((p) => new Set<Capability>(p.values()));\n const [first, ...rest] = sets;\n const intersection = new Set<Capability>();\n for (const c of first) {\n if (rest.every((s) => s.has(c))) intersection.add(c);\n }\n return createCapabilities(intersection);\n}\n\n/**\n * Per-record write payload — backend-agnostic. Timestamps are not present;\n * the backend supplies them via `serverTimestamp()` placeholders that it\n * itself resolves at commit time.\n */\nexport interface WritableRecord {\n aType: string;\n aUid: string;\n axbType: string;\n bType: string;\n bUid: string;\n data: Record<string, unknown>;\n /** Schema version (set by the writer when registry has migrations). */\n v?: number;\n}\n\n/**\n * Write semantics for `setDoc`.\n *\n * - `'merge'` — the new contract (0.12+). Existing fields not mentioned\n * in the new data survive; nested objects are recursively merged;\n * arrays are replaced as a unit. This is the default for\n * `putNode` / `putEdge`.\n * - `'replace'` — the document is replaced wholesale, dropping any\n * fields not present in the payload. 
This is the explicit escape\n * hatch surfaced as `replaceNode` / `replaceEdge` and used by\n * migration write-back.\n */\nexport type WriteMode = 'merge' | 'replace';\n\n/**\n * Patch shape for `updateDoc`.\n *\n * - `dataOps`: list of deep-path terminal ops produced by\n * `flattenPatch()` (one op per leaf — arrays / primitives / Firestore\n * special types are terminal). Used by `updateNode` / `updateEdge`.\n * Sibling keys at every depth are preserved.\n * - `replaceData`: full `data` replacement. Used only by the migration\n * write-back path, which has already produced a complete migrated\n * document.\n * - `v`: optional schema-version stamp.\n *\n * `updatedAt` is always set by the backend.\n */\nexport interface UpdatePayload {\n dataOps?: DataPathOp[];\n replaceData?: Record<string, unknown>;\n v?: number;\n}\n\n/**\n * Read/write transaction adapter. Mirrors Firestore's transaction semantics:\n * reads are snapshot-consistent; writes are issued inside the transaction\n * and a rejection from any write aborts the surrounding `runTransaction`.\n *\n * Writes return `Promise<void>` so SQL drivers can surface row-level errors\n * (constraint violations, malformed JSON paths) rather than swallowing them.\n * Firestore implementations can resolve synchronously since the underlying\n * `Transaction.set/update/delete` calls are themselves synchronous buffers.\n */\nexport interface TransactionBackend {\n getDoc(docId: string): Promise<StoredGraphRecord | null>;\n query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;\n updateDoc(docId: string, update: UpdatePayload): Promise<void>;\n deleteDoc(docId: string): Promise<void>;\n}\n\n/**\n * Atomic multi-write batch.\n */\nexport interface BatchBackend {\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): void;\n updateDoc(docId: string, update: UpdatePayload): void;\n deleteDoc(docId: 
string): void;\n commit(): Promise<void>;\n}\n\n/**\n * The single storage abstraction.\n *\n * Each backend instance is scoped to a \"graph location\" — for Firestore\n * that's a collection path; for SQLite it's a (table, scopePath) pair.\n * `subgraph()` returns a child backend bound to a nested location.\n */\nexport interface StorageBackend<C extends Capability = Capability> {\n /** Capabilities this backend instance declares. Static for the lifetime of the backend. */\n readonly capabilities: BackendCapabilities<C>;\n /** Backend-internal location identifier (collection path or table name). */\n readonly collectionPath: string;\n /** Subgraph scope (empty string for root). */\n readonly scopePath: string;\n\n // --- Reads ---\n getDoc(docId: string): Promise<StoredGraphRecord | null>;\n query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;\n\n // --- Writes ---\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;\n updateDoc(docId: string, update: UpdatePayload): Promise<void>;\n deleteDoc(docId: string): Promise<void>;\n\n // --- Transactions & batches ---\n runTransaction<T>(fn: (tx: TransactionBackend) => Promise<T>): Promise<T>;\n createBatch(): BatchBackend;\n\n // --- Subgraphs ---\n subgraph(parentNodeUid: string, name: string): StorageBackend;\n\n // --- Cascade & bulk ---\n removeNodeCascade(\n uid: string,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<CascadeResult>;\n bulkRemoveEdges(\n params: FindEdgesParams,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<BulkResult>;\n\n // --- Cross-collection queries ---\n /**\n * Find edges across all subgraphs sharing a given collection name.\n * Optional — backends that can't support this should throw a clear error.\n */\n findEdgesGlobal?(params: FindEdgesParams, collectionName?: string): Promise<StoredGraphRecord[]>;\n\n // --- Aggregations ---\n /**\n * Run an aggregate query (count/sum/avg/min/max). 
Present only on backends\n * that declare `query.aggregate`. The map's keys are caller-defined aliases\n * matching `AggregateSpec`; values are the resolved numeric results.\n *\n * Backends that can't satisfy a particular op throw `FiregraphError` with\n * code `UNSUPPORTED_AGGREGATE` (e.g. Firestore Standard rejects min/max).\n */\n aggregate?(spec: AggregateSpec, filters: QueryFilter[]): Promise<Record<string, number>>;\n\n // --- Server-side DML ---\n /**\n * Delete every row matching `filters` in one server-side statement.\n * Present only on backends that declare `query.dml`. The default cascade\n * implementation in `bulk.ts` uses this when available; backends without\n * the cap (e.g. Firestore Standard) fall back to a fetch-then-delete\n * loop driven by `findEdges` + per-row `deleteDoc`.\n *\n * The contract matches `findEdges`: scope predicates are honoured\n * automatically by the backend's own internal scope tracking. Callers\n * supply only the filter list — the same shape produced by\n * `buildEdgeQueryPlan`.\n */\n bulkDelete?(filters: QueryFilter[], options?: BulkOptions): Promise<BulkResult>;\n /**\n * Update every row matching `filters` with `patch` in one server-side\n * statement. The patch is deep-merged into each row's `data` field, the\n * same flatten-then-merge pipeline `updateDoc` uses. Identifying columns\n * (`aType`, `axbType`, `aUid`, `bType`, `bUid`, `v`) are not writable\n * through this path.\n */\n bulkUpdate?(\n filters: QueryFilter[],\n patch: BulkUpdatePatch,\n options?: BulkOptions,\n ): Promise<BulkResult>;\n\n // --- Server-side multi-source fan-out ---\n /**\n * Fan out from `params.sources` over a single edge type in one server-side\n * round trip. Present only on backends that declare `query.join`. 
The\n * traversal layer (`traverse.ts`) calls `expand` once per hop when the\n * backend declares the cap; otherwise it falls back to the per-source\n * `findEdges` loop.\n *\n * Cross-graph hops are never dispatched through `expand` — each source\n * UID resolves to a distinct subgraph location, which can't be fanned\n * out as a single statement. The traversal layer enforces that\n * boundary; `expand` itself does not need to inspect `targetGraph`.\n */\n expand?(params: ExpandParams): Promise<ExpandResult>;\n\n // --- Engine-level multi-hop traversal ---\n /**\n * Compile a multi-hop traversal spec into one server-side query and\n * dispatch a single round trip. Present only on backends that declare\n * `traversal.serverSide` (Firestore Enterprise today, via nested\n * Pipelines that combine `define`, `addFields`, and\n * `toArrayExpression`).\n *\n * The traversal layer (`traverse.ts`) compiles a `TraversalBuilder`\n * spec into `EngineTraversalParams` only when the spec is eligible\n * (no cross-graph hops, no JS filters, depth ≤ `MAX_PIPELINE_DEPTH`,\n * `Π(limitPerSource_i × N_i) ≤ maxReads`, `limitPerSource` set on\n * every hop). Ineligible specs fall back to the per-hop `expand()`\n * loop without invoking this method.\n *\n * The result collapses the nested-pipeline tree into per-hop edge\n * arrays so the traversal layer can fold the result into the same\n * `HopResult[]` shape it produces from the per-hop loop.\n */\n runEngineTraversal?(params: EngineTraversalParams): Promise<EngineTraversalResult>;\n\n // --- Server-side projection ---\n /**\n * Run a projecting query — return only the listed fields per row. Present\n * only on backends that declare `query.select`. 
The cap-less fallback is\n * `findEdges` followed by a JS-side projection in user code; firegraph\n * does not auto-fall-back because the wire-payload reduction is the only\n * reason to call this method.\n *\n * `select` is the explicit field list; `filters` and `options` mirror the\n * `query()` shape. The returned rows have one slot per unique entry in\n * `select`. Field-name interpretation is the backend's responsibility:\n * built-in fields resolve to columns / Firestore field names, bare names\n * resolve to `data.<name>`, and dotted paths resolve verbatim. See\n * `FindEdgesProjectedParams` for the user-facing contract.\n *\n * Migrations are not applied to the result — the caller asked for a\n * specific projection shape, and rehydrating a partial record into the\n * migration pipeline would require synthesising every absent field.\n */\n findEdgesProjected?(\n select: ReadonlyArray<string>,\n filters: QueryFilter[],\n options?: QueryOptions,\n ): Promise<Array<Record<string, unknown>>>;\n\n // --- Native vector / nearest-neighbour search ---\n /**\n * Run a vector / nearest-neighbour query. Present only on backends that\n * declare `search.vector`. There is no client-side fallback — the\n * SQLite-shaped backends (shared SQLite, Cloudflare DO) genuinely have\n * no native ANN index, and a JS-side k-NN sweep over `findEdges()` would\n * scale catastrophically. Backends without the cap throw\n * `UNSUPPORTED_OPERATION` from the client wrapper.\n *\n * `params` carries the user-facing shape (vector field path, query\n * vector, distance metric, optional threshold and result-field). The\n * client wrapper has already run scan-protection on the identifying\n * / `where` filter list before dispatching.\n *\n * Path normalisation is the backend's responsibility: rewriting bare\n * `vectorField` / `distanceResultField` names to `data.<name>` and\n * rejecting envelope fields (`aType`, `axbType`, `bType`, `aUid`,\n * `bUid`, `v`, etc.) 
with `INVALID_QUERY` happens inside the\n * backend, not the client wrapper. The two in-tree Firestore-edition\n * backends share `runFirestoreFindNearest` (see\n * `src/internal/firestore-vector.ts`) for this; third-party backends\n * declaring `search.vector` must apply equivalent normalisation\n * before calling their underlying SDK.\n *\n * The backend is also responsible for translating to the underlying\n * SDK call (`Query.findNearest` on Firestore today) and decoding the\n * result snapshot into `StoredGraphRecord[]`.\n *\n * Migrations are not applied to the result. The vector index walks the\n * raw stored shape; rehydrating into the migration pipeline before\n * returning would change the candidate set the index already chose.\n */\n findNearest?(params: FindNearestParams): Promise<StoredGraphRecord[]>;\n\n // --- Native full-text search ---\n /**\n * Run a full-text search query. Present only on backends that declare\n * `search.fullText`. There is no client-side fallback — the only\n * in-tree backend that supports it is Firestore Enterprise (via\n * Pipeline `search({ query: documentMatches(...) })`); Standard and\n * the SQLite-shaped backends throw `UNSUPPORTED_OPERATION` from the\n * client wrapper.\n *\n * The backend is responsible for path normalisation (rewriting\n * bare `fields` entries to `data.<name>`, rejecting envelope fields\n * with `INVALID_QUERY`), translating to the underlying SDK call,\n * and decoding the result into `StoredGraphRecord[]`.\n *\n * Migrations are not applied to the result. The search index walked\n * the raw stored shape; rehydrating into the migration pipeline\n * would change the candidate set the index already scored.\n */\n fullTextSearch?(params: FullTextSearchParams): Promise<StoredGraphRecord[]>;\n\n // --- Native geospatial distance search ---\n /**\n * Run a geospatial distance search. Present only on backends that\n * declare `search.geo`. 
There is no client-side fallback — only\n * Firestore Enterprise has a native geo index (translated via\n * Pipeline `search({ query: geoDistance(...).lessThanOrEqual(...) })`).\n * Backends without the cap throw `UNSUPPORTED_OPERATION` from the\n * client wrapper.\n *\n * The backend is responsible for `geoField` path normalisation,\n * translating `point` to a Firestore `GeoPoint`, applying the\n * radius cap inside the search query, and (when\n * `orderByDistance` is true / unset) emitting the\n * `geoDistance(...).ascending()` ordering inside the search stage.\n *\n * Migrations are not applied to the result.\n */\n geoSearch?(params: GeoSearchParams): Promise<StoredGraphRecord[]>;\n}\n"],"mappings":";AA0DO,SAAS,mBACd,MACwB;AACxB,SAAO;AAAA,IACL,KAAK,CAAC,eAAoC,KAAK,IAAI,UAAe;AAAA,IAClE,QAAQ,MAAM,KAAK,OAAO;AAAA,EAC5B;AACF;AAOO,SAAS,sBACd,OACqB;AACrB,MAAI,MAAM,WAAW,EAAG,QAAO,mBAAmB,oBAAI,IAAgB,CAAC;AACvE,QAAM,OAAO,MAAM,IAAI,CAAC,MAAM,IAAI,IAAgB,EAAE,OAAO,CAAC,CAAC;AAC7D,QAAM,CAAC,OAAO,GAAG,IAAI,IAAI;AACzB,QAAM,eAAe,oBAAI,IAAgB;AACzC,aAAW,KAAK,OAAO;AACrB,QAAI,KAAK,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC,EAAG,cAAa,IAAI,CAAC;AAAA,EACrD;AACA,SAAO,mBAAmB,YAAY;AACxC;","names":[]}
|
|
@@ -0,0 +1,573 @@
|
|
|
1
|
+
import {
|
|
2
|
+
NODE_RELATION,
|
|
3
|
+
computeEdgeDocId,
|
|
4
|
+
computeNodeDocId
|
|
5
|
+
} from "./chunk-WRTFC5NG.js";
|
|
6
|
+
import {
|
|
7
|
+
FiregraphError
|
|
8
|
+
} from "./chunk-TK64DNVK.js";
|
|
9
|
+
|
|
10
|
+
// src/bulk.ts
|
|
11
|
+
var MAX_BATCH_SIZE = 500;
|
|
12
|
+
var DEFAULT_MAX_RETRIES = 3;
|
|
13
|
+
var BASE_DELAY_MS = 200;
|
|
14
|
+
function sleep(ms) {
|
|
15
|
+
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
16
|
+
}
|
|
17
|
+
function chunk(arr, size) {
|
|
18
|
+
const chunks = [];
|
|
19
|
+
for (let i = 0; i < arr.length; i += size) {
|
|
20
|
+
chunks.push(arr.slice(i, i + size));
|
|
21
|
+
}
|
|
22
|
+
return chunks;
|
|
23
|
+
}
|
|
24
|
+
async function bulkDeleteDocIds(db, collectionPath, docIds, options) {
|
|
25
|
+
if (docIds.length === 0) {
|
|
26
|
+
return { deleted: 0, batches: 0, errors: [] };
|
|
27
|
+
}
|
|
28
|
+
const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
|
|
29
|
+
const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
|
|
30
|
+
const onProgress = options?.onProgress;
|
|
31
|
+
const chunks = chunk(docIds, batchSize);
|
|
32
|
+
const errors = [];
|
|
33
|
+
let deleted = 0;
|
|
34
|
+
let completedBatches = 0;
|
|
35
|
+
for (let i = 0; i < chunks.length; i++) {
|
|
36
|
+
const ids = chunks[i];
|
|
37
|
+
let committed = false;
|
|
38
|
+
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
39
|
+
try {
|
|
40
|
+
const batch = db.batch();
|
|
41
|
+
const collectionRef = db.collection(collectionPath);
|
|
42
|
+
for (const id of ids) {
|
|
43
|
+
batch.delete(collectionRef.doc(id));
|
|
44
|
+
}
|
|
45
|
+
await batch.commit();
|
|
46
|
+
committed = true;
|
|
47
|
+
deleted += ids.length;
|
|
48
|
+
break;
|
|
49
|
+
} catch (err) {
|
|
50
|
+
if (attempt < maxRetries) {
|
|
51
|
+
const delay = BASE_DELAY_MS * Math.pow(2, attempt);
|
|
52
|
+
await sleep(delay);
|
|
53
|
+
} else {
|
|
54
|
+
errors.push({
|
|
55
|
+
batchIndex: i,
|
|
56
|
+
error: err instanceof Error ? err : new Error(String(err)),
|
|
57
|
+
operationCount: ids.length
|
|
58
|
+
});
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
if (committed) {
|
|
63
|
+
completedBatches++;
|
|
64
|
+
}
|
|
65
|
+
if (onProgress) {
|
|
66
|
+
onProgress({
|
|
67
|
+
completedBatches,
|
|
68
|
+
totalBatches: chunks.length,
|
|
69
|
+
deletedSoFar: deleted
|
|
70
|
+
});
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
return { deleted, batches: completedBatches, errors };
|
|
74
|
+
}
|
|
75
|
+
async function bulkRemoveEdges(db, collectionPath, reader, params, options) {
|
|
76
|
+
const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
|
|
77
|
+
const edges = await reader.findEdges(effectiveParams);
|
|
78
|
+
const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
|
|
79
|
+
return bulkDeleteDocIds(db, collectionPath, docIds, options);
|
|
80
|
+
}
|
|
81
|
+
async function deleteSubcollectionsRecursive(db, collectionPath, docId, options) {
|
|
82
|
+
const docRef = db.collection(collectionPath).doc(docId);
|
|
83
|
+
const subcollections = await docRef.listCollections();
|
|
84
|
+
if (subcollections.length === 0) return { deleted: 0, errors: [] };
|
|
85
|
+
let totalDeleted = 0;
|
|
86
|
+
const allErrors = [];
|
|
87
|
+
const subOptions = options ? { batchSize: options.batchSize, maxRetries: options.maxRetries } : void 0;
|
|
88
|
+
for (const subCollRef of subcollections) {
|
|
89
|
+
const subCollPath = subCollRef.path;
|
|
90
|
+
const snapshot = await subCollRef.select().get();
|
|
91
|
+
const subDocIds = snapshot.docs.map((d) => d.id);
|
|
92
|
+
for (const subDocId of subDocIds) {
|
|
93
|
+
const subResult = await deleteSubcollectionsRecursive(db, subCollPath, subDocId, subOptions);
|
|
94
|
+
totalDeleted += subResult.deleted;
|
|
95
|
+
allErrors.push(...subResult.errors);
|
|
96
|
+
}
|
|
97
|
+
if (subDocIds.length > 0) {
|
|
98
|
+
const result = await bulkDeleteDocIds(db, subCollPath, subDocIds, subOptions);
|
|
99
|
+
totalDeleted += result.deleted;
|
|
100
|
+
allErrors.push(...result.errors);
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
return { deleted: totalDeleted, errors: allErrors };
|
|
104
|
+
}
|
|
105
|
+
async function removeNodeCascade(db, collectionPath, reader, uid, options) {
|
|
106
|
+
const [outgoingRaw, incomingRaw] = await Promise.all([
|
|
107
|
+
reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
|
|
108
|
+
reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
|
|
109
|
+
]);
|
|
110
|
+
const outgoing = outgoingRaw.filter((e) => e.axbType !== NODE_RELATION);
|
|
111
|
+
const incoming = incomingRaw.filter((e) => e.axbType !== NODE_RELATION);
|
|
112
|
+
const edgeDocIdSet = /* @__PURE__ */ new Set();
|
|
113
|
+
const allEdges = [];
|
|
114
|
+
for (const edge of [...outgoing, ...incoming]) {
|
|
115
|
+
const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
|
|
116
|
+
if (!edgeDocIdSet.has(docId)) {
|
|
117
|
+
edgeDocIdSet.add(docId);
|
|
118
|
+
allEdges.push(edge);
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
const shouldDeleteSubcollections = options?.deleteSubcollections !== false;
|
|
122
|
+
const nodeDocId = computeNodeDocId(uid);
|
|
123
|
+
let subcollectionResult = { deleted: 0, errors: [] };
|
|
124
|
+
if (shouldDeleteSubcollections) {
|
|
125
|
+
subcollectionResult = await deleteSubcollectionsRecursive(
|
|
126
|
+
db,
|
|
127
|
+
collectionPath,
|
|
128
|
+
nodeDocId,
|
|
129
|
+
options
|
|
130
|
+
);
|
|
131
|
+
}
|
|
132
|
+
const edgeDocIds = allEdges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
|
|
133
|
+
const allDocIds = [...edgeDocIds, nodeDocId];
|
|
134
|
+
const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
|
|
135
|
+
const result = await bulkDeleteDocIds(db, collectionPath, allDocIds, {
|
|
136
|
+
...options,
|
|
137
|
+
batchSize
|
|
138
|
+
});
|
|
139
|
+
const totalChunks = Math.ceil(allDocIds.length / batchSize);
|
|
140
|
+
const nodeChunkIndex = totalChunks - 1;
|
|
141
|
+
const nodeDeleted = !result.errors.some((e) => e.batchIndex === nodeChunkIndex);
|
|
142
|
+
const topLevelEdgesDeleted = nodeDeleted ? result.deleted - 1 : result.deleted;
|
|
143
|
+
return {
|
|
144
|
+
deleted: result.deleted + subcollectionResult.deleted,
|
|
145
|
+
batches: result.batches,
|
|
146
|
+
errors: [...result.errors, ...subcollectionResult.errors],
|
|
147
|
+
edgesDeleted: topLevelEdgesDeleted,
|
|
148
|
+
nodeDeleted
|
|
149
|
+
};
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
// src/internal/firestore-aggregate.ts
|
|
153
|
+
import { AggregateField } from "@google-cloud/firestore";
|
|
154
|
+
function applyFiltersToQuery(base, filters) {
|
|
155
|
+
let q = base;
|
|
156
|
+
for (const f of filters) {
|
|
157
|
+
q = q.where(f.field, f.op, f.value);
|
|
158
|
+
}
|
|
159
|
+
return q;
|
|
160
|
+
}
|
|
161
|
+
// Run a classic-API Firestore aggregation (count/sum/avg) over the filtered
// query. `spec` maps each result alias to { op, field }. Nullable results are
// coerced so callers always receive a number per alias: avg -> NaN, otherwise 0.
// Throws FiregraphError("INVALID_QUERY") on malformed specs and
// FiregraphError("UNSUPPORTED_AGGREGATE") for ops Firestore cannot run.
async function runFirestoreAggregate(base, spec, filters, { edition }) {
  const entries = Object.entries(spec);
  if (entries.length === 0) {
    throw new FiregraphError(
      "aggregate() requires at least one aggregation in the `aggregates` map.",
      "INVALID_QUERY"
    );
  }
  const filtered = applyFiltersToQuery(base, filters);
  const aggregations = {};
  for (const [alias, { op, field }] of entries) {
    if (op === "count") {
      // count() is row-based; supplying a field is a caller mistake.
      if (field !== void 0) {
        throw new FiregraphError(
          `Aggregate '${alias}' op 'count' must not specify a field \u2014 count operates on rows, not a column expression.`,
          "INVALID_QUERY"
        );
      }
      aggregations[alias] = AggregateField.count();
    } else if (!field) {
      throw new FiregraphError(
        `Aggregate '${alias}' op '${op}' requires a field.`,
        "INVALID_QUERY"
      );
    } else if (op === "sum") {
      aggregations[alias] = AggregateField.sum(field);
    } else if (op === "avg") {
      aggregations[alias] = AggregateField.average(field);
    } else {
      const editionLabel = edition === "enterprise" ? "Firestore Enterprise" : "Firestore Standard";
      throw new FiregraphError(
        `Aggregate op '${op}' is not supported on ${editionLabel}. Both Firestore editions support count/sum/avg via the classic Query API; min/max requires a backend with SQL aggregation (SQLite or DO).`,
        "UNSUPPORTED_AGGREGATE"
      );
    }
  }
  const snap = await filtered.aggregate(aggregations).get();
  const data = snap.data();
  const out = {};
  for (const [alias, { op }] of entries) {
    const value = data[alias];
    // Nullish results (no matching rows) become NaN for avg, 0 otherwise.
    out[alias] = value ?? (op === "avg" ? Number.NaN : 0);
  }
  return out;
}
|
|
213
|
+
|
|
214
|
+
// src/internal/firestore-classic-adapter.ts
// Thin CRUD + query adapter over one classic Firestore collection. Returned
// object exposes the async storage interface the rest of the library consumes.
function createFirestoreAdapter(db, collectionPath) {
  const collectionRef = db.collection(collectionPath);
  const docRef = (docId) => collectionRef.doc(docId);
  // Translate generic filter/option descriptors into a chained Firestore query.
  const buildQuery = (filters, options) => {
    let q = collectionRef;
    for (const clause of filters) {
      q = q.where(clause.field, clause.op, clause.value);
    }
    if (options?.orderBy) {
      q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
    }
    if (options?.limit !== void 0) {
      q = q.limit(options.limit);
    }
    return q;
  };
  return {
    collectionPath,
    // Resolve a document by id; null when the document does not exist.
    async getDoc(docId) {
      const snap = await docRef(docId).get();
      return snap.exists ? snap.data() : null;
    },
    // Write a document; merge mode preserves unspecified fields.
    async setDoc(docId, data, options) {
      if (options?.merge) {
        await docRef(docId).set(data, { merge: true });
      } else {
        await docRef(docId).set(data);
      }
    },
    async updateDoc(docId, data) {
      await docRef(docId).update(data);
    },
    async deleteDoc(docId) {
      await docRef(docId).delete();
    },
    // Run a filtered/ordered/limited read and return plain row data.
    async query(filters, options) {
      const snap = await buildQuery(filters, options).get();
      return snap.docs.map((doc) => doc.data());
    }
  };
}
|
|
253
|
+
// Adapter variant that routes every operation through a Firestore transaction.
// Reads go via tx.get; writes are queued on tx synchronously (no await), which
// is the Firestore transaction contract for set/update/delete.
function createTransactionAdapter(db, collectionPath, tx) {
  const collectionRef = db.collection(collectionPath);
  const docRef = (docId) => collectionRef.doc(docId);
  return {
    // Transactional point read; null when the document is absent.
    async getDoc(docId) {
      const snap = await tx.get(docRef(docId));
      return snap.exists ? snap.data() : null;
    },
    setDoc(docId, data, options) {
      if (options?.merge) {
        tx.set(docRef(docId), data, { merge: true });
      } else {
        tx.set(docRef(docId), data);
      }
    },
    updateDoc(docId, data) {
      tx.update(docRef(docId), data);
    },
    deleteDoc(docId) {
      tx.delete(docRef(docId));
    },
    // Transactional filtered read; builds the query then fetches it via tx.get.
    async query(filters, options) {
      let q = collectionRef;
      for (const clause of filters) {
        q = q.where(clause.field, clause.op, clause.value);
      }
      if (options?.orderBy) {
        q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
      }
      if (options?.limit !== void 0) {
        q = q.limit(options.limit);
      }
      const snap = await tx.get(q);
      return snap.docs.map((doc) => doc.data());
    }
  };
}
|
|
290
|
+
// Write-only adapter backed by a Firestore WriteBatch: operations queue
// synchronously and nothing is persisted until commit() resolves.
function createBatchAdapter(db, collectionPath) {
  const collectionRef = db.collection(collectionPath);
  const batch = db.batch();
  const docRef = (docId) => collectionRef.doc(docId);
  return {
    setDoc(docId, data, options) {
      if (options?.merge) {
        batch.set(docRef(docId), data, { merge: true });
      } else {
        batch.set(docRef(docId), data);
      }
    },
    updateDoc(docId, data) {
      batch.update(docRef(docId), data);
    },
    deleteDoc(docId) {
      batch.delete(docRef(docId));
    },
    // Flush all queued writes atomically.
    async commit() {
      await batch.commit();
    }
  };
}
|
|
312
|
+
|
|
313
|
+
// src/internal/firestore-classic-expand.ts
// Maximum number of values placed in a single Firestore `in` filter clause;
// uid lists are split into chunks of this size before querying
// (see chunkUids / runFirestoreClassicExpand).
var FIRESTORE_CLASSIC_IN_CHUNK_SIZE = 30;
|
|
315
|
+
// Read a possibly dotted field path out of a plain record. Missing or
// non-traversable intermediate segments yield undefined.
function readField(record, path) {
  // Fast path: a top-level field needs no traversal.
  if (path.indexOf(".") === -1) {
    return record[path];
  }
  const segments = path.split(".");
  let node = record;
  for (let i = 0; i < segments.length; i++) {
    if (node === null || typeof node !== "object") {
      return void 0;
    }
    node = node[segments[i]];
  }
  return node;
}
|
|
326
|
+
// Total-ish ordering for sort: nullish first, same-type values via the native
// relational operator, mixed types via lexicographic string comparison.
function compareFieldValues(a, b) {
  // Identical primitives/references compare equal.
  if (a === b) return 0;
  // Nullish sorts before everything else.
  if (a === void 0 || a === null) return -1;
  if (b === void 0 || b === null) return 1;
  // Same runtime type: defer to the language's own < ordering.
  if (typeof a === typeof b) {
    return a < b ? -1 : 1;
  }
  // Mixed types: compare string representations lexicographically.
  const left = String(a);
  const right = String(b);
  if (left < right) return -1;
  if (left > right) return 1;
  return 0;
}
|
|
339
|
+
// Expand edges of relation `axbType` from `params.sources`, splitting the
// source uids into Firestore-`in`-sized chunks (one adapter.query per chunk),
// merging the chunk results, re-sorting/limiting globally, and optionally
// hydrating the target node rows in edge order.
async function runFirestoreClassicExpand(adapter, params) {
  if (params.sources.length === 0) {
    return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
  }
  if (params.axbType.length === 0) {
    throw new FiregraphError("expand(): axbType must be a non-empty string.", "INVALID_QUERY");
  }
  const direction = params.direction ?? "forward";
  const sourceField = direction === "forward" ? "aUid" : "bUid";
  const sourceChunks = chunkUids(params.sources, FIRESTORE_CLASSIC_IN_CHUNK_SIZE);
  const totalLimit = params.limitPerSource !== void 0 ? params.sources.length * params.limitPerSource : void 0;
  // Build the per-chunk filter set: relation + source membership, plus
  // optional endpoint-type constraints.
  const filtersFor = (uids) => {
    const filters = [
      { field: "axbType", op: "==", value: params.axbType },
      { field: sourceField, op: "in", value: uids }
    ];
    if (params.aType !== void 0) {
      filters.push({ field: "aType", op: "==", value: params.aType });
    }
    if (params.bType !== void 0) {
      filters.push({ field: "bType", op: "==", value: params.bType });
    }
    return filters;
  };
  // Per-chunk options: forward the orderBy; cap rows at chunk-size times
  // limitPerSource so every source in the chunk can fill its quota.
  const optionsFor = (uids) => {
    const opts = {};
    if (params.orderBy) opts.orderBy = params.orderBy;
    if (params.limitPerSource !== void 0) {
      opts.limit = uids.length * params.limitPerSource;
    }
    return opts;
  };
  const perChunk = await Promise.all(
    sourceChunks.map((uids) => adapter.query(filtersFor(uids), optionsFor(uids)))
  );
  let edges = perChunk.flat();
  // When expanding the node relation itself, exclude self-referencing rows
  // (aUid === bUid).
  if (params.axbType === NODE_RELATION) {
    edges = edges.filter((edge) => edge.aUid !== edge.bUid);
  }
  // Each chunk query is ordered independently; re-sort the merged set.
  if (params.orderBy) {
    const { field, direction: sortDir } = params.orderBy;
    const sign = (sortDir ?? "asc") === "asc" ? 1 : -1;
    edges.sort((x, y) => sign * compareFieldValues(readField(x, field), readField(y, field)));
  }
  if (totalLimit !== void 0 && edges.length > totalLimit) {
    edges = edges.slice(0, totalLimit);
  }
  if (!params.hydrate) {
    return { edges };
  }
  // Hydration: one target uid per edge (duplicates preserved, order matches
  // `edges`); null where no node row was found.
  const targetUids = edges.map((edge) => direction === "forward" ? edge.bUid : edge.aUid);
  const uniqueTargets = [...new Set(targetUids)];
  if (uniqueTargets.length === 0) {
    return { edges, targets: [] };
  }
  const nodeChunks = chunkUids(uniqueTargets, FIRESTORE_CLASSIC_IN_CHUNK_SIZE);
  const nodeRows = await Promise.all(
    nodeChunks.map(
      (uids) => adapter.query([
        { field: "axbType", op: "==", value: NODE_RELATION },
        { field: "aUid", op: "in", value: uids }
      ])
    )
  );
  // NOTE(review): node rows are queried by aUid but indexed by bUid here —
  // presumably node rows are NODE_RELATION self-edges (aUid === bUid); verify
  // against the writer side.
  const byUid = new Map();
  for (const row of nodeRows.flat()) {
    byUid.set(row.bUid, row);
  }
  return { edges, targets: targetUids.map((uid) => byUid.get(uid) ?? null) };
}
|
|
411
|
+
// Split `uids` into consecutive slices of at most `chunkSize` elements.
// Throws FiregraphError("INVALID_QUERY") for a non-positive chunk size.
function chunkUids(uids, chunkSize) {
  if (chunkSize <= 0) {
    throw new FiregraphError(
      `chunkUids: chunkSize must be positive (got ${chunkSize}).`,
      "INVALID_QUERY"
    );
  }
  const chunks = [];
  let start = 0;
  while (start < uids.length) {
    chunks.push(uids.slice(start, start + chunkSize));
    start += chunkSize;
  }
  return chunks;
}
|
|
424
|
+
|
|
425
|
+
// src/internal/projection.ts
// Envelope fields stored at the document root. Any selected field outside
// this set (and not already under `data`) is rewritten to a `data.*` path by
// normalizeFirestoreProjectionField.
var PROJECTION_BUILTIN_FIELDS = /* @__PURE__ */ new Set([
"aType",
"aUid",
"axbType",
"bType",
"bUid",
"createdAt",
"updatedAt",
"v"
]);
|
|
436
|
+
// Canonicalize a user-facing select field into its stored document path:
// built-in envelope fields and explicit `data`/`data.*` paths pass through;
// everything else is scoped under `data.`.
function normalizeFirestoreProjectionField(field) {
  const isBuiltin = PROJECTION_BUILTIN_FIELDS.has(field);
  const isDataPath = field === "data" || field.startsWith("data.");
  return isBuiltin || isDataPath ? field : `data.${field}`;
}
|
|
441
|
+
// Resolve a (possibly dotted) projection path against `obj`, mapping any
// missing/undefined result — and a nullish `obj` — to null so projected rows
// always carry an explicit value per selected field.
function readProjectionPath(obj, path) {
  if (obj == null) return null;
  let value;
  if (!path.includes(".")) {
    value = obj[path];
  } else {
    value = obj;
    for (const segment of path.split(".")) {
      if (value == null || typeof value !== "object") {
        value = void 0;
        break;
      }
      value = value[segment];
    }
  }
  return value === void 0 ? null : value;
}
|
|
455
|
+
|
|
456
|
+
// src/internal/firestore-projection.ts
// Run a filtered edge query that returns only the selected fields. Fields are
// deduped (first occurrence wins), canonicalized to stored paths for the
// Firestore select(), and the results are keyed back by the caller's original
// field names with missing values mapped to null.
async function runFirestoreFindEdgesProjected(base, select, filters, options) {
  if (select.length === 0) {
    throw new FiregraphError(
      "findEdgesProjected requires a non-empty select list \u2014 an empty projection has no representation distinct from `findEdges`.",
      "INVALID_QUERY"
    );
  }
  // Dedupe while preserving first-seen order; remember both the original name
  // (for the output row) and the canonical stored path (for the query).
  const fields = [];
  const seen = new Set();
  for (const name of select) {
    if (seen.has(name)) continue;
    seen.add(name);
    fields.push({ original: name, canonical: normalizeFirestoreProjectionField(name) });
  }
  let query = base;
  for (const clause of filters) {
    query = query.where(clause.field, clause.op, clause.value);
  }
  if (options?.orderBy) {
    query = query.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
  }
  if (options?.limit !== void 0) {
    query = query.limit(options.limit);
  }
  const snap = await query.select(...fields.map((f) => f.canonical)).get();
  return snap.docs.map((doc) => {
    const data = doc.data();
    const row = {};
    for (const { original, canonical } of fields) {
      row[original] = readProjectionPath(data, canonical);
    }
    return row;
  });
}
|
|
493
|
+
|
|
494
|
+
// src/internal/firestore-vector.ts
// Built-in envelope (document-root) field names. findNearest() rejects these
// as vector field paths — vectors must live under the `data.*` map
// (see normalizeVectorFieldPath).
var ENVELOPE_FIELDS = /* @__PURE__ */ new Set([
"aType",
"aUid",
"axbType",
"bType",
"bUid",
"createdAt",
"updatedAt",
"v"
]);
|
|
505
|
+
// Validate and canonicalize a vector field path for findNearest(): envelope
// fields are rejected outright; explicit `data`/`data.*` paths pass through;
// any other name is scoped under `data.`. `label` names the offending
// parameter in the error message.
function normalizeVectorFieldPath(label, field) {
  if (ENVELOPE_FIELDS.has(field)) {
    throw new FiregraphError(
      `findNearest(): ${label} '${field}' is a built-in envelope field \u2014 vectors must live under \`data.*\`. Use a path like 'data.${field}' if you really meant a nested data field.`,
      "INVALID_QUERY"
    );
  }
  const alreadyScoped = field === "data" || field.startsWith("data.");
  return alreadyScoped ? field : `data.${field}`;
}
|
|
515
|
+
// Assemble equality filters for the optional type constraints (aType,
// axbType, bType — in that order), then append any caller-supplied clauses.
function buildVectorFilters(params) {
  const clauses = [];
  const typeConstraints = [
    ["aType", params.aType],
    ["axbType", params.axbType],
    ["bType", params.bType]
  ];
  for (const [field, value] of typeConstraints) {
    if (value) clauses.push({ field, op: "==", value });
  }
  if (params.where) {
    clauses.push(...params.where);
  }
  return clauses;
}
|
|
523
|
+
// Coerce the query vector into a plain number[]: arrays pass through as-is;
// objects exposing toArray() (Firestore VectorValue shape) are converted;
// anything else is rejected.
function toNumberArray(qv) {
  if (Array.isArray(qv)) {
    return qv;
  }
  const converter = qv.toArray;
  if (typeof converter === "function") {
    return qv.toArray();
  }
  throw new FiregraphError(
    "findNearest(): queryVector must be a number[] or a Firestore VectorValue.",
    "INVALID_QUERY"
  );
}
|
|
533
|
+
// Execute a Firestore vector findNearest() query: validate the query vector
// and limit, canonicalize the vector field paths, apply type/where filters,
// then return the matching row data.
async function runFirestoreFindNearest(base, params) {
  const vec = toNumberArray(params.queryVector);
  if (vec.length === 0) {
    throw new FiregraphError(
      "findNearest(): queryVector is empty \u2014 at least one dimension is required.",
      "INVALID_QUERY"
    );
  }
  const limitIsValid = Number.isInteger(params.limit) && params.limit > 0 && params.limit <= 1e3;
  if (!limitIsValid) {
    throw new FiregraphError(
      `findNearest(): limit must be a positive integer \u2264 1000 (got ${params.limit}).`,
      "INVALID_QUERY"
    );
  }
  const opts = {
    vectorField: normalizeVectorFieldPath("vectorField", params.vectorField),
    queryVector: vec,
    limit: params.limit,
    distanceMeasure: params.distanceMeasure
  };
  if (params.distanceThreshold !== void 0) {
    opts.distanceThreshold = params.distanceThreshold;
  }
  if (params.distanceResultField !== void 0) {
    opts.distanceResultField = normalizeVectorFieldPath("distanceResultField", params.distanceResultField);
  }
  const filtered = applyFiltersToQuery(base, buildVectorFilters(params));
  const snap = await filtered.findNearest(opts).get();
  return snap.docs.map((doc) => doc.data());
}
|
|
561
|
+
|
|
562
|
+
// Named exports of this generated chunk. bulkRemoveEdges and removeNodeCascade
// are defined earlier in this file.
export {
  bulkRemoveEdges,
  removeNodeCascade,
  runFirestoreAggregate,
  createFirestoreAdapter,
  createTransactionAdapter,
  createBatchAdapter,
  runFirestoreClassicExpand,
  runFirestoreFindEdgesProjected,
  runFirestoreFindNearest
};
//# sourceMappingURL=chunk-PAD7WFFU.js.map
|