@typicalday/firegraph 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +317 -73
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +222 -3
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +197 -4
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +930 -3
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +213 -12
- package/dist/cloudflare/index.d.ts +213 -12
- package/dist/cloudflare/index.js +562 -281
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +590 -550
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +9 -37
- package/dist/index.d.ts +9 -37
- package/dist/index.js +178 -555
- package/dist/index.js.map +1 -1
- package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
- package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-BsR0lnFL.d.ts +0 -200
- package/dist/backend-Ct-fLlkG.d.cts +0 -200
- package/dist/chunk-AWW4MUJ5.js.map +0 -1
- package/dist/chunk-HONQY4HF.js.map +0 -1
- package/dist/types-DxYLy8Ol.d.cts +0 -770
- package/dist/types-DxYLy8Ol.d.ts +0 -770
package/dist/index.cjs
CHANGED

```diff
@@ -168,6 +168,7 @@ var init_serialization = __esm({
 var index_exports = {};
 __export(index_exports, {
   BOOTSTRAP_ENTRIES: () => BOOTSTRAP_ENTRIES,
+  CapabilityNotSupportedError: () => CapabilityNotSupportedError,
   CrossBackendTransactionError: () => CrossBackendTransactionError,
   DEFAULT_CORE_INDEXES: () => DEFAULT_CORE_INDEXES,
   DEFAULT_QUERY_LIMIT: () => DEFAULT_QUERY_LIMIT,
```
```diff
@@ -194,9 +195,7 @@ __export(index_exports, {
   appendStorageScope: () => appendStorageScope,
   applyMigrationChain: () => applyMigrationChain,
   buildEdgeQueryPlan: () => buildEdgeQueryPlan,
-  buildEdgeRecord: () => buildEdgeRecord,
   buildNodeQueryPlan: () => buildNodeQueryPlan,
-  buildNodeRecord: () => buildNodeRecord,
   compileMigrationFn: () => compileMigrationFn,
   compileMigrations: () => compileMigrations,
   compileSchema: () => compileSchema,
```
```diff
@@ -300,13 +299,6 @@ function isTerminalValue(value) {
   return true;
 }
 var SAFE_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
-function assertUpdatePayloadExclusive(update) {
-  if (update.replaceData !== void 0 && update.dataOps !== void 0) {
-    throw new Error(
-      "firegraph: UpdatePayload cannot specify both `replaceData` and `dataOps`. Use one or the other \u2014 `replaceData` is the migration-write-back form, `dataOps` is the standard partial-update form."
-    );
-  }
-}
 function assertNoDeleteSentinels(data, callerLabel) {
   walkForDeleteSentinels(data, [], { kind: "root" }, ({ path }) => {
     const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
```
```diff
@@ -563,6 +555,16 @@ var CrossBackendTransactionError = class extends FiregraphError {
     this.name = "CrossBackendTransactionError";
   }
 };
+var CapabilityNotSupportedError = class extends FiregraphError {
+  constructor(capability, backendDescription) {
+    super(
+      `Capability "${capability}" is not supported by ${backendDescription}.`,
+      "CAPABILITY_NOT_SUPPORTED"
+    );
+    this.capability = capability;
+    this.name = "CapabilityNotSupportedError";
+  }
+};
 
 // src/json-schema.ts
 var import_json_schema = require("@cfworker/json-schema");
```
```diff
@@ -1663,6 +1665,15 @@ var GraphClientImpl = class _GraphClientImpl {
     this.scanProtection = options?.scanProtection ?? "error";
   }
   scanProtection;
+  /**
+   * Capability set of the underlying backend. Mirrors `backend.capabilities`
+   * verbatim so callers can portability-check (`client.capabilities.has(
+   * 'query.join')`) without reaching for the backend handle. Static for the
+   * lifetime of the client.
+   */
+  get capabilities() {
+    return this.backend.capabilities;
+  }
   // Static mode
   staticRegistry;
   // Dynamic mode
```
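
The new `capabilities` getter is the portability hook the rest of this diff builds on. A minimal sketch of capability-gated calling code (the `query.dml` capability string and the `bulkUpdate` surface appear later in this diff; the structural interface, edge type, and field names below are illustrative assumptions):

```ts
// Illustrative structural type for the slice of the client used here.
interface CapabilityClient {
  capabilities: { has(cap: string): boolean };
  bulkUpdate?(params: object, patch: object): Promise<unknown>;
}

async function archiveStaleFollows(client: CapabilityClient): Promise<void> {
  if (client.capabilities.has("query.dml") && client.bulkUpdate) {
    // Fast path on backends declaring query.dml: one server-side UPDATE.
    await client.bulkUpdate(
      { axbType: "FOLLOWS", where: [{ field: "data.stale", op: "==", value: true }] },
      { data: { archived: true } }
    );
  }
  // Backends without the capability keep working via fetch-then-write loops.
}
```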
```diff
@@ -1957,6 +1968,33 @@ var GraphClientImpl = class _GraphClientImpl {
     return this.applyMigrations(records);
   }
   // ---------------------------------------------------------------------------
+  // Aggregate query (capability: query.aggregate)
+  // ---------------------------------------------------------------------------
+  async aggregate(params) {
+    if (!this.backend.aggregate) {
+      throw new FiregraphError(
+        "aggregate() is not supported by the current storage backend.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
+    if (!hasAnyFilter) {
+      this.checkQuerySafety([], params.allowCollectionScan);
+      const result2 = await this.backend.aggregate(params.aggregates, []);
+      return result2;
+    }
+    const plan = buildEdgeQueryPlan(params);
+    if (plan.strategy === "get") {
+      throw new FiregraphError(
+        "aggregate() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
+        "INVALID_QUERY"
+      );
+    }
+    this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+    const result = await this.backend.aggregate(params.aggregates, plan.filters);
+    return result;
+  }
+  // ---------------------------------------------------------------------------
   // Bulk operations
   // ---------------------------------------------------------------------------
   async removeNodeCascade(uid, options) {
```
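
How `aggregate()` reads from calling code, as a sketch. The `aggregates` payload shape is backend-defined and not visible in this hunk, so the `count` form below is an assumption; the edge type and `where` field are illustrative:

```ts
declare const client: any; // a GraphClient instance (see the sketch above)

// Assumed aggregate payload shape ({ count: true }); not shown in this diff.
const result = await client.aggregate({
  axbType: "PURCHASED", // identifying filter, so scan protection passes
  where: [{ field: "data.total", op: ">=", value: 100 }],
  aggregates: { bigOrders: { count: true } },
});

// A GET-shaped call (aUid + axbType + bUid all set) throws INVALID_QUERY,
// and a fully unfiltered call must opt in with allowCollectionScan: true.
```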
```diff
@@ -1966,6 +2004,327 @@ var GraphClientImpl = class _GraphClientImpl {
     return this.backend.bulkRemoveEdges(params, this, options);
   }
   // ---------------------------------------------------------------------------
+  // Server-side DML (capability: query.dml)
+  // ---------------------------------------------------------------------------
+  /**
+   * Single-statement bulk DELETE. Translates `params` to a filter list via
+   * `buildEdgeQueryPlan` (the same plan `findEdges` uses) and dispatches to
+   * `backend.bulkDelete`. The fetch-then-delete loop in `bulkRemoveEdges`
+   * is the cap-less fallback; this method is the fast path on backends
+   * declaring `query.dml`.
+   *
+   * Scan-protection rules match `findEdges`: a query with no identifying
+   * fields requires `allowCollectionScan: true` to pass. A bare-empty
+   * filter set (no `aType`, `aUid`, etc., no `where`) is allowed at this
+   * layer — shared SQLite bounds the blast radius via its leading `scope`
+   * predicate — but the DO RPC backend rejects empty filters at the wire
+   * boundary as defense-in-depth. To wipe a routed subgraph DO, use
+   * `removeNodeCascade` on the parent node instead.
+   */
+  async bulkDelete(params, options) {
+    if (!this.backend.bulkDelete) {
+      throw new FiregraphError(
+        "bulkDelete() is not supported by the current storage backend. Fall back to bulkRemoveEdges() for backends without query.dml (e.g. Firestore Standard).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = this.buildDmlFilters(params);
+    return this.backend.bulkDelete(filters, options);
+  }
+  /**
+   * Single-statement bulk UPDATE. Same translation path as `bulkDelete`,
+   * but the patch is deep-merged into each matching row's `data` via the
+   * shared `flattenPatch` pipeline. Identifying columns are immutable
+   * through this path (see `BulkUpdatePatch` JSDoc).
+   *
+   * Empty-patch rejection happens inside the backend (`compileBulkUpdate`)
+   * — a `data: {}` payload would only rewrite `updated_at`, which is
+   * almost certainly a bug.
+   */
+  async bulkUpdate(params, patch, options) {
+    if (!this.backend.bulkUpdate) {
+      throw new FiregraphError(
+        "bulkUpdate() is not supported by the current storage backend.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = this.buildDmlFilters(params);
+    return this.backend.bulkUpdate(filters, patch, options);
+  }
```
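
The DML pair in calling code, sketched with illustrative edge types and fields. Per the JSDoc above, filters come from the same `buildEdgeQueryPlan` as `findEdges`, and GET-shaped params are rejected (see `buildDmlFilters` later in this hunk):

```ts
declare const client: any; // a GraphClient on a backend declaring query.dml

// Server-side DELETE of all expired session edges (the fast path).
await client.bulkDelete({
  axbType: "HAS_SESSION",
  where: [{ field: "data.expiresAt", op: "<", value: Date.now() }],
});

// Server-side UPDATE: the patch is deep-merged into each row's data.
// An empty patch ({ data: {} }) is rejected inside the backend.
await client.bulkUpdate(
  { aType: "user", axbType: "HAS_PROFILE" },
  { data: { plan: "free" } }
);

// Single-row shape is refused; use removeEdge()/updateEdge() instead:
// await client.bulkDelete({ aUid, axbType: "HAS_SESSION", bUid }); // INVALID_QUERY
```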
```diff
+  // ---------------------------------------------------------------------------
+  // Multi-source fan-out (capability: query.join)
+  // ---------------------------------------------------------------------------
+  /**
+   * Fan out from `params.sources` over a single edge type in one round trip.
+   * On backends without `query.join`, throws `UNSUPPORTED_OPERATION` — the
+   * cap-less fallback is the per-source `findEdges` loop, which lives in
+   * `traverse.ts` (the higher-level traversal walker) rather than here.
+   *
+   * `expand()` is intentionally edge-type-only — the source set is a flat
+   * UID list and the hop matches one `axbType`. Multi-axbType expansions
+   * become multiple `expand()` calls, one per relation.
+   *
+   * `params.sources.length === 0` short-circuits to an empty result. The
+   * backend never sees the call. (`compileExpand` itself rejects empty
+   * because `IN ()` is not valid SQL.)
+   */
+  async expand(params) {
+    if (!this.backend.expand) {
+      throw new FiregraphError(
+        "expand() is not supported by the current storage backend. Backends without `query.join` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent (just slower).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.sources.length === 0) {
+      return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
+    }
+    return this.backend.expand(params);
+  }
```
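
A sketch of the fan-out call (edge type and UIDs illustrative; `direction` and `limitPerSource` are the parameter names used by the traversal fast path later in this diff, and `hydrate` comes from the empty-source short-circuit above):

```ts
declare const client: any; // a GraphClient on a backend declaring query.join

// One round trip: expand three sources over a single edge type.
const { edges } = await client.expand({
  sources: ["user_a", "user_b", "user_c"],
  axbType: "FOLLOWS",
  direction: "forward",
  limitPerSource: 10,
});

// Multi-relation expansion is deliberately N calls, one per axbType:
// await client.expand({ sources, axbType: "LIKES", direction: "forward" });
```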
```diff
+  // ---------------------------------------------------------------------------
+  // Engine-level multi-hop traversal (capability: traversal.serverSide)
+  // ---------------------------------------------------------------------------
+  /**
+   * Compile a multi-hop traversal spec into one server-side nested
+   * Pipeline and dispatch a single round trip.
+   *
+   * Backends declaring `traversal.serverSide` (Firestore Enterprise
+   * today) install this method; everywhere else, it throws
+   * `UNSUPPORTED_OPERATION`. The capability gate matches the type-level
+   * surface — `GraphClient<C>` only exposes `runEngineTraversal` when
+   * `'traversal.serverSide' extends C`.
+   *
+   * Most callers should not invoke this method directly; the
+   * `createTraversal(...).run()` builder routes through it
+   * automatically when `engineTraversal: 'auto'` (the default) and
+   * the spec is eligible per `firestore-traverse-compiler.ts`. Calling
+   * directly is appropriate for benchmarking or for callers that have
+   * already shaped their hop chain into the strict
+   * `EngineTraversalParams` shape.
+   *
+   * `params.sources.length === 0` short-circuits to empty per-hop
+   * arrays. The backend never sees the call.
+   */
+  async runEngineTraversal(params) {
+    if (!this.backend.runEngineTraversal) {
+      throw new FiregraphError(
+        "runEngineTraversal() is not supported by the current storage backend. Backends without `traversal.serverSide` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent for in-graph specs (different round-trip profile).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.sources.length === 0) {
+      return {
+        hops: params.hops.map(() => ({ edges: [], sourceCount: 0 })),
+        totalReads: 0
+      };
+    }
+    return this.backend.runEngineTraversal(params);
+  }
```
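
A direct-call sketch. The strict params shape matches what `compileEngineTraversal` (added later in this diff) validates: a flat `sources` UID list plus hops that each carry an `axbType` and a positive `limitPerSource`; edge types below are illustrative:

```ts
declare const client: any; // Firestore Enterprise backend (traversal.serverSide)

// One server-side round trip for a two-hop traversal.
const result = await client.runEngineTraversal({
  sources: ["user_1", "user_2"],
  hops: [
    { axbType: "FOLLOWS", direction: "forward", limitPerSource: 10 },
    { axbType: "POSTED", direction: "forward", limitPerSource: 5 },
  ],
});
// result.hops[i].edges holds per-hop edge arrays; result.totalReads the read count.
```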
```diff
+  // ---------------------------------------------------------------------------
+  // Server-side projection (capability: query.select)
+  // ---------------------------------------------------------------------------
+  /**
+   * Server-side projection — fetch only the requested fields from each
+   * matching edge. The backend translates the call into a projecting query
+   * (`SELECT json_extract(...)` on SQLite/DO, `Query.select(...)` on
+   * Firestore Standard, classic projection on Enterprise) so the wire
+   * payload is reduced to just the requested fields.
+   *
+   * Resolution rules for `select` (mirrored across all backends):
+   *
+   * - Built-in envelope fields (`aType`, `aUid`, `axbType`, `bType`,
+   *   `bUid`, `createdAt`, `updatedAt`, `v`) → resolve to the typed
+   *   column / Firestore field directly.
+   * - `'data'` literal → returns the whole user payload.
+   * - `'data.<x>'` → explicit nested path, returned at the same shape.
+   * - bare name → rewritten to `data.<name>` (the canonical "give me a
+   *   few keys out of the JSON payload" shape).
+   *
+   * Empty `select: []` is rejected with `INVALID_QUERY`. Duplicate entries
+   * are de-duped (first-occurrence order preserved); the result row carries
+   * one slot per unique field.
+   *
+   * Migrations are *not* applied to the result. The caller asked for a
+   * partial shape, and rehydrating it through the migration pipeline would
+   * require synthesising every absent field — see
+   * `StorageBackend.findEdgesProjected` for the rationale.
+   *
+   * Scan protection follows the `findEdges` rules: a query with no
+   * identifying fields requires `allowCollectionScan: true` to pass. The
+   * cap-less fallback would be `findEdges` + JS-side projection, but that
+   * defeats the wire-payload reduction; backends without `query.select`
+   * throw `UNSUPPORTED_OPERATION` rather than silently materialising full
+   * rows.
+   */
+  async findEdgesProjected(params) {
+    if (!this.backend.findEdgesProjected) {
+      throw new FiregraphError(
+        "findEdgesProjected() is not supported by the current storage backend. There is no client-side fallback because the wire-payload reduction is the entire point of the API \u2014 use findEdges() and project in JS if the backend does not declare `query.select`.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.select.length === 0) {
+      throw new FiregraphError(
+        "findEdgesProjected() requires a non-empty `select` list.",
+        "INVALID_QUERY"
+      );
+    }
+    const plan = buildEdgeQueryPlan(params);
+    let filters;
+    let options;
+    if (plan.strategy === "get") {
+      filters = [
+        { field: "aUid", op: "==", value: params.aUid },
+        { field: "axbType", op: "==", value: params.axbType },
+        { field: "bUid", op: "==", value: params.bUid }
+      ];
+      if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+      if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+      options = void 0;
+    } else {
+      filters = plan.filters;
+      options = plan.options;
+    }
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    const rows = await this.backend.findEdgesProjected(params.select, filters, options);
+    return rows;
+  }
```
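
The select-resolution rules in action, as a sketch (edge type and field names illustrative):

```ts
declare const client: any; // a GraphClient on a backend declaring query.select

// Fetch two payload keys and one envelope field per matching edge.
const rows = await client.findEdgesProjected({
  axbType: "AUTHORED",
  select: [
    "title",            // bare name, rewritten to data.title
    "data.stats.views", // explicit nested path, returned at the same shape
    "updatedAt",        // envelope field, resolved to the typed column
  ],
});

// select: [] throws INVALID_QUERY; duplicates are de-duped, order preserved.
// Rows come back unmigrated; follow up with getNode()/findEdges() by UID if
// the migrated shape is needed.
```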
```diff
+  /**
+   * Native vector / nearest-neighbour search (capability `search.vector`).
+   *
+   * Resolves to the top-K records by similarity, sorted nearest-first
+   * (`EUCLIDEAN` / `COSINE`) or highest-first (`DOT_PRODUCT`). The wrapper
+   * is intentionally thin: capability check, scan-protection, then forward
+   * `params` verbatim to the backend. All field-path normalisation and
+   * SDK-shape validation lives in the shared
+   * `runFirestoreFindNearest` helper that both Firestore editions call —
+   * keeping it there means the validation surface stays in lockstep with
+   * the SDK call site, regardless of which backend is plugged in.
+   *
+   * Migrations are NOT applied. The vector index walked the raw stored
+   * shape; rehydrating each row through the migration pipeline would
+   * change the candidate set the index already chose. If you need
+   * migrated shape, follow up with `getNode` / `findEdges` on the
+   * returned UIDs — those paths apply migrations normally.
+   *
+   * Scan-protection mirrors `findEdges`: if no identifying filters
+   * (`aType` / `axbType` / `bType`) and no `where` clauses are supplied,
+   * the request must opt in via `allowCollectionScan: true`. The ANN
+   * query still walks the candidate set the WHERE clause produces, so
+   * an unfiltered nearest-neighbour search over a million-row collection
+   * is the same scan trap as an unfiltered `findEdges`.
+   *
+   * Backends without `search.vector` throw `UNSUPPORTED_OPERATION` —
+   * there is no client-side fallback because emulating ANN over the
+   * generic backend surface (`findEdges` + JS-side cosine) doesn't scale
+   * past trivial datasets and would give callers the wrong mental model
+   * about cost.
+   */
+  async findNearest(params) {
+    if (!this.backend.findNearest) {
+      throw new FiregraphError(
+        "findNearest() is not supported by the current storage backend. Vector search requires a backend that declares `search.vector` (currently Firestore Standard and Enterprise). There is no client-side fallback because emulating ANN on top of the generic backend surface does not scale beyond toy datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    if (params.where) filters.push(...params.where);
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.findNearest(params);
+  }
```
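
A sketch of a vector search call. Only the filter fields, the `allowCollectionScan` opt-in, and the distance measures are grounded in this hunk; the vector-specific parameter names below (`vectorField`, `queryVector`, `distanceMeasure`, `limit`) are assumptions, since the wrapper forwards `params` verbatim and the real shape lives in the shared `runFirestoreFindNearest` helper:

```ts
declare const client: any; // a GraphClient on a backend declaring search.vector

// Assumed param names: vectorField / queryVector / distanceMeasure / limit.
const nearest = await client.findNearest({
  axbType: "HAS_EMBEDDING",   // identifying filter, passes scan protection
  vectorField: "data.embedding",
  queryVector: [0.12, -0.08, 0.33],
  distanceMeasure: "COSINE",  // EUCLIDEAN / COSINE sort nearest-first
  limit: 5,
});
// Results are the raw stored shape (no migrations); re-fetch by UID if needed.
```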
```diff
+  /**
+   * Native full-text search (capability `search.fullText`).
+   *
+   * Returns the top-N records by relevance, ordered by the search
+   * index's score. Only Firestore Enterprise declares this capability
+   * today — the underlying Pipelines `search({ query: documentMatches(...) })`
+   * stage requires Enterprise's FTS index. Standard does not declare
+   * the cap (FTS is an Enterprise-only product feature, not a
+   * typed-API gap), and the SQLite-shaped backends have no native
+   * FTS index. Backends without `search.fullText` throw
+   * `UNSUPPORTED_OPERATION` from this wrapper.
+   *
+   * Scan-protection mirrors `findNearest`: a search with no
+   * identifying filters (`aType` / `axbType` / `bType`) walks every
+   * row the index scored, so the request must opt in via
+   * `allowCollectionScan: true`.
+   *
+   * Migrations are NOT applied. The FTS index walked the raw stored
+   * shape; rehydrating each row through the migration pipeline would
+   * change the candidate set the index already scored. If you need
+   * migrated shape, follow up with `getNode` / `findEdges` on the
+   * returned UIDs.
+   */
+  async fullTextSearch(params) {
+    if (!this.backend.fullTextSearch) {
+      throw new FiregraphError(
+        "fullTextSearch() is not supported by the current storage backend. Full-text search requires a backend that declares `search.fullText` (currently Firestore Enterprise only \u2014 FTS is an Enterprise product feature). There is no client-side fallback because emulating FTS over the generic backend surface would not scale beyond toy datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.fullTextSearch(params);
+  }
```
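
A sketch of a full-text call; the `query` parameter name is an assumption (the wrapper forwards `params` verbatim), while the filter fields and scan-protection behaviour are grounded above:

```ts
declare const client: any; // Firestore Enterprise backend (search.fullText)

// Assumed query-param name ("query"); node type illustrative.
const hits = await client.fullTextSearch({
  aType: "article",                    // identifying filter
  query: "graph databases at the edge",
});
// Backends without search.fullText throw UNSUPPORTED_OPERATION instead.
```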
```diff
+  /**
+   * Native geospatial distance search (capability `search.geo`).
+   *
+   * Returns rows whose `geoField` lies within `radiusMeters` of
+   * `point`, ordered nearest-first by default. Only Firestore
+   * Enterprise declares this capability — same Enterprise-only
+   * gating as `fullTextSearch`. Backends without `search.geo` throw
+   * `UNSUPPORTED_OPERATION` from this wrapper.
+   *
+   * Scan-protection mirrors `findNearest` and `fullTextSearch`.
+   *
+   * Migrations are NOT applied — same rationale as the other search
+   * extensions.
+   */
+  async geoSearch(params) {
+    if (!this.backend.geoSearch) {
+      throw new FiregraphError(
+        "geoSearch() is not supported by the current storage backend. Geospatial search requires a backend that declares `search.geo` (currently Firestore Enterprise only \u2014 geo queries are an Enterprise product feature). There is no client-side fallback because emulating geo over the generic backend surface (haversine over `findEdges`) would not scale beyond trivial datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.geoSearch(params);
+  }
```
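
A sketch using the field names the JSDoc itself names (`geoField`, `point`, `radiusMeters`); the `{ latitude, longitude }` point encoding is an assumption:

```ts
declare const client: any; // Firestore Enterprise backend (search.geo)

const nearby = await client.geoSearch({
  aType: "store",                                  // identifying filter
  geoField: "data.location",
  point: { latitude: 37.77, longitude: -122.42 },  // assumed encoding
  radiusMeters: 5000,                              // nearest-first by default
});
```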
```diff
+  /**
+   * Translate a `FindEdgesParams` into the `QueryFilter[]` shape the
+   * backend `bulkDelete` / `bulkUpdate` methods expect. Mirrors the
+   * `aggregate()` plan: a bare-empty params object becomes an empty
+   * filter list (after a scan-protection check); a GET-shape (all three
+   * identifiers) is rejected so we never silently turn a single-row
+   * lookup into a server-side DML; otherwise we run `buildEdgeQueryPlan`
+   * and surface its filters.
+   */
+  buildDmlFilters(params) {
+    const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
+    if (!hasAnyFilter) {
+      this.checkQuerySafety([], params.allowCollectionScan);
+      return [];
+    }
+    const plan = buildEdgeQueryPlan(params);
+    if (plan.strategy === "get") {
+      throw new FiregraphError(
+        "bulkDelete() / bulkUpdate() require a query, not a direct document lookup. Use removeEdge() / updateEdge() for single-row operations, or omit one of aUid/axbType/bUid to force a query strategy.",
+        "INVALID_QUERY"
+      );
+    }
+    this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+    return plan.filters;
+  }
+  // ---------------------------------------------------------------------------
   // Dynamic registry methods
   // ---------------------------------------------------------------------------
   async defineNodeType(name, jsonSchema, description, options) {
```
```diff
@@ -2105,9 +2464,10 @@ var GraphClientImpl = class _GraphClientImpl {
     };
   }
 };
-function
+function createGraphClient(backend, options, metaBackend) {
   return new GraphClientImpl(backend, options, metaBackend);
 }
+var createGraphClientFromBackend = createGraphClient;
 
 // src/codegen/index.ts
 function pascalCase(s) {
```
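
The root factory is now backend-first: `createGraphClient(backend, options, metaBackend)`, with the old name kept as an alias (the removed line above was truncated to `function` by the diff viewer). The Firestore-bound `createGraphClient(db, collectionPath, options)` overload removed in the next hunk presumably moves to the new `firestore-standard` / `firestore-enterprise` entry points. A sketch, assuming both names stay exported from the package root:

```ts
// Assumption: both factory names remain exported from the package root.
import { createGraphClient, createGraphClientFromBackend } from "@typicalday/firegraph";

declare const backend: any; // any StorageBackend (e.g. from the sqlite subpath)

const client = createGraphClient(backend);
const same = createGraphClientFromBackend(backend); // alias of the same factory
```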
```diff
@@ -2391,515 +2751,6 @@ function discoverEntities(entitiesDir) {
   };
 }
 
-// src/internal/firestore-backend.ts
-var import_firestore2 = require("@google-cloud/firestore");
-
-// src/bulk.ts
-var MAX_BATCH_SIZE = 500;
-var DEFAULT_MAX_RETRIES = 3;
-var BASE_DELAY_MS = 200;
-function sleep(ms) {
-  return new Promise((resolve2) => setTimeout(resolve2, ms));
-}
-function chunk(arr, size) {
-  const chunks = [];
-  for (let i = 0; i < arr.length; i += size) {
-    chunks.push(arr.slice(i, i + size));
-  }
-  return chunks;
-}
-async function bulkDeleteDocIds(db, collectionPath, docIds, options) {
-  if (docIds.length === 0) {
-    return { deleted: 0, batches: 0, errors: [] };
-  }
-  const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
-  const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
-  const onProgress = options?.onProgress;
-  const chunks = chunk(docIds, batchSize);
-  const errors = [];
-  let deleted = 0;
-  let completedBatches = 0;
-  for (let i = 0; i < chunks.length; i++) {
-    const ids = chunks[i];
-    let committed = false;
-    for (let attempt = 0; attempt <= maxRetries; attempt++) {
-      try {
-        const batch = db.batch();
-        const collectionRef = db.collection(collectionPath);
-        for (const id of ids) {
-          batch.delete(collectionRef.doc(id));
-        }
-        await batch.commit();
-        committed = true;
-        deleted += ids.length;
-        break;
-      } catch (err) {
-        if (attempt < maxRetries) {
-          const delay = BASE_DELAY_MS * Math.pow(2, attempt);
-          await sleep(delay);
-        } else {
-          errors.push({
-            batchIndex: i,
-            error: err instanceof Error ? err : new Error(String(err)),
-            operationCount: ids.length
-          });
-        }
-      }
-    }
-    if (committed) {
-      completedBatches++;
-    }
-    if (onProgress) {
-      onProgress({
-        completedBatches,
-        totalBatches: chunks.length,
-        deletedSoFar: deleted
-      });
-    }
-  }
-  return { deleted, batches: completedBatches, errors };
-}
-async function bulkRemoveEdges(db, collectionPath, reader, params, options) {
-  const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
-  const edges = await reader.findEdges(effectiveParams);
-  const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
-  return bulkDeleteDocIds(db, collectionPath, docIds, options);
-}
-async function deleteSubcollectionsRecursive(db, collectionPath, docId, options) {
-  const docRef = db.collection(collectionPath).doc(docId);
-  const subcollections = await docRef.listCollections();
-  if (subcollections.length === 0) return { deleted: 0, errors: [] };
-  let totalDeleted = 0;
-  const allErrors = [];
-  const subOptions = options ? { batchSize: options.batchSize, maxRetries: options.maxRetries } : void 0;
-  for (const subCollRef of subcollections) {
-    const subCollPath = subCollRef.path;
-    const snapshot = await subCollRef.select().get();
-    const subDocIds = snapshot.docs.map((d) => d.id);
-    for (const subDocId of subDocIds) {
-      const subResult = await deleteSubcollectionsRecursive(db, subCollPath, subDocId, subOptions);
-      totalDeleted += subResult.deleted;
-      allErrors.push(...subResult.errors);
-    }
-    if (subDocIds.length > 0) {
-      const result = await bulkDeleteDocIds(db, subCollPath, subDocIds, subOptions);
-      totalDeleted += result.deleted;
-      allErrors.push(...result.errors);
-    }
-  }
-  return { deleted: totalDeleted, errors: allErrors };
-}
-async function removeNodeCascade(db, collectionPath, reader, uid, options) {
-  const [outgoingRaw, incomingRaw] = await Promise.all([
-    reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
-    reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
-  ]);
-  const outgoing = outgoingRaw.filter((e) => e.axbType !== NODE_RELATION);
-  const incoming = incomingRaw.filter((e) => e.axbType !== NODE_RELATION);
-  const edgeDocIdSet = /* @__PURE__ */ new Set();
-  const allEdges = [];
-  for (const edge of [...outgoing, ...incoming]) {
-    const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
-    if (!edgeDocIdSet.has(docId)) {
-      edgeDocIdSet.add(docId);
-      allEdges.push(edge);
-    }
-  }
-  const shouldDeleteSubcollections = options?.deleteSubcollections !== false;
-  const nodeDocId = computeNodeDocId(uid);
-  let subcollectionResult = { deleted: 0, errors: [] };
-  if (shouldDeleteSubcollections) {
-    subcollectionResult = await deleteSubcollectionsRecursive(
-      db,
-      collectionPath,
-      nodeDocId,
-      options
-    );
-  }
-  const edgeDocIds = allEdges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
-  const allDocIds = [...edgeDocIds, nodeDocId];
-  const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
-  const result = await bulkDeleteDocIds(db, collectionPath, allDocIds, {
-    ...options,
-    batchSize
-  });
-  const totalChunks = Math.ceil(allDocIds.length / batchSize);
-  const nodeChunkIndex = totalChunks - 1;
-  const nodeDeleted = !result.errors.some((e) => e.batchIndex === nodeChunkIndex);
-  const topLevelEdgesDeleted = nodeDeleted ? result.deleted - 1 : result.deleted;
-  return {
-    deleted: result.deleted + subcollectionResult.deleted,
-    batches: result.batches,
-    errors: [...result.errors, ...subcollectionResult.errors],
-    edgesDeleted: topLevelEdgesDeleted,
-    nodeDeleted
-  };
-}
-
-// src/internal/firestore-backend.ts
-init_serialization();
-
-// src/internal/firestore-adapter.ts
-function createFirestoreAdapter(db, collectionPath) {
-  const collectionRef = db.collection(collectionPath);
-  return {
-    collectionPath,
-    async getDoc(docId) {
-      const snap = await collectionRef.doc(docId).get();
-      if (!snap.exists) return null;
-      return snap.data();
-    },
-    async setDoc(docId, data, options) {
-      if (options?.merge) {
-        await collectionRef.doc(docId).set(data, { merge: true });
-      } else {
-        await collectionRef.doc(docId).set(data);
-      }
-    },
-    async updateDoc(docId, data) {
-      await collectionRef.doc(docId).update(data);
-    },
-    async deleteDoc(docId) {
-      await collectionRef.doc(docId).delete();
-    },
-    async query(filters, options) {
-      let q = collectionRef;
-      for (const f of filters) {
-        q = q.where(f.field, f.op, f.value);
-      }
-      if (options?.orderBy) {
-        q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
-      }
-      if (options?.limit !== void 0) {
-        q = q.limit(options.limit);
-      }
-      const snap = await q.get();
-      return snap.docs.map((doc) => doc.data());
-    }
-  };
-}
-function createTransactionAdapter(db, collectionPath, tx) {
-  const collectionRef = db.collection(collectionPath);
-  return {
-    async getDoc(docId) {
-      const snap = await tx.get(collectionRef.doc(docId));
-      if (!snap.exists) return null;
-      return snap.data();
-    },
-    setDoc(docId, data, options) {
-      if (options?.merge) {
-        tx.set(collectionRef.doc(docId), data, { merge: true });
-      } else {
-        tx.set(collectionRef.doc(docId), data);
-      }
-    },
-    updateDoc(docId, data) {
-      tx.update(collectionRef.doc(docId), data);
-    },
-    deleteDoc(docId) {
-      tx.delete(collectionRef.doc(docId));
-    },
-    async query(filters, options) {
-      let q = collectionRef;
-      for (const f of filters) {
-        q = q.where(f.field, f.op, f.value);
-      }
-      if (options?.orderBy) {
-        q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
-      }
-      if (options?.limit !== void 0) {
-        q = q.limit(options.limit);
-      }
-      const snap = await tx.get(q);
-      return snap.docs.map((doc) => doc.data());
-    }
-  };
-}
-function createBatchAdapter(db, collectionPath) {
-  const collectionRef = db.collection(collectionPath);
-  const batch = db.batch();
-  return {
-    setDoc(docId, data, options) {
-      if (options?.merge) {
-        batch.set(collectionRef.doc(docId), data, { merge: true });
-      } else {
-        batch.set(collectionRef.doc(docId), data);
-      }
-    },
-    updateDoc(docId, data) {
-      batch.update(collectionRef.doc(docId), data);
-    },
-    deleteDoc(docId) {
-      batch.delete(collectionRef.doc(docId));
-    },
-    async commit() {
-      await batch.commit();
-    }
-  };
-}
-
-// src/internal/pipeline-adapter.ts
-var _Pipelines = null;
-async function getPipelines() {
-  if (!_Pipelines) {
-    const mod = await import("@google-cloud/firestore");
-    _Pipelines = mod.Pipelines;
-  }
-  return _Pipelines;
-}
-function buildFilterExpression(P, filter) {
-  const { field: fieldName, op, value } = filter;
-  switch (op) {
-    case "==":
-      return P.equal(fieldName, value);
-    case "!=":
-      return P.notEqual(fieldName, value);
-    case "<":
-      return P.lessThan(fieldName, value);
-    case "<=":
-      return P.lessThanOrEqual(fieldName, value);
-    case ">":
-      return P.greaterThan(fieldName, value);
-    case ">=":
-      return P.greaterThanOrEqual(fieldName, value);
-    case "in":
-      return P.equalAny(fieldName, value);
-    case "not-in":
-      return P.notEqualAny(fieldName, value);
-    case "array-contains":
-      return P.arrayContains(fieldName, value);
-    case "array-contains-any":
-      return P.arrayContainsAny(fieldName, value);
-    default:
-      throw new Error(`Unsupported filter op for pipeline mode: ${op}`);
-  }
-}
-function createPipelineQueryAdapter(db, collectionPath) {
-  return {
-    async query(filters, options) {
-      const P = await getPipelines();
-      let pipeline = db.pipeline().collection(collectionPath);
-      if (filters.length === 1) {
-        pipeline = pipeline.where(buildFilterExpression(P, filters[0]));
-      } else if (filters.length > 1) {
-        const [first, second, ...rest] = filters.map((f) => buildFilterExpression(P, f));
-        pipeline = pipeline.where(P.and(first, second, ...rest));
-      }
-      if (options?.orderBy) {
-        const f = P.field(options.orderBy.field);
-        const ordering = options.orderBy.direction === "desc" ? f.descending() : f.ascending();
-        pipeline = pipeline.sort(ordering);
-      }
-      if (options?.limit !== void 0) {
-        pipeline = pipeline.limit(options.limit);
-      }
-      const snap = await pipeline.execute();
-      return snap.results.map((r) => r.data());
-    }
-  };
-}
-
-// src/internal/firestore-backend.ts
-function dottedDataPath(op) {
-  assertSafePath(op.path);
-  return `data.${op.path.join(".")}`;
-}
-function buildFirestoreUpdate(update, db) {
-  assertUpdatePayloadExclusive(update);
-  const out = {
-    updatedAt: import_firestore2.FieldValue.serverTimestamp()
-  };
-  if (update.replaceData) {
-    out.data = deserializeFirestoreTypes(update.replaceData, db);
-  } else if (update.dataOps) {
-    for (const op of update.dataOps) {
-      const key = dottedDataPath(op);
-      out[key] = op.delete ? import_firestore2.FieldValue.delete() : op.value;
-    }
-  }
-  if (update.v !== void 0) {
-    out.v = update.v;
-  }
-  return out;
-}
-function stampWritableRecord(record) {
-  const now = import_firestore2.FieldValue.serverTimestamp();
-  const out = {
-    aType: record.aType,
-    aUid: record.aUid,
-    axbType: record.axbType,
-    bType: record.bType,
-    bUid: record.bUid,
-    data: record.data,
-    createdAt: now,
-    updatedAt: now
-  };
-  if (record.v !== void 0) out.v = record.v;
-  return out;
-}
-var FirestoreTransactionBackend = class {
-  constructor(adapter, db) {
-    this.adapter = adapter;
-    this.db = db;
-  }
-  getDoc(docId) {
-    return this.adapter.getDoc(docId);
-  }
-  query(filters, options) {
-    return this.adapter.query(filters, options);
-  }
-  async setDoc(docId, record, mode) {
-    this.adapter.setDoc(
-      docId,
-      stampWritableRecord(record),
-      mode === "merge" ? { merge: true } : void 0
-    );
-  }
-  async updateDoc(docId, update) {
-    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-  }
-  async deleteDoc(docId) {
-    this.adapter.deleteDoc(docId);
-  }
-};
-var FirestoreBatchBackend = class {
-  constructor(adapter, db) {
-    this.adapter = adapter;
-    this.db = db;
-  }
-  setDoc(docId, record, mode) {
-    this.adapter.setDoc(
-      docId,
-      stampWritableRecord(record),
-      mode === "merge" ? { merge: true } : void 0
-    );
-  }
-  updateDoc(docId, update) {
-    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-  }
-  deleteDoc(docId) {
-    this.adapter.deleteDoc(docId);
-  }
-  commit() {
-    return this.adapter.commit();
-  }
-};
-var FirestoreBackendImpl = class _FirestoreBackendImpl {
-  constructor(db, collectionPath, queryMode, scopePath) {
-    this.db = db;
-    this.queryMode = queryMode;
-    this.collectionPath = collectionPath;
-    this.scopePath = scopePath;
-    this.adapter = createFirestoreAdapter(db, collectionPath);
-    if (queryMode === "pipeline") {
-      this.pipelineAdapter = createPipelineQueryAdapter(db, collectionPath);
-    }
-  }
-  collectionPath;
-  scopePath;
-  adapter;
-  pipelineAdapter;
-  // --- Reads ---
-  getDoc(docId) {
-    return this.adapter.getDoc(docId);
-  }
-  query(filters, options) {
-    if (this.pipelineAdapter) {
-      return this.pipelineAdapter.query(filters, options);
-    }
-    return this.adapter.query(filters, options);
-  }
-  // --- Writes ---
-  setDoc(docId, record, mode) {
-    return this.adapter.setDoc(
-      docId,
-      stampWritableRecord(record),
-      mode === "merge" ? { merge: true } : void 0
-    );
-  }
-  updateDoc(docId, update) {
-    return this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-  }
-  deleteDoc(docId) {
-    return this.adapter.deleteDoc(docId);
-  }
-  // --- Transactions / Batches ---
-  runTransaction(fn) {
-    return this.db.runTransaction(async (firestoreTx) => {
-      const txAdapter = createTransactionAdapter(this.db, this.collectionPath, firestoreTx);
-      return fn(new FirestoreTransactionBackend(txAdapter, this.db));
-    });
-  }
-  createBatch() {
-    const batchAdapter = createBatchAdapter(this.db, this.collectionPath);
-    return new FirestoreBatchBackend(batchAdapter, this.db);
-  }
-  // --- Subgraphs ---
-  subgraph(parentNodeUid, name) {
-    const subPath = `${this.collectionPath}/${parentNodeUid}/${name}`;
-    const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
-    return new _FirestoreBackendImpl(this.db, subPath, this.queryMode, newScope);
-  }
-  // --- Cascade & bulk ---
-  removeNodeCascade(uid, reader, options) {
-    return removeNodeCascade(this.db, this.collectionPath, reader, uid, options);
-  }
-  bulkRemoveEdges(params, reader, options) {
-    return bulkRemoveEdges(this.db, this.collectionPath, reader, params, options);
-  }
-  // --- Cross-collection ---
-  async findEdgesGlobal(params, collectionName) {
-    const name = collectionName ?? this.collectionPath.split("/").pop();
-    const plan = buildEdgeQueryPlan(params);
-    if (plan.strategy === "get") {
-      throw new FiregraphError(
-        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
-        "INVALID_QUERY"
-      );
-    }
-    const collectionGroupRef = this.db.collectionGroup(name);
-    let q = collectionGroupRef;
-    for (const f of plan.filters) {
-      q = q.where(f.field, f.op, f.value);
-    }
-    if (plan.options?.orderBy) {
-      q = q.orderBy(plan.options.orderBy.field, plan.options.orderBy.direction ?? "asc");
-    }
-    if (plan.options?.limit !== void 0) {
-      q = q.limit(plan.options.limit);
-    }
-    const snap = await q.get();
-    return snap.docs.map((doc) => doc.data());
-  }
-};
-function createFirestoreBackend(db, collectionPath, options = {}) {
-  const queryMode = options.queryMode ?? "pipeline";
-  const scopePath = options.scopePath ?? "";
-  return new FirestoreBackendImpl(db, collectionPath, queryMode, scopePath);
-}
-
-// src/firestore.ts
-var _standardModeWarned = false;
-function createGraphClient(db, collectionPath, options) {
-  const requestedMode = options?.queryMode ?? "pipeline";
-  const isEmulator = !!process.env.FIRESTORE_EMULATOR_HOST;
-  const effectiveMode = isEmulator ? "standard" : requestedMode;
-  if (effectiveMode === "standard" && !isEmulator && requestedMode === "standard" && !_standardModeWarned) {
-    _standardModeWarned = true;
-    console.warn(
-      "[firegraph] Standard query mode enabled. This is NOT recommended for production:\n - Enterprise Firestore: data.* filters cause full collection scans (high billing)\n - Standard Firestore: data.* filters without composite indexes will fail\n See: https://github.com/typicalday/firegraph#query-modes"
-    );
-  }
-  const backend = createFirestoreBackend(db, collectionPath, { queryMode: effectiveMode });
-  let metaBackend;
-  if (options?.registryMode?.collection && options.registryMode.collection !== collectionPath) {
-    metaBackend = createFirestoreBackend(db, options.registryMode.collection, {
-      queryMode: effectiveMode
-    });
-  }
-  return new GraphClientImpl(backend, options, metaBackend);
-}
-
 // src/id.ts
 var import_nanoid = require("nanoid");
 function generateId() {
```
```diff
@@ -3258,35 +3109,6 @@ var QueryClient = class {
   }
 };
 
-// src/record.ts
-var import_firestore3 = require("@google-cloud/firestore");
-function buildNodeRecord(aType, uid, data) {
-  const now = import_firestore3.FieldValue.serverTimestamp();
-  return {
-    aType,
-    aUid: uid,
-    axbType: NODE_RELATION,
-    bType: aType,
-    bUid: uid,
-    data,
-    createdAt: now,
-    updatedAt: now
-  };
-}
-function buildEdgeRecord(aType, aUid, axbType, bType, bUid, data) {
-  const now = import_firestore3.FieldValue.serverTimestamp();
-  return {
-    aType,
-    aUid,
-    axbType,
-    bType,
-    bUid,
-    data,
-    createdAt: now,
-    updatedAt: now
-  };
-}
-
 // src/scope-path.ts
 function parseStorageScope(scope) {
   if (scope === "") return [];
```
```diff
@@ -3340,6 +3162,69 @@ function appendStorageScope(parentScope, uid, name) {
 // src/index.ts
 init_serialization();
 
+// src/internal/firestore-traverse-compiler.ts
+var MAX_PIPELINE_DEPTH = 5;
+function compileEngineTraversal(params, opts) {
+  const maxDepth = opts?.maxDepth ?? MAX_PIPELINE_DEPTH;
+  const maxReads = opts?.maxReads ?? params.maxReads;
+  if (!Array.isArray(params.hops) || params.hops.length === 0) {
+    return { eligible: false, reason: "engine traversal requires at least one hop" };
+  }
+  if (params.hops.length > maxDepth) {
+    return {
+      eligible: false,
+      reason: `engine traversal depth ${params.hops.length} exceeds MAX_PIPELINE_DEPTH (${maxDepth})`
+    };
+  }
+  if (!Array.isArray(params.sources)) {
+    return { eligible: false, reason: "engine traversal requires a sources array" };
+  }
+  const normalizedHops = [];
+  for (let i = 0; i < params.hops.length; i++) {
+    const hop = params.hops[i];
+    if (!hop.axbType || hop.axbType.length === 0) {
+      return {
+        eligible: false,
+        reason: `engine traversal hop ${i} is missing axbType`
+      };
+    }
+    if (typeof hop.limitPerSource !== "number" || hop.limitPerSource <= 0 || !Number.isFinite(hop.limitPerSource)) {
+      return {
+        eligible: false,
+        reason: `engine traversal hop ${i} (${hop.axbType}) requires a positive limitPerSource`
+      };
+    }
+    normalizedHops.push({
+      ...hop,
+      axbType: hop.axbType,
+      direction: hop.direction ?? "forward",
+      limitPerSource: hop.limitPerSource
+    });
+  }
+  let estimatedReads = Math.max(1, params.sources.length);
+  for (const hop of normalizedHops) {
+    estimatedReads *= hop.limitPerSource;
+    if (estimatedReads > Number.MAX_SAFE_INTEGER) {
+      estimatedReads = Number.MAX_SAFE_INTEGER;
+      break;
+    }
+  }
+  if (maxReads !== void 0 && estimatedReads > maxReads) {
+    return {
+      eligible: false,
+      reason: `engine traversal worst-case response size ${estimatedReads} exceeds maxReads budget ${maxReads}`
+    };
+  }
+  return {
+    eligible: true,
+    normalized: {
+      sources: params.sources,
+      hops: normalizedHops,
+      estimatedReads
+    }
+  };
+}
+
 // src/traverse.ts
 var DEFAULT_LIMIT = 10;
 var DEFAULT_MAX_READS = 100;
```
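
The read budget in the eligibility check is multiplicative: the worst case is `max(1, sources.length) × ∏ limitPerSource`, clamped at `Number.MAX_SAFE_INTEGER`. A worked example against the function above (edge types illustrative):

```ts
declare function compileEngineTraversal(params: any, opts?: any): any; // from this hunk

// 20 sources × 10 × 5 = 1000 worst-case reads; the budget is 500 → ineligible.
const verdict = compileEngineTraversal({
  sources: Array.from({ length: 20 }, (_, i) => `user_${i}`),
  hops: [
    { axbType: "FOLLOWS", limitPerSource: 10 },
    { axbType: "POSTED", limitPerSource: 5 },
  ],
  maxReads: 500,
});
// verdict => { eligible: false, reason: "engine traversal worst-case response
//   size 1000 exceeds maxReads budget 500" }, and run() falls back to the
//   per-hop loop under engineTraversal: 'auto'.
```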
```diff
@@ -3348,6 +3233,16 @@ var _crossGraphWarned = false;
 function isGraphClient(reader) {
   return "subgraph" in reader && typeof reader.subgraph === "function";
 }
+function readerSupportsExpand(reader) {
+  if (!isGraphClient(reader)) return false;
+  const client = reader;
+  return "capabilities" in client && typeof client.capabilities?.has === "function" && client.capabilities.has("query.join") && typeof client.expand === "function";
+}
+function readerSupportsEngineTraversal(reader) {
+  if (!isGraphClient(reader)) return false;
+  const client = reader;
+  return "capabilities" in client && typeof client.capabilities?.has === "function" && client.capabilities.has("traversal.serverSide") && typeof client.runEngineTraversal === "function";
+}
 var Semaphore = class {
   constructor(slots) {
     this.slots = slots;
```
```diff
@@ -3390,7 +3285,15 @@ var TraversalBuilderImpl = class {
     const maxReads = options?.maxReads ?? DEFAULT_MAX_READS;
     const concurrency = options?.concurrency ?? DEFAULT_CONCURRENCY;
     const returnIntermediates = options?.returnIntermediates ?? false;
+    const engineMode = options?.engineTraversal ?? "auto";
     const semaphore = new Semaphore(concurrency);
+    if (engineMode !== "off") {
+      const engineResult = await this.tryEngineTraversal({
+        engineMode,
+        returnIntermediates
+      });
+      if (engineResult) return engineResult;
+    }
     let totalReads = 0;
     let truncated = false;
     let sources = [
```
@@ -3415,6 +3318,62 @@ var TraversalBuilderImpl = class {
       const resolvedTargetGraph = this.resolveTargetGraph(hop);
       const direction = hop.direction ?? "forward";
       const isCrossGraph = direction === "forward" && !!resolvedTargetGraph;
+      const sharedReader = sources.every((s) => s.reader === sources[0].reader) ? sources[0].reader : null;
+      const canFastPath = !isCrossGraph && sharedReader && readerSupportsExpand(sharedReader);
+      if (canFastPath && sharedReader) {
+        if (totalReads >= maxReads) {
+          hopTruncated = true;
+        } else {
+          totalReads++;
+          const limit = hop.limit ?? DEFAULT_LIMIT;
+          const expandParams = {
+            sources: sources.map((s) => s.uid),
+            axbType: hop.axbType,
+            direction
+          };
+          if (hop.aType) expandParams.aType = hop.aType;
+          if (hop.bType) expandParams.bType = hop.bType;
+          if (hop.orderBy) expandParams.orderBy = hop.orderBy;
+          if (!hop.filter) {
+            expandParams.limitPerSource = limit;
+          }
+          const result = await sharedReader.expand(expandParams);
+          let edges2 = result.edges;
+          if (hop.filter) {
+            edges2 = edges2.filter(hop.filter);
+            const counts = /* @__PURE__ */ new Map();
+            const kept = [];
+            for (const e of edges2) {
+              const sourceUid = direction === "forward" ? e.aUid : e.bUid;
+              const c = counts.get(sourceUid) ?? 0;
+              if (c < limit) {
+                counts.set(sourceUid, c + 1);
+                kept.push(e);
+              }
+            }
+            edges2 = kept;
+          }
+          for (const edge of edges2) {
+            hopEdges.push({ edge, reader: sharedReader });
+          }
+        }
+        const fastEdges = hopEdges.map((h) => h.edge);
+        hopResults.push({
+          axbType: hop.axbType,
+          depth,
+          edges: returnIntermediates ? [...fastEdges] : fastEdges,
+          sourceCount,
+          truncated: hopTruncated
+        });
+        if (hopTruncated) truncated = true;
+        const seen2 = /* @__PURE__ */ new Map();
+        for (const { edge, reader: edgeReader } of hopEdges) {
+          const nextUid = direction === "forward" ? edge.bUid : edge.aUid;
+          if (!seen2.has(nextUid)) seen2.set(nextUid, edgeReader);
+        }
+        sources = [...seen2.entries()].map(([uid, reader]) => ({ uid, reader }));
+        continue;
+      }
       const tasks = sources.map(({ uid, reader: sourceReader }) => async () => {
         if (totalReads >= maxReads) {
           hopTruncated = true;
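One subtlety in the expand() fast path above: limitPerSource is only pushed down to the backend when the hop has no JS filter, since the server cannot predict which edges the callback will reject. With a filter, the hop over-fetches, filters locally, then re-applies the per-source cap via the counts map so each source still yields at most `limit` surviving edges. A sketch of a hop spec that takes that filtered branch (the hop method and the edge fields are illustrative assumptions):

// Because a callback is present, the cap is enforced client-side after filtering.
builder.hop({
  axbType: "user_follows_user",
  limit: 10,
  filter: (edge) => (edge.data?.since ?? 0) >= 2024
});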
@@ -3509,6 +3468,88 @@ var TraversalBuilderImpl = class {
       truncated
     };
   }
+  /**
+   * Try to dispatch the entire hop chain as one engine-traversal call.
+   * Returns a `TraversalResult` on success, or `undefined` if the spec is
+   * ineligible and the caller should fall through to the per-hop loop.
+   *
+   * `'force'` mode throws on any ineligibility instead of returning
+   * `undefined` — the caller intentionally opted out of fallback.
+   */
+  async tryEngineTraversal(args) {
+    const { engineMode, returnIntermediates } = args;
+    const refuse = (reason) => {
+      if (engineMode === "force") {
+        throw new FiregraphError(`engineTraversal: 'force' but ${reason}`, "UNSUPPORTED_OPERATION");
+      }
+      return void 0;
+    };
+    if (!readerSupportsEngineTraversal(this.reader)) {
+      return refuse("reader does not declare traversal.serverSide capability");
+    }
+    const client = this.reader;
+    const engineHops = [];
+    for (let i = 0; i < this.hops.length; i++) {
+      const hop = this.hops[i];
+      if (hop.filter) {
+        return refuse(`hop ${i} (${hop.axbType}) carries a JS filter callback`);
+      }
+      const targetGraph = this.resolveTargetGraph(hop);
+      const direction = hop.direction ?? "forward";
+      if (targetGraph) {
+        return refuse(`hop ${i} (${hop.axbType}) is cross-graph (targetGraph=${targetGraph})`);
+      }
+      const limit = hop.limit ?? DEFAULT_LIMIT;
+      const engineHop = {
+        axbType: hop.axbType,
+        direction,
+        limitPerSource: limit
+      };
+      if (hop.aType) engineHop.aType = hop.aType;
+      if (hop.bType) engineHop.bType = hop.bType;
+      if (hop.orderBy) engineHop.orderBy = hop.orderBy;
+      engineHops.push(engineHop);
+    }
+    const params = {
+      sources: [this.startUid],
+      hops: engineHops
+    };
+    const compiled = compileEngineTraversal(params);
+    if (!compiled.eligible) {
+      return refuse(compiled.reason);
+    }
+    let engineResult;
+    try {
+      engineResult = await client.runEngineTraversal(params);
+    } catch (err) {
+      if (engineMode === "force") throw err;
+      return void 0;
+    }
+    const hopResults = [];
+    for (let i = 0; i < this.hops.length; i++) {
+      const definedHop = this.hops[i];
+      const engineHopResult = engineResult.hops[i] ?? { edges: [], sourceCount: 0 };
+      const edges = engineHopResult.edges;
+      const hopTruncated = edges.length >= engineHops[i].limitPerSource;
+      hopResults.push({
+        axbType: definedHop.axbType,
+        depth: i,
+        edges: returnIntermediates ? [...edges] : edges,
+        sourceCount: engineHopResult.sourceCount,
+        truncated: hopTruncated
+      });
+    }
+    const lastHop = hopResults[hopResults.length - 1];
+    return {
+      nodes: lastHop.edges,
+      hops: hopResults,
+      // One server-side round trip — same accounting as the `expand()`
+      // fast path. The tree response can carry up to `estimatedReads`
+      // docs total, but the budget is in round trips, not docs.
+      totalReads: 1,
+      truncated: hopResults.some((h) => h.truncated)
+    };
+  }
   /**
    * Resolve the targetGraph for a hop. Priority:
    * 1. Explicit `hop.targetGraph` (user override)
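The refuse() helper above is what gives 'force' mode its contract: every ineligibility that 'auto' would silently absorb (a JS filter, a cross-graph hop, a failed compile, a backend error) surfaces as a FiregraphError with code "UNSUPPORTED_OPERATION", or rethrows the backend error. A sketch of observing that behavior, reusing the assumed builder chain from the earlier example:

try {
  await graph
    .traverse("user:alice")
    .hop({ axbType: "user_follows_user", filter: (e) => true }) // filter => ineligible
    .run({ engineTraversal: "force" });
} catch (err) {
  // e.g. "engineTraversal: 'force' but hop 0 (user_follows_user) carries a JS filter callback"
  console.error(err.message);
}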
@@ -3631,6 +3672,7 @@ function defineViews(input) {
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   BOOTSTRAP_ENTRIES,
+  CapabilityNotSupportedError,
   CrossBackendTransactionError,
   DEFAULT_CORE_INDEXES,
   DEFAULT_QUERY_LIMIT,
@@ -3657,9 +3699,7 @@ function defineViews(input) {
   appendStorageScope,
   applyMigrationChain,
   buildEdgeQueryPlan,
-  buildEdgeRecord,
   buildNodeQueryPlan,
-  buildNodeRecord,
   compileMigrationFn,
   compileMigrations,
   compileSchema,