@typicalday/firegraph 0.11.2 → 0.13.0
This diff shows the published contents of two public package versions as they appear in their respective registries. It is provided for informational purposes only.
- package/README.md +355 -78
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +365 -5
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +209 -7
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/{chunk-5753Y42M.js → chunk-C2QMD7RY.js} +6 -10
- package/dist/chunk-C2QMD7RY.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-EQJUUVFG.js +14 -0
- package/dist/chunk-EQJUUVFG.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/chunk-TK64DNVK.js +256 -0
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-NJSOD64C.js → chunk-WRTFC5NG.js} +438 -30
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +1386 -74
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +217 -13
- package/dist/cloudflare/index.d.ts +217 -13
- package/dist/cloudflare/index.js +639 -180
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +809 -534
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +24 -100
- package/dist/index.d.ts +24 -100
- package/dist/index.js +184 -531
- package/dist/index.js.map +1 -1
- package/dist/registry-Bc7h6WTM.d.cts +64 -0
- package/dist/registry-C2KUPVZj.d.ts +64 -0
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/{serialization-ZZ7RSDRX.js → serialization-OE2PFZMY.js} +6 -4
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-U-MLShlg.d.ts +0 -97
- package/dist/backend-np4gEVhB.d.cts +0 -97
- package/dist/chunk-5753Y42M.js.map +0 -1
- package/dist/chunk-NJSOD64C.js.map +0 -1
- package/dist/chunk-R7CRGYY4.js +0 -94
- package/dist/chunk-R7CRGYY4.js.map +0 -1
- package/dist/types-BGWxcpI_.d.cts +0 -736
- package/dist/types-BGWxcpI_.d.ts +0 -736
- /package/dist/{serialization-ZZ7RSDRX.js.map → serialization-OE2PFZMY.js.map} +0 -0
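The 0.13.0 tree adds three whole backend bundles (`dist/firestore-standard/`, `dist/firestore-enterprise/`, `dist/sqlite/`) alongside the existing Cloudflare entry, and `package.json` gains 33 lines, consistent with new subpath exports. A minimal consumer-side sketch, assuming the subpaths mirror the directory names; the backend factory name below is hypothetical and not confirmed by this diff (`createGraphClient` itself does appear later in the bundle):

```ts
// Hypothetical wiring for 0.13.0. The "/sqlite" subpath is inferred from the
// dist/ layout; createSqliteBackend is an assumed export name, not shown here.
import { createGraphClient } from "@typicalday/firegraph";
import { createSqliteBackend } from "@typicalday/firegraph/sqlite";

const backend = createSqliteBackend(/* db handle */);
const client = createGraphClient(backend);
```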
package/dist/index.cjs
CHANGED
@@ -30,6 +30,21 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
 ));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
+// src/internal/serialization-tag.ts
+function isTaggedValue(value) {
+  if (value === null || typeof value !== "object") return false;
+  const tag = value[SERIALIZATION_TAG];
+  return typeof tag === "string" && KNOWN_TYPES.has(tag);
+}
+var SERIALIZATION_TAG, KNOWN_TYPES;
+var init_serialization_tag = __esm({
+  "src/internal/serialization-tag.ts"() {
+    "use strict";
+    SERIALIZATION_TAG = "__firegraph_ser__";
+    KNOWN_TYPES = /* @__PURE__ */ new Set(["Timestamp", "GeoPoint", "VectorValue", "DocumentReference"]);
+  }
+});
+
 // src/serialization.ts
 var serialization_exports = {};
 __export(serialization_exports, {
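For orientation, the tag that the extracted `isTaggedValue` helper checks is a literal key on a plain object. A minimal sketch based only on what this chunk shows (the payload field beside the tag is illustrative):

```ts
// The tag key and the four known type names come from the chunk above; the
// "value" field is illustrative, since the envelope's payload shape is not
// visible in this diff.
const tagged = { __firegraph_ser__: "Timestamp", value: "2024-01-01T00:00:00Z" };
isTaggedValue(tagged);                        // true: tag is a string in KNOWN_TYPES
isTaggedValue({ __firegraph_ser__: "Date" }); // false: "Date" is not a known type
isTaggedValue(null);                          // false: not an object
```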
@@ -38,11 +53,6 @@ __export(serialization_exports, {
   isTaggedValue: () => isTaggedValue,
   serializeFirestoreTypes: () => serializeFirestoreTypes
 });
-function isTaggedValue(value) {
-  if (value === null || typeof value !== "object") return false;
-  const tag = value[SERIALIZATION_TAG];
-  return typeof tag === "string" && KNOWN_TYPES.has(tag);
-}
 function isTimestamp(value) {
   return value instanceof import_firestore.Timestamp;
 }
@@ -143,13 +153,13 @@ function deserializeValue(value, db) {
   }
   return result;
 }
-var import_firestore,
+var import_firestore, _docRefWarned;
 var init_serialization = __esm({
   "src/serialization.ts"() {
     "use strict";
     import_firestore = require("@google-cloud/firestore");
-
-
+    init_serialization_tag();
+    init_serialization_tag();
     _docRefWarned = false;
   }
 });
@@ -158,6 +168,7 @@ var init_serialization = __esm({
 var index_exports = {};
 __export(index_exports, {
   BOOTSTRAP_ENTRIES: () => BOOTSTRAP_ENTRIES,
+  CapabilityNotSupportedError: () => CapabilityNotSupportedError,
   CrossBackendTransactionError: () => CrossBackendTransactionError,
   DEFAULT_CORE_INDEXES: () => DEFAULT_CORE_INDEXES,
   DEFAULT_QUERY_LIMIT: () => DEFAULT_QUERY_LIMIT,
@@ -184,9 +195,7 @@ __export(index_exports, {
   appendStorageScope: () => appendStorageScope,
   applyMigrationChain: () => applyMigrationChain,
   buildEdgeQueryPlan: () => buildEdgeQueryPlan,
-  buildEdgeRecord: () => buildEdgeRecord,
   buildNodeQueryPlan: () => buildNodeQueryPlan,
-  buildNodeRecord: () => buildNodeRecord,
   compileMigrationFn: () => compileMigrationFn,
   compileMigrations: () => compileMigrations,
   compileSchema: () => compileSchema,
@@ -202,6 +211,7 @@ __export(index_exports, {
   defaultExecutor: () => defaultExecutor,
   defineConfig: () => defineConfig,
   defineViews: () => defineViews,
+  deleteField: () => deleteField,
   deserializeFirestoreTypes: () => deserializeFirestoreTypes,
   destroySandboxWorker: () => destroySandboxWorker,
   discoverEntities: () => discoverEntities,
@@ -255,6 +265,139 @@ function computeEdgeDocId(aUid, axbType, bUid) {
   return `${shard}${SHARD_SEPARATOR}${aUid}${SHARD_SEPARATOR}${axbType}${SHARD_SEPARATOR}${bUid}`;
 }
 
+// src/internal/write-plan.ts
+init_serialization_tag();
+var DELETE_FIELD = /* @__PURE__ */ Symbol.for("firegraph.deleteField");
+function deleteField() {
+  return DELETE_FIELD;
+}
+function isDeleteSentinel(value) {
+  return value === DELETE_FIELD;
+}
+var FIRESTORE_TERMINAL_CTOR = /* @__PURE__ */ new Set([
+  "Timestamp",
+  "GeoPoint",
+  "VectorValue",
+  "DocumentReference",
+  "FieldValue",
+  "NumericIncrementTransform",
+  "ArrayUnionTransform",
+  "ArrayRemoveTransform",
+  "ServerTimestampTransform",
+  "DeleteTransform"
+]);
+function isTerminalValue(value) {
+  if (value === null) return true;
+  const t = typeof value;
+  if (t !== "object") return true;
+  if (Array.isArray(value)) return true;
+  if (isTaggedValue(value)) return true;
+  const proto = Object.getPrototypeOf(value);
+  if (proto === null || proto === Object.prototype) return false;
+  const ctor = value.constructor;
+  if (ctor && typeof ctor.name === "string" && FIRESTORE_TERMINAL_CTOR.has(ctor.name)) return true;
+  return true;
+}
+var SAFE_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
+function assertNoDeleteSentinels(data, callerLabel) {
+  walkForDeleteSentinels(data, [], { kind: "root" }, ({ path }) => {
+    const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
+    throw new Error(
+      `firegraph: ${callerLabel} payload contains a deleteField() sentinel at ${where}. deleteField() is only valid inside updateNode/updateEdge \u2014 full-data writes (put*, replace*) cannot delete individual fields. Use updateNode with a deleteField() value, or omit the field from the replace payload.`
+    );
+  });
+}
+function walkForDeleteSentinels(node, path, parent, visit) {
+  if (node === null || node === void 0) return;
+  if (isDeleteSentinel(node)) {
+    visit({ path, parent });
+    return;
+  }
+  if (typeof node !== "object") return;
+  if (isTaggedValue(node)) return;
+  if (Array.isArray(node)) {
+    for (let i = 0; i < node.length; i++) {
+      walkForDeleteSentinels(node[i], [...path, String(i)], { kind: "array", index: i }, visit);
+    }
+    return;
+  }
+  const proto = Object.getPrototypeOf(node);
+  if (proto !== null && proto !== Object.prototype) return;
+  const obj = node;
+  for (const key of Object.keys(obj)) {
+    walkForDeleteSentinels(obj[key], [...path, key], { kind: "object" }, visit);
+  }
+}
+function assertSafePath(path) {
+  for (const seg of path) {
+    if (!SAFE_KEY_RE.test(seg)) {
+      throw new Error(
+        `firegraph: unsafe object key ${JSON.stringify(seg)} at path ${path.map((p) => JSON.stringify(p)).join(" > ")}. Keys used inside update payloads must match /^[A-Za-z_][A-Za-z0-9_-]*$/ so they can be embedded safely in SQLite JSON paths.`
+      );
+    }
+  }
+}
+function flattenPatch(data) {
+  const ops = [];
+  walk(data, [], ops);
+  return ops;
+}
+function assertNoDeleteSentinelsInArrayValue(arr, arrayPath) {
+  walkForDeleteSentinels(arr, arrayPath, { kind: "root" }, ({ parent }) => {
+    const arrayPathStr = arrayPath.length === 0 ? "<root>" : arrayPath.map((p) => JSON.stringify(p)).join(" > ");
+    if (parent.kind === "array") {
+      throw new Error(
+        `firegraph: deleteField() sentinel at index ${parent.index} inside an array at path ${arrayPathStr}. Arrays are terminal in update payloads (replaced as a unit), so the sentinel would be silently dropped by JSON serialization. To remove the field entirely, pass deleteField() in place of the whole array.`
+      );
+    }
+    throw new Error(
+      `firegraph: deleteField() sentinel inside an array element at path ${arrayPathStr}. Arrays are terminal in update payloads \u2014 the sentinel would be silently dropped by JSON serialization.`
+    );
+  });
+}
+function walk(node, path, out) {
+  if (node === void 0) return;
+  if (isDeleteSentinel(node)) {
+    if (path.length === 0) {
+      throw new Error("firegraph: deleteField() cannot be the entire update payload.");
+    }
+    assertSafePath(path);
+    out.push({ path: [...path], value: void 0, delete: true });
+    return;
+  }
+  if (isTerminalValue(node)) {
+    if (path.length === 0) {
+      throw new Error(
+        "firegraph: update payload must be a plain object. Got " + (node === null ? "null" : Array.isArray(node) ? "array" : typeof node) + "."
+      );
+    }
+    if (Array.isArray(node)) {
+      assertNoDeleteSentinelsInArrayValue(node, path);
+    }
+    assertSafePath(path);
+    out.push({ path: [...path], value: node, delete: false });
+    return;
+  }
+  const obj = node;
+  const keys = Object.keys(obj);
+  if (keys.length === 0) {
+    if (path.length > 0) {
+      assertSafePath(path);
+      out.push({ path: [...path], value: {}, delete: false });
+    }
+    return;
+  }
+  for (const key of keys) {
+    if (key === SERIALIZATION_TAG) {
+      const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
+      throw new Error(
+        `firegraph: update payload contains a literal \`${SERIALIZATION_TAG}\` key at ${where}. That key is reserved for firegraph's serialization envelope and cannot appear on a plain object in user data. Use a different field name, or pass a recognized tagged value through replaceNode/replaceEdge instead.`
+      );
+    }
+    walk(obj[key], [...path, key], out);
+  }
+}
+
 // src/batch.ts
 function buildWritableNodeRecord(aType, uid, data) {
   return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };
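The net effect of the new write-plan module: update payloads are flattened into per-path ops, with `deleteField()` marking deletions and arrays treated as terminal values. A sketch of the op list the internal `flattenPatch` (shown above) produces:

```ts
// Derived from the walk() logic above. Output op shape:
// { path: string[], value: unknown, delete: boolean }.
const ops = flattenPatch({
  profile: { bio: "hi", avatarUrl: deleteField() },
  tags: ["a", "b"] // arrays are terminal (replaced as a unit)
});
// ops:
// [
//   { path: ["profile", "bio"],       value: "hi",       delete: false },
//   { path: ["profile", "avatarUrl"], value: undefined,  delete: true  },
//   { path: ["tags"],                 value: ["a", "b"], delete: false }
// ]
// Keys must match /^[A-Za-z_][A-Za-z0-9_-]*$/ (assertSafePath), and a
// deleteField() inside an array throws rather than being silently dropped.
```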
@@ -269,6 +412,19 @@ var GraphBatchImpl = class {
     this.scopePath = scopePath;
   }
   async putNode(aType, uid, data) {
+    this.writeNode(aType, uid, data, "merge");
+  }
+  async putEdge(aType, aUid, axbType, bType, bUid, data) {
+    this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
+  }
+  async replaceNode(aType, uid, data) {
+    this.writeNode(aType, uid, data, "replace");
+  }
+  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
+    this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
+  }
+  writeNode(aType, uid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
     if (this.registry) {
       this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
     }
@@ -280,9 +436,10 @@ var GraphBatchImpl = class {
         record.v = entry.schemaVersion;
       }
     }
-    this.backend.setDoc(docId, record);
+    this.backend.setDoc(docId, record, mode);
   }
-
+  writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
     if (this.registry) {
       this.registry.validate(aType, axbType, bType, data, this.scopePath);
     }
@@ -294,11 +451,15 @@
         record.v = entry.schemaVersion;
       }
     }
-    this.backend.setDoc(docId, record);
+    this.backend.setDoc(docId, record, mode);
   }
   async updateNode(uid, data) {
     const docId = computeNodeDocId(uid);
-    this.backend.updateDoc(docId, {
+    this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
+  }
+  async updateEdge(aUid, axbType, bUid, data) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
   }
   async removeNode(uid) {
     const docId = computeNodeDocId(uid);
@@ -394,6 +555,16 @@ var CrossBackendTransactionError = class extends FiregraphError {
     this.name = "CrossBackendTransactionError";
   }
 };
+var CapabilityNotSupportedError = class extends FiregraphError {
+  constructor(capability, backendDescription) {
+    super(
+      `Capability "${capability}" is not supported by ${backendDescription}.`,
+      "CAPABILITY_NOT_SUPPORTED"
+    );
+    this.capability = capability;
+    this.name = "CapabilityNotSupportedError";
+  }
+};
 
 // src/json-schema.ts
 var import_json_schema = require("@cfworker/json-schema");
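`CapabilityNotSupportedError` gives capability probes a structured failure: a stable error code plus the offending capability name. A small sketch (the backend description string is free-form and illustrative):

```ts
// Fields come directly from the class above; only the description argument
// is invented for illustration.
const err = new CapabilityNotSupportedError("query.dml", "the Firestore Standard backend");
err.name;       // "CapabilityNotSupportedError"
err.capability; // "query.dml"
err.message;    // 'Capability "query.dml" is not supported by the Firestore Standard backend.'
```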
@@ -1409,6 +1580,19 @@ var GraphTransactionImpl = class {
     return results.map((r) => r.record);
   }
   async putNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "merge");
+  }
+  async putEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
+  }
+  async replaceNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "replace");
+  }
+  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
+  }
+  async writeNode(aType, uid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
     if (this.registry) {
       this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
     }
@@ -1420,9 +1604,10 @@ var GraphTransactionImpl = class {
         record.v = entry.schemaVersion;
       }
     }
-    await this.backend.setDoc(docId, record);
+    await this.backend.setDoc(docId, record, mode);
   }
-  async
+  async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
     if (this.registry) {
       this.registry.validate(aType, axbType, bType, data, this.scopePath);
     }
@@ -1434,11 +1619,15 @@ var GraphTransactionImpl = class {
         record.v = entry.schemaVersion;
       }
     }
-    await this.backend.setDoc(docId, record);
+    await this.backend.setDoc(docId, record, mode);
   }
   async updateNode(uid, data) {
     const docId = computeNodeDocId(uid);
-    await this.backend.updateDoc(docId, {
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
+  }
+  async updateEdge(aUid, axbType, bUid, data) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
   }
   async removeNode(uid) {
     const docId = computeNodeDocId(uid);
@@ -1476,6 +1665,15 @@ var GraphClientImpl = class _GraphClientImpl {
     this.scanProtection = options?.scanProtection ?? "error";
   }
   scanProtection;
+  /**
+   * Capability set of the underlying backend. Mirrors `backend.capabilities`
+   * verbatim so callers can portability-check (`client.capabilities.has(
+   * 'query.join')`) without reaching for the backend handle. Static for the
+   * lifetime of the client.
+   */
+  get capabilities() {
+    return this.backend.capabilities;
+  }
   // Static mode
   staticRegistry;
   // Dynamic mode
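With the getter in place, callers can branch on backend capabilities without touching the backend handle. A sketch using capability strings that appear elsewhere in this diff:

```ts
// client.capabilities mirrors backend.capabilities (checked with .has, per
// the JSDoc above). "query.join" is one of the capability strings used later
// in this bundle.
if (client.capabilities.has("query.join")) {
  // single round-trip fan-out is available (expand)
} else {
  // fall back to the per-hop loop (createTraversal), as the error
  // messages further down suggest
}
```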
@@ -1637,6 +1835,19 @@ var GraphClientImpl = class _GraphClientImpl {
   // GraphWriter
   // ---------------------------------------------------------------------------
   async putNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "merge");
+  }
+  async putEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
+  }
+  async replaceNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "replace");
+  }
+  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
+  }
+  async writeNode(aType, uid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
     const registry = this.getRegistryForType(aType);
     if (registry) {
       registry.validate(aType, NODE_RELATION, aType, data, this.backend.scopePath);
@@ -1650,9 +1861,10 @@ var GraphClientImpl = class _GraphClientImpl {
         record.v = entry.schemaVersion;
       }
     }
-    await backend.setDoc(docId, record);
+    await backend.setDoc(docId, record, mode);
   }
-  async
+  async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
     const registry = this.getRegistryForType(aType);
     if (registry) {
       registry.validate(aType, axbType, bType, data, this.backend.scopePath);
@@ -1666,11 +1878,15 @@ var GraphClientImpl = class _GraphClientImpl {
         record.v = entry.schemaVersion;
       }
     }
-    await backend.setDoc(docId, record);
+    await backend.setDoc(docId, record, mode);
   }
   async updateNode(uid, data) {
     const docId = computeNodeDocId(uid);
-    await this.backend.updateDoc(docId, {
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
+  }
+  async updateEdge(aUid, axbType, bUid, data) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
   }
   async removeNode(uid) {
     const docId = computeNodeDocId(uid);
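At the client surface, the rewritten `updateNode`/`updateEdge` mean a partial patch can now delete individual fields. A usage sketch (node UID and field names are illustrative):

```ts
// updateNode routes the patch through flattenPatch(), so the sentinel
// removes data.profile.avatarUrl while sibling fields stay untouched.
await client.updateNode("user:123", {
  profile: { avatarUrl: deleteField() }
});
// The same sentinel inside putNode/replaceNode throws via
// assertNoDeleteSentinels(): full-data writes cannot delete fields.
```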
@@ -1752,6 +1968,33 @@ var GraphClientImpl = class _GraphClientImpl {
     return this.applyMigrations(records);
   }
   // ---------------------------------------------------------------------------
+  // Aggregate query (capability: query.aggregate)
+  // ---------------------------------------------------------------------------
+  async aggregate(params) {
+    if (!this.backend.aggregate) {
+      throw new FiregraphError(
+        "aggregate() is not supported by the current storage backend.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
+    if (!hasAnyFilter) {
+      this.checkQuerySafety([], params.allowCollectionScan);
+      const result2 = await this.backend.aggregate(params.aggregates, []);
+      return result2;
+    }
+    const plan = buildEdgeQueryPlan(params);
+    if (plan.strategy === "get") {
+      throw new FiregraphError(
+        "aggregate() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
+        "INVALID_QUERY"
+      );
+    }
+    this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+    const result = await this.backend.aggregate(params.aggregates, plan.filters);
+    return result;
+  }
+  // ---------------------------------------------------------------------------
   // Bulk operations
   // ---------------------------------------------------------------------------
   async removeNodeCascade(uid, options) {
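Shape of an `aggregate()` call per the wrapper above: filters follow the `findEdges` plan, scan protection applies, and a full GET shape (all of `aUid`/`axbType`/`bUid`) is rejected. The structure of `params.aggregates` itself is not visible in this diff, so the spec below is an assumption:

```ts
// The filter fields and allowCollectionScan semantics come from the wrapper
// above; { followers: { kind: "count" } } is a guess at the aggregates
// shape, which this diff does not show.
const result = await client.aggregate({
  axbType: "user_follows_user",
  bUid: "user:123", // omitting aUid keeps this a query, not a GET
  aggregates: { followers: { kind: "count" } }
});
```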
@@ -1761,6 +2004,327 @@ var GraphClientImpl = class _GraphClientImpl {
     return this.backend.bulkRemoveEdges(params, this, options);
   }
   // ---------------------------------------------------------------------------
+  // Server-side DML (capability: query.dml)
+  // ---------------------------------------------------------------------------
+  /**
+   * Single-statement bulk DELETE. Translates `params` to a filter list via
+   * `buildEdgeQueryPlan` (the same plan `findEdges` uses) and dispatches to
+   * `backend.bulkDelete`. The fetch-then-delete loop in `bulkRemoveEdges`
+   * is the cap-less fallback; this method is the fast path on backends
+   * declaring `query.dml`.
+   *
+   * Scan-protection rules match `findEdges`: a query with no identifying
+   * fields requires `allowCollectionScan: true` to pass. A bare-empty
+   * filter set (no `aType`, `aUid`, etc., no `where`) is allowed at this
+   * layer — shared SQLite bounds the blast radius via its leading `scope`
+   * predicate — but the DO RPC backend rejects empty filters at the wire
+   * boundary as defense-in-depth. To wipe a routed subgraph DO, use
+   * `removeNodeCascade` on the parent node instead.
+   */
+  async bulkDelete(params, options) {
+    if (!this.backend.bulkDelete) {
+      throw new FiregraphError(
+        "bulkDelete() is not supported by the current storage backend. Fall back to bulkRemoveEdges() for backends without query.dml (e.g. Firestore Standard).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = this.buildDmlFilters(params);
+    return this.backend.bulkDelete(filters, options);
+  }
+  /**
+   * Single-statement bulk UPDATE. Same translation path as `bulkDelete`,
+   * but the patch is deep-merged into each matching row's `data` via the
+   * shared `flattenPatch` pipeline. Identifying columns are immutable
+   * through this path (see `BulkUpdatePatch` JSDoc).
+   *
+   * Empty-patch rejection happens inside the backend (`compileBulkUpdate`)
+   * — a `data: {}` payload would only rewrite `updated_at`, which is
+   * almost certainly a bug.
+   */
+  async bulkUpdate(params, patch, options) {
+    if (!this.backend.bulkUpdate) {
+      throw new FiregraphError(
+        "bulkUpdate() is not supported by the current storage backend.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = this.buildDmlFilters(params);
+    return this.backend.bulkUpdate(filters, patch, options);
+  }
+  // ---------------------------------------------------------------------------
+  // Multi-source fan-out (capability: query.join)
+  // ---------------------------------------------------------------------------
+  /**
+   * Fan out from `params.sources` over a single edge type in one round trip.
+   * On backends without `query.join`, throws `UNSUPPORTED_OPERATION` — the
+   * cap-less fallback is the per-source `findEdges` loop, which lives in
+   * `traverse.ts` (the higher-level traversal walker) rather than here.
+   *
+   * `expand()` is intentionally edge-type-only — the source set is a flat
+   * UID list and the hop matches one `axbType`. Multi-axbType expansions
+   * become multiple `expand()` calls, one per relation.
+   *
+   * `params.sources.length === 0` short-circuits to an empty result. The
+   * backend never sees the call. (`compileExpand` itself rejects empty
+   * because `IN ()` is not valid SQL.)
+   */
+  async expand(params) {
+    if (!this.backend.expand) {
+      throw new FiregraphError(
+        "expand() is not supported by the current storage backend. Backends without `query.join` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent (just slower).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.sources.length === 0) {
+      return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
+    }
+    return this.backend.expand(params);
+  }
+  // ---------------------------------------------------------------------------
+  // Engine-level multi-hop traversal (capability: traversal.serverSide)
+  // ---------------------------------------------------------------------------
+  /**
+   * Compile a multi-hop traversal spec into one server-side nested
+   * Pipeline and dispatch a single round trip.
+   *
+   * Backends declaring `traversal.serverSide` (Firestore Enterprise
+   * today) install this method; everywhere else, it throws
+   * `UNSUPPORTED_OPERATION`. The capability gate matches the type-level
+   * surface — `GraphClient<C>` only exposes `runEngineTraversal` when
+   * `'traversal.serverSide' extends C`.
+   *
+   * Most callers should not invoke this method directly; the
+   * `createTraversal(...).run()` builder routes through it
+   * automatically when `engineTraversal: 'auto'` (the default) and
+   * the spec is eligible per `firestore-traverse-compiler.ts`. Calling
+   * directly is appropriate for benchmarking or for callers that have
+   * already shaped their hop chain into the strict
+   * `EngineTraversalParams` shape.
+   *
+   * `params.sources.length === 0` short-circuits to empty per-hop
+   * arrays. The backend never sees the call.
+   */
+  async runEngineTraversal(params) {
+    if (!this.backend.runEngineTraversal) {
+      throw new FiregraphError(
+        "runEngineTraversal() is not supported by the current storage backend. Backends without `traversal.serverSide` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent for in-graph specs (different round-trip profile).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.sources.length === 0) {
+      return {
+        hops: params.hops.map(() => ({ edges: [], sourceCount: 0 })),
+        totalReads: 0
+      };
+    }
+    return this.backend.runEngineTraversal(params);
+  }
+  // ---------------------------------------------------------------------------
+  // Server-side projection (capability: query.select)
+  // ---------------------------------------------------------------------------
+  /**
+   * Server-side projection — fetch only the requested fields from each
+   * matching edge. The backend translates the call into a projecting query
+   * (`SELECT json_extract(...)` on SQLite/DO, `Query.select(...)` on
+   * Firestore Standard, classic projection on Enterprise) so the wire
+   * payload is reduced to just the requested fields.
+   *
+   * Resolution rules for `select` (mirrored across all backends):
+   *
+   * - Built-in envelope fields (`aType`, `aUid`, `axbType`, `bType`,
+   *   `bUid`, `createdAt`, `updatedAt`, `v`) → resolve to the typed
+   *   column / Firestore field directly.
+   * - `'data'` literal → returns the whole user payload.
+   * - `'data.<x>'` → explicit nested path, returned at the same shape.
+   * - bare name → rewritten to `data.<name>` (the canonical "give me a
+   *   few keys out of the JSON payload" shape).
+   *
+   * Empty `select: []` is rejected with `INVALID_QUERY`. Duplicate entries
+   * are de-duped (first-occurrence order preserved); the result row carries
+   * one slot per unique field.
+   *
+   * Migrations are *not* applied to the result. The caller asked for a
+   * partial shape, and rehydrating it through the migration pipeline would
+   * require synthesising every absent field — see
+   * `StorageBackend.findEdgesProjected` for the rationale.
+   *
+   * Scan protection follows the `findEdges` rules: a query with no
+   * identifying fields requires `allowCollectionScan: true` to pass. The
+   * cap-less fallback would be `findEdges` + JS-side projection, but that
+   * defeats the wire-payload reduction; backends without `query.select`
+   * throw `UNSUPPORTED_OPERATION` rather than silently materialising full
+   * rows.
+   */
+  async findEdgesProjected(params) {
+    if (!this.backend.findEdgesProjected) {
+      throw new FiregraphError(
+        "findEdgesProjected() is not supported by the current storage backend. There is no client-side fallback because the wire-payload reduction is the entire point of the API \u2014 use findEdges() and project in JS if the backend does not declare `query.select`.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.select.length === 0) {
+      throw new FiregraphError(
+        "findEdgesProjected() requires a non-empty `select` list.",
+        "INVALID_QUERY"
+      );
+    }
+    const plan = buildEdgeQueryPlan(params);
+    let filters;
+    let options;
+    if (plan.strategy === "get") {
+      filters = [
+        { field: "aUid", op: "==", value: params.aUid },
+        { field: "axbType", op: "==", value: params.axbType },
+        { field: "bUid", op: "==", value: params.bUid }
+      ];
+      if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+      if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+      options = void 0;
+    } else {
+      filters = plan.filters;
+      options = plan.options;
+    }
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    const rows = await this.backend.findEdgesProjected(params.select, filters, options);
+    return rows;
+  }
+  /**
+   * Native vector / nearest-neighbour search (capability `search.vector`).
+   *
+   * Resolves to the top-K records by similarity, sorted nearest-first
+   * (`EUCLIDEAN` / `COSINE`) or highest-first (`DOT_PRODUCT`). The wrapper
+   * is intentionally thin: capability check, scan-protection, then forward
+   * `params` verbatim to the backend. All field-path normalisation and
+   * SDK-shape validation lives in the shared
+   * `runFirestoreFindNearest` helper that both Firestore editions call —
+   * keeping it there means the validation surface stays in lockstep with
+   * the SDK call site, regardless of which backend is plugged in.
+   *
+   * Migrations are NOT applied. The vector index walked the raw stored
+   * shape; rehydrating each row through the migration pipeline would
+   * change the candidate set the index already chose. If you need
+   * migrated shape, follow up with `getNode` / `findEdges` on the
+   * returned UIDs — those paths apply migrations normally.
+   *
+   * Scan-protection mirrors `findEdges`: if no identifying filters
+   * (`aType` / `axbType` / `bType`) and no `where` clauses are supplied,
+   * the request must opt in via `allowCollectionScan: true`. The ANN
+   * query still walks the candidate set the WHERE clause produces, so
+   * an unfiltered nearest-neighbour search over a million-row collection
+   * is the same scan trap as an unfiltered `findEdges`.
+   *
+   * Backends without `search.vector` throw `UNSUPPORTED_OPERATION` —
+   * there is no client-side fallback because emulating ANN over the
+   * generic backend surface (`findEdges` + JS-side cosine) doesn't scale
+   * past trivial datasets and would give callers the wrong mental model
+   * about cost.
+   */
+  async findNearest(params) {
+    if (!this.backend.findNearest) {
+      throw new FiregraphError(
+        "findNearest() is not supported by the current storage backend. Vector search requires a backend that declares `search.vector` (currently Firestore Standard and Enterprise). There is no client-side fallback because emulating ANN on top of the generic backend surface does not scale beyond toy datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    if (params.where) filters.push(...params.where);
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.findNearest(params);
+  }
+  /**
+   * Native full-text search (capability `search.fullText`).
+   *
+   * Returns the top-N records by relevance, ordered by the search
+   * index's score. Only Firestore Enterprise declares this capability
+   * today — the underlying Pipelines `search({ query: documentMatches(...) })`
+   * stage requires Enterprise's FTS index. Standard does not declare
+   * the cap (FTS is an Enterprise-only product feature, not a
+   * typed-API gap), and the SQLite-shaped backends have no native
+   * FTS index. Backends without `search.fullText` throw
+   * `UNSUPPORTED_OPERATION` from this wrapper.
+   *
+   * Scan-protection mirrors `findNearest`: a search with no
+   * identifying filters (`aType` / `axbType` / `bType`) walks every
+   * row the index scored, so the request must opt in via
+   * `allowCollectionScan: true`.
+   *
+   * Migrations are NOT applied. The FTS index walked the raw stored
+   * shape; rehydrating each row through the migration pipeline would
+   * change the candidate set the index already scored. If you need
+   * migrated shape, follow up with `getNode` / `findEdges` on the
+   * returned UIDs.
+   */
+  async fullTextSearch(params) {
+    if (!this.backend.fullTextSearch) {
+      throw new FiregraphError(
+        "fullTextSearch() is not supported by the current storage backend. Full-text search requires a backend that declares `search.fullText` (currently Firestore Enterprise only \u2014 FTS is an Enterprise product feature). There is no client-side fallback because emulating FTS over the generic backend surface would not scale beyond toy datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.fullTextSearch(params);
+  }
+  /**
+   * Native geospatial distance search (capability `search.geo`).
+   *
+   * Returns rows whose `geoField` lies within `radiusMeters` of
+   * `point`, ordered nearest-first by default. Only Firestore
+   * Enterprise declares this capability — same Enterprise-only
+   * gating as `fullTextSearch`. Backends without `search.geo` throw
+   * `UNSUPPORTED_OPERATION` from this wrapper.
+   *
+   * Scan-protection mirrors `findNearest` and `fullTextSearch`.
+   *
+   * Migrations are NOT applied — same rationale as the other search
+   * extensions.
+   */
+  async geoSearch(params) {
+    if (!this.backend.geoSearch) {
+      throw new FiregraphError(
+        "geoSearch() is not supported by the current storage backend. Geospatial search requires a backend that declares `search.geo` (currently Firestore Enterprise only \u2014 geo queries are an Enterprise product feature). There is no client-side fallback because emulating geo over the generic backend surface (haversine over `findEdges`) would not scale beyond trivial datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.geoSearch(params);
+  }
+  /**
+   * Translate a `FindEdgesParams` into the `QueryFilter[]` shape the
+   * backend `bulkDelete` / `bulkUpdate` methods expect. Mirrors the
+   * `aggregate()` plan: a bare-empty params object becomes an empty
+   * filter list (after a scan-protection check); a GET-shape (all three
+   * identifiers) is rejected so we never silently turn a single-row
+   * lookup into a server-side DML; otherwise we run `buildEdgeQueryPlan`
+   * and surface its filters.
+   */
+  buildDmlFilters(params) {
+    const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
+    if (!hasAnyFilter) {
+      this.checkQuerySafety([], params.allowCollectionScan);
+      return [];
+    }
+    const plan = buildEdgeQueryPlan(params);
+    if (plan.strategy === "get") {
+      throw new FiregraphError(
+        "bulkDelete() / bulkUpdate() require a query, not a direct document lookup. Use removeEdge() / updateEdge() for single-row operations, or omit one of aUid/axbType/bUid to force a query strategy.",
+        "INVALID_QUERY"
+      );
+    }
+    this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+    return plan.filters;
+  }
+  // ---------------------------------------------------------------------------
   // Dynamic registry methods
   // ---------------------------------------------------------------------------
   async defineNodeType(name, jsonSchema, description, options) {
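Putting the capability gates together: each fast path throws `UNSUPPORTED_OPERATION` when the backend lacks the declared capability, and the error strings name the fallback. A guarded-DML sketch (edge types illustrative):

```ts
// bulkDelete is the single-statement fast path on backends declaring
// query.dml; bulkRemoveEdges is the fetch-then-delete fallback named in the
// error message above.
const params = { aUid: "user:123", axbType: "user_likes_post" };
if (client.capabilities.has("query.dml")) {
  await client.bulkDelete(params);
} else {
  await client.bulkRemoveEdges(params);
}
// Adding bUid as well would make this a GET shape, which buildDmlFilters
// rejects; use removeEdge() for single rows.
```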
@@ -1900,9 +2464,10 @@ var GraphClientImpl = class _GraphClientImpl {
     };
   }
 };
-function
+function createGraphClient(backend, options, metaBackend) {
   return new GraphClientImpl(backend, options, metaBackend);
 }
+var createGraphClientFromBackend = createGraphClient;
 
 // src/codegen/index.ts
 function pascalCase(s) {
@@ -2186,486 +2751,6 @@ function discoverEntities(entitiesDir) {
|
|
|
2186
2751
|
};
|
|
2187
2752
|
}
|
|
2188
2753
|
|
|
2189
|
-
// src/internal/firestore-backend.ts
|
|
2190
|
-
var import_firestore2 = require("@google-cloud/firestore");
|
|
2191
|
-
|
|
2192
|
-
// src/bulk.ts
|
|
2193
|
-
var MAX_BATCH_SIZE = 500;
|
|
2194
|
-
var DEFAULT_MAX_RETRIES = 3;
|
|
2195
|
-
var BASE_DELAY_MS = 200;
|
|
2196
|
-
function sleep(ms) {
|
|
2197
|
-
return new Promise((resolve2) => setTimeout(resolve2, ms));
|
|
2198
|
-
}
|
|
2199
|
-
function chunk(arr, size) {
|
|
2200
|
-
const chunks = [];
|
|
2201
|
-
for (let i = 0; i < arr.length; i += size) {
|
|
2202
|
-
chunks.push(arr.slice(i, i + size));
|
|
2203
|
-
}
|
|
2204
|
-
return chunks;
|
|
2205
|
-
}
|
|
2206
|
-
async function bulkDeleteDocIds(db, collectionPath, docIds, options) {
|
|
2207
|
-
if (docIds.length === 0) {
|
|
2208
|
-
return { deleted: 0, batches: 0, errors: [] };
|
|
2209
|
-
}
|
|
2210
|
-
const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
|
|
2211
|
-
const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
|
|
2212
|
-
const onProgress = options?.onProgress;
|
|
2213
|
-
const chunks = chunk(docIds, batchSize);
|
|
2214
|
-
const errors = [];
|
|
2215
|
-
let deleted = 0;
|
|
2216
|
-
let completedBatches = 0;
|
|
2217
|
-
for (let i = 0; i < chunks.length; i++) {
|
|
2218
|
-
const ids = chunks[i];
|
|
2219
|
-
let committed = false;
|
|
2220
|
-
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
2221
|
-
try {
|
|
2222
|
-
const batch = db.batch();
|
|
2223
|
-
const collectionRef = db.collection(collectionPath);
|
|
2224
|
-
for (const id of ids) {
|
|
2225
|
-
batch.delete(collectionRef.doc(id));
|
|
2226
|
-
}
|
|
2227
|
-
await batch.commit();
|
|
2228
|
-
committed = true;
|
|
2229
|
-
deleted += ids.length;
|
|
2230
|
-
break;
|
|
2231
|
-
} catch (err) {
|
|
2232
|
-
if (attempt < maxRetries) {
|
|
2233
|
-
const delay = BASE_DELAY_MS * Math.pow(2, attempt);
|
|
2234
|
-
await sleep(delay);
|
|
2235
|
-
} else {
|
|
2236
|
-
errors.push({
|
|
2237
|
-
batchIndex: i,
|
|
2238
|
-
error: err instanceof Error ? err : new Error(String(err)),
|
|
2239
|
-
operationCount: ids.length
|
|
2240
|
-
});
|
|
2241
|
-
}
|
|
2242
|
-
}
|
|
2243
|
-
}
|
|
2244
|
-
if (committed) {
|
|
2245
|
-
completedBatches++;
|
|
2246
|
-
}
|
|
2247
|
-
if (onProgress) {
|
|
2248
|
-
onProgress({
|
|
2249
|
-
completedBatches,
|
|
2250
|
-
totalBatches: chunks.length,
|
|
2251
|
-
deletedSoFar: deleted
|
|
2252
|
-
});
|
|
2253
|
-
}
|
|
2254
|
-
}
|
|
2255
|
-
return { deleted, batches: completedBatches, errors };
|
|
2256
|
-
}
|
|
2257
|
-
async function bulkRemoveEdges(db, collectionPath, reader, params, options) {
|
|
2258
|
-
const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
|
|
2259
|
-
const edges = await reader.findEdges(effectiveParams);
|
|
2260
|
-
const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
|
|
2261
|
-
return bulkDeleteDocIds(db, collectionPath, docIds, options);
|
|
2262
|
-
}
|
|
2263
|
-
async function deleteSubcollectionsRecursive(db, collectionPath, docId, options) {
|
|
2264
|
-
const docRef = db.collection(collectionPath).doc(docId);
|
|
2265
|
-
const subcollections = await docRef.listCollections();
|
|
2266
|
-
if (subcollections.length === 0) return { deleted: 0, errors: [] };
|
|
2267
|
-
let totalDeleted = 0;
|
|
2268
|
-
const allErrors = [];
|
|
2269
|
-
const subOptions = options ? { batchSize: options.batchSize, maxRetries: options.maxRetries } : void 0;
|
|
2270
|
-
for (const subCollRef of subcollections) {
|
|
2271
|
-
const subCollPath = subCollRef.path;
|
|
2272
|
-
const snapshot = await subCollRef.select().get();
|
|
2273
|
-
const subDocIds = snapshot.docs.map((d) => d.id);
|
|
2274
|
-
for (const subDocId of subDocIds) {
|
|
2275
|
-
const subResult = await deleteSubcollectionsRecursive(db, subCollPath, subDocId, subOptions);
|
|
2276
|
-
totalDeleted += subResult.deleted;
|
|
2277
|
-
allErrors.push(...subResult.errors);
|
|
2278
|
-
}
|
|
2279
|
-
if (subDocIds.length > 0) {
|
|
2280
|
-
const result = await bulkDeleteDocIds(db, subCollPath, subDocIds, subOptions);
|
|
2281
|
-
totalDeleted += result.deleted;
|
|
2282
|
-
allErrors.push(...result.errors);
|
|
2283
|
-
}
|
|
2284
|
-
}
|
|
2285
|
-
return { deleted: totalDeleted, errors: allErrors };
|
|
2286
|
-
}
|
|
2287
|
-
async function removeNodeCascade(db, collectionPath, reader, uid, options) {
|
|
2288
|
-
const [outgoingRaw, incomingRaw] = await Promise.all([
|
|
2289
|
-
reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
|
|
2290
|
-
reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
|
|
2291
|
-
]);
|
|
2292
|
-
const outgoing = outgoingRaw.filter((e) => e.axbType !== NODE_RELATION);
|
|
2293
|
-
const incoming = incomingRaw.filter((e) => e.axbType !== NODE_RELATION);
|
|
2294
|
-
const edgeDocIdSet = /* @__PURE__ */ new Set();
|
|
2295
|
-
const allEdges = [];
|
|
2296
|
-
for (const edge of [...outgoing, ...incoming]) {
|
|
2297
|
-
const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
|
|
2298
|
-
if (!edgeDocIdSet.has(docId)) {
|
|
2299
|
-
edgeDocIdSet.add(docId);
|
|
2300
|
-
allEdges.push(edge);
|
|
2301
|
-
}
|
|
2302
|
-
}
|
|
2303
|
-
const shouldDeleteSubcollections = options?.deleteSubcollections !== false;
|
|
2304
|
-
const nodeDocId = computeNodeDocId(uid);
|
|
2305
|
-
let subcollectionResult = { deleted: 0, errors: [] };
|
|
2306
|
-
if (shouldDeleteSubcollections) {
|
|
2307
|
-
subcollectionResult = await deleteSubcollectionsRecursive(
|
|
2308
|
-
db,
|
|
2309
|
-
collectionPath,
|
|
2310
|
-
nodeDocId,
|
|
2311
|
-
options
|
|
2312
|
-
);
|
|
2313
|
-
}
|
|
2314
|
-
const edgeDocIds = allEdges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
|
|
2315
|
-
const allDocIds = [...edgeDocIds, nodeDocId];
|
|
2316
|
-
const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
|
|
2317
|
-
const result = await bulkDeleteDocIds(db, collectionPath, allDocIds, {
|
|
2318
|
-
...options,
|
|
2319
|
-
batchSize
|
|
2320
|
-
});
|
|
2321
|
-
const totalChunks = Math.ceil(allDocIds.length / batchSize);
|
|
2322
|
-
const nodeChunkIndex = totalChunks - 1;
|
|
2323
|
-
const nodeDeleted = !result.errors.some((e) => e.batchIndex === nodeChunkIndex);
|
|
2324
|
-
const topLevelEdgesDeleted = nodeDeleted ? result.deleted - 1 : result.deleted;
|
|
2325
|
-
return {
|
|
2326
|
-
deleted: result.deleted + subcollectionResult.deleted,
|
|
2327
|
-
batches: result.batches,
|
|
2328
|
-
errors: [...result.errors, ...subcollectionResult.errors],
|
|
2329
|
-
edgesDeleted: topLevelEdgesDeleted,
|
|
2330
|
-
nodeDeleted
|
|
2331
|
-
};
|
|
2332
|
-
}
|
|
2333
|
-
|
|
2334
|
-
// src/internal/firestore-backend.ts
|
|
2335
|
-
init_serialization();
|
|
2336
|
-
|
|
2337
|
-
// src/internal/firestore-adapter.ts
|
|
2338
|
-
function createFirestoreAdapter(db, collectionPath) {
|
|
2339
|
-
const collectionRef = db.collection(collectionPath);
|
|
2340
|
-
return {
|
|
2341
|
-
collectionPath,
|
|
2342
|
-
async getDoc(docId) {
|
|
2343
|
-
const snap = await collectionRef.doc(docId).get();
|
|
2344
|
-
if (!snap.exists) return null;
|
|
2345
|
-
return snap.data();
|
|
2346
|
-
},
|
|
2347
|
-
async setDoc(docId, data) {
|
|
2348
|
-
await collectionRef.doc(docId).set(data);
|
|
2349
|
-
},
|
|
2350
|
-
async updateDoc(docId, data) {
|
|
2351
|
-
await collectionRef.doc(docId).update(data);
|
|
2352
|
-
},
|
|
2353
|
-
async deleteDoc(docId) {
|
|
2354
|
-
await collectionRef.doc(docId).delete();
|
|
2355
|
-
},
|
|
2356
|
-
async query(filters, options) {
|
|
2357
|
-
let q = collectionRef;
|
|
2358
|
-
for (const f of filters) {
|
|
2359
|
-
q = q.where(f.field, f.op, f.value);
|
|
2360
|
-
}
|
|
2361
|
-
if (options?.orderBy) {
|
|
2362
|
-
q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
|
|
2363
|
-
}
|
|
2364
|
-
if (options?.limit !== void 0) {
|
|
2365
|
-
q = q.limit(options.limit);
|
|
2366
|
-
}
|
|
2367
|
-
const snap = await q.get();
|
|
2368
|
-
return snap.docs.map((doc) => doc.data());
|
|
2369
|
-
}
|
|
2370
|
-
};
|
|
2371
|
-
}
|
|
2372
|
-
function createTransactionAdapter(db, collectionPath, tx) {
|
|
2373
|
-
const collectionRef = db.collection(collectionPath);
|
|
2374
|
-
return {
|
|
2375
|
-
async getDoc(docId) {
|
|
2376
|
-
const snap = await tx.get(collectionRef.doc(docId));
|
|
2377
|
-
if (!snap.exists) return null;
|
|
2378
|
-
return snap.data();
|
|
2379
|
-
},
|
|
2380
|
-
setDoc(docId, data) {
|
|
2381
|
-
tx.set(collectionRef.doc(docId), data);
|
|
2382
|
-
},
|
|
2383
|
-
updateDoc(docId, data) {
|
|
2384
|
-
tx.update(collectionRef.doc(docId), data);
|
|
2385
|
-
},
|
|
2386
|
-
deleteDoc(docId) {
|
|
2387
|
-
tx.delete(collectionRef.doc(docId));
|
|
2388
|
-
},
|
|
2389
|
-
async query(filters, options) {
|
|
2390
|
-
let q = collectionRef;
|
|
2391
|
-
for (const f of filters) {
|
|
2392
|
-
q = q.where(f.field, f.op, f.value);
|
|
2393
|
-
}
|
|
2394
|
-
if (options?.orderBy) {
|
|
2395
|
-
q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
|
|
2396
|
-
}
|
|
2397
|
-
if (options?.limit !== void 0) {
|
|
2398
|
-
q = q.limit(options.limit);
|
|
2399
|
-
}
|
|
2400
|
-
const snap = await tx.get(q);
|
|
2401
|
-
return snap.docs.map((doc) => doc.data());
|
|
2402
|
-
}
|
|
2403
|
-
};
|
|
2404
|
-
}
|
|
2405
|
-
function createBatchAdapter(db, collectionPath) {
|
|
2406
|
-
const collectionRef = db.collection(collectionPath);
|
|
2407
|
-
const batch = db.batch();
|
|
2408
|
-
return {
|
|
2409
|
-
setDoc(docId, data) {
|
|
2410
|
-
batch.set(collectionRef.doc(docId), data);
|
|
2411
|
-
},
|
|
2412
|
-
updateDoc(docId, data) {
|
|
2413
|
-
batch.update(collectionRef.doc(docId), data);
|
|
2414
|
-
},
|
|
2415
|
-
deleteDoc(docId) {
|
|
2416
|
-
batch.delete(collectionRef.doc(docId));
|
|
2417
|
-
},
|
|
2418
|
-
async commit() {
|
|
2419
|
-
await batch.commit();
|
|
2420
|
-
}
|
|
2421
|
-
};
|
|
2422
|
-
}
|
|
2423
|
-
|
|
-// src/internal/pipeline-adapter.ts
-var _Pipelines = null;
-async function getPipelines() {
-  if (!_Pipelines) {
-    const mod = await import("@google-cloud/firestore");
-    _Pipelines = mod.Pipelines;
-  }
-  return _Pipelines;
-}
-function buildFilterExpression(P, filter) {
-  const { field: fieldName, op, value } = filter;
-  switch (op) {
-    case "==":
-      return P.equal(fieldName, value);
-    case "!=":
-      return P.notEqual(fieldName, value);
-    case "<":
-      return P.lessThan(fieldName, value);
-    case "<=":
-      return P.lessThanOrEqual(fieldName, value);
-    case ">":
-      return P.greaterThan(fieldName, value);
-    case ">=":
-      return P.greaterThanOrEqual(fieldName, value);
-    case "in":
-      return P.equalAny(fieldName, value);
-    case "not-in":
-      return P.notEqualAny(fieldName, value);
-    case "array-contains":
-      return P.arrayContains(fieldName, value);
-    case "array-contains-any":
-      return P.arrayContainsAny(fieldName, value);
-    default:
-      throw new Error(`Unsupported filter op for pipeline mode: ${op}`);
-  }
-}
-function createPipelineQueryAdapter(db, collectionPath) {
-  return {
-    async query(filters, options) {
-      const P = await getPipelines();
-      let pipeline = db.pipeline().collection(collectionPath);
-      if (filters.length === 1) {
-        pipeline = pipeline.where(buildFilterExpression(P, filters[0]));
-      } else if (filters.length > 1) {
-        const [first, second, ...rest] = filters.map((f) => buildFilterExpression(P, f));
-        pipeline = pipeline.where(P.and(first, second, ...rest));
-      }
-      if (options?.orderBy) {
-        const f = P.field(options.orderBy.field);
-        const ordering = options.orderBy.direction === "desc" ? f.descending() : f.ascending();
-        pipeline = pipeline.sort(ordering);
-      }
-      if (options?.limit !== void 0) {
-        pipeline = pipeline.limit(options.limit);
-      }
-      const snap = await pipeline.execute();
-      return snap.results.map((r) => r.data());
-    }
-  };
-}
-
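The removed pipeline adapter mapped each classic `where(field, op, value)` triple onto exactly one pipeline expression helper. A runnable restatement of that op table (the helper names are taken from the removed code; whether a given `@google-cloud/firestore` release exposes all of them is version-dependent and an assumption here):

```ts
// Restatement of buildFilterExpression's op table from the removed code.
const PIPELINE_HELPER_FOR_OP: Record<string, string> = {
  "==": "equal",
  "!=": "notEqual",
  "<": "lessThan",
  "<=": "lessThanOrEqual",
  ">": "greaterThan",
  ">=": "greaterThanOrEqual",
  "in": "equalAny",
  "not-in": "notEqualAny",
  "array-contains": "arrayContains",
  "array-contains-any": "arrayContainsAny"
};

function pipelineCallFor(field: string, op: string, value: unknown): string {
  const helper = PIPELINE_HELPER_FOR_OP[op];
  if (!helper) throw new Error(`Unsupported filter op for pipeline mode: ${op}`);
  return `P.${helper}(${JSON.stringify(field)}, ${JSON.stringify(value)})`;
}

// pipelineCallFor("axbType", "in", ["follows", "likes"])
//   -> 'P.equalAny("axbType", ["follows", "likes"])'
```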
-// src/internal/firestore-backend.ts
-function buildFirestoreUpdate(update, db) {
-  const out = {
-    updatedAt: import_firestore2.FieldValue.serverTimestamp()
-  };
-  if (update.replaceData) {
-    out.data = deserializeFirestoreTypes(update.replaceData, db);
-  }
-  if (update.dataFields) {
-    for (const [k, v] of Object.entries(update.dataFields)) {
-      out[`data.${k}`] = v;
-    }
-  }
-  if (update.v !== void 0) {
-    out.v = update.v;
-  }
-  return out;
-}
-function stampWritableRecord(record) {
-  const now = import_firestore2.FieldValue.serverTimestamp();
-  const out = {
-    aType: record.aType,
-    aUid: record.aUid,
-    axbType: record.axbType,
-    bType: record.bType,
-    bUid: record.bUid,
-    data: record.data,
-    createdAt: now,
-    updatedAt: now
-  };
-  if (record.v !== void 0) out.v = record.v;
-  return out;
-}
-var FirestoreTransactionBackend = class {
-  constructor(adapter, db) {
-    this.adapter = adapter;
-    this.db = db;
-  }
-  getDoc(docId) {
-    return this.adapter.getDoc(docId);
-  }
-  query(filters, options) {
-    return this.adapter.query(filters, options);
-  }
-  async setDoc(docId, record) {
-    this.adapter.setDoc(docId, stampWritableRecord(record));
-  }
-  async updateDoc(docId, update) {
-    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-  }
-  async deleteDoc(docId) {
-    this.adapter.deleteDoc(docId);
-  }
-};
-var FirestoreBatchBackend = class {
-  constructor(adapter, db) {
-    this.adapter = adapter;
-    this.db = db;
-  }
-  setDoc(docId, record) {
-    this.adapter.setDoc(docId, stampWritableRecord(record));
-  }
-  updateDoc(docId, update) {
-    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-  }
-  deleteDoc(docId) {
-    this.adapter.deleteDoc(docId);
-  }
-  commit() {
-    return this.adapter.commit();
-  }
-};
-var FirestoreBackendImpl = class _FirestoreBackendImpl {
-  constructor(db, collectionPath, queryMode, scopePath) {
-    this.db = db;
-    this.queryMode = queryMode;
-    this.collectionPath = collectionPath;
-    this.scopePath = scopePath;
-    this.adapter = createFirestoreAdapter(db, collectionPath);
-    if (queryMode === "pipeline") {
-      this.pipelineAdapter = createPipelineQueryAdapter(db, collectionPath);
-    }
-  }
-  collectionPath;
-  scopePath;
-  adapter;
-  pipelineAdapter;
-  // --- Reads ---
-  getDoc(docId) {
-    return this.adapter.getDoc(docId);
-  }
-  query(filters, options) {
-    if (this.pipelineAdapter) {
-      return this.pipelineAdapter.query(filters, options);
-    }
-    return this.adapter.query(filters, options);
-  }
-  // --- Writes ---
-  setDoc(docId, record) {
-    return this.adapter.setDoc(docId, stampWritableRecord(record));
-  }
-  updateDoc(docId, update) {
-    return this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-  }
-  deleteDoc(docId) {
-    return this.adapter.deleteDoc(docId);
-  }
-  // --- Transactions / Batches ---
-  runTransaction(fn) {
-    return this.db.runTransaction(async (firestoreTx) => {
-      const txAdapter = createTransactionAdapter(this.db, this.collectionPath, firestoreTx);
-      return fn(new FirestoreTransactionBackend(txAdapter, this.db));
-    });
-  }
-  createBatch() {
-    const batchAdapter = createBatchAdapter(this.db, this.collectionPath);
-    return new FirestoreBatchBackend(batchAdapter, this.db);
-  }
-  // --- Subgraphs ---
-  subgraph(parentNodeUid, name) {
-    const subPath = `${this.collectionPath}/${parentNodeUid}/${name}`;
-    const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
-    return new _FirestoreBackendImpl(this.db, subPath, this.queryMode, newScope);
-  }
-  // --- Cascade & bulk ---
-  removeNodeCascade(uid, reader, options) {
-    return removeNodeCascade(this.db, this.collectionPath, reader, uid, options);
-  }
-  bulkRemoveEdges(params, reader, options) {
-    return bulkRemoveEdges(this.db, this.collectionPath, reader, params, options);
-  }
-  // --- Cross-collection ---
-  async findEdgesGlobal(params, collectionName) {
-    const name = collectionName ?? this.collectionPath.split("/").pop();
-    const plan = buildEdgeQueryPlan(params);
-    if (plan.strategy === "get") {
-      throw new FiregraphError(
-        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
-        "INVALID_QUERY"
-      );
-    }
-    const collectionGroupRef = this.db.collectionGroup(name);
-    let q = collectionGroupRef;
-    for (const f of plan.filters) {
-      q = q.where(f.field, f.op, f.value);
-    }
-    if (plan.options?.orderBy) {
-      q = q.orderBy(plan.options.orderBy.field, plan.options.orderBy.direction ?? "asc");
-    }
-    if (plan.options?.limit !== void 0) {
-      q = q.limit(plan.options.limit);
-    }
-    const snap = await q.get();
-    return snap.docs.map((doc) => doc.data());
-  }
-};
-function createFirestoreBackend(db, collectionPath, options = {}) {
-  const queryMode = options.queryMode ?? "pipeline";
-  const scopePath = options.scopePath ?? "";
-  return new FirestoreBackendImpl(db, collectionPath, queryMode, scopePath);
-}
-
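The removed `buildFirestoreUpdate` also documents the update wire format: `replaceData` swaps the whole `data` map, `dataFields` entries become dot-path updates prefixed with `data.`, and every write refreshes `updatedAt`. A worked input/output pair (the input shape is inferred from the call sites; it is not a published type):

```ts
// Hypothetical input:
const update = { dataFields: { title: "Hello", "meta.tags": ["a", "b"] }, v: 2 };

// buildFirestoreUpdate(update, db) then yields, per the removed code:
// {
//   updatedAt: FieldValue.serverTimestamp(),
//   "data.title": "Hello",
//   "data.meta.tags": ["a", "b"],  // keys are prefixed verbatim, nesting included
//   v: 2
// }
```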
-// src/firestore.ts
-var _standardModeWarned = false;
-function createGraphClient(db, collectionPath, options) {
-  const requestedMode = options?.queryMode ?? "pipeline";
-  const isEmulator = !!process.env.FIRESTORE_EMULATOR_HOST;
-  const effectiveMode = isEmulator ? "standard" : requestedMode;
-  if (effectiveMode === "standard" && !isEmulator && requestedMode === "standard" && !_standardModeWarned) {
-    _standardModeWarned = true;
-    console.warn(
-      "[firegraph] Standard query mode enabled. This is NOT recommended for production:\n - Enterprise Firestore: data.* filters cause full collection scans (high billing)\n - Standard Firestore: data.* filters without composite indexes will fail\n See: https://github.com/typicalday/firegraph#query-modes"
-    );
-  }
-  const backend = createFirestoreBackend(db, collectionPath, { queryMode: effectiveMode });
-  let metaBackend;
-  if (options?.registryMode?.collection && options.registryMode.collection !== collectionPath) {
-    metaBackend = createFirestoreBackend(db, options.registryMode.collection, {
-      queryMode: effectiveMode
-    });
-  }
-  return new GraphClientImpl(backend, options, metaBackend);
-}
-
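Worth noting in the removed factory: emulator detection always wins. Any `FIRESTORE_EMULATOR_HOST` forces `standard` mode regardless of the requested `queryMode`, and the one-time warning fires only when `standard` was explicitly requested outside the emulator. A usage sketch against the removed 0.11.x signature (collection name and option values are illustrative):

```ts
import { Firestore } from "@google-cloud/firestore";

const db = new Firestore();
// Per the removed code above:
//  - "pipeline" requested + FIRESTORE_EMULATOR_HOST set -> silently runs "standard", no warning
//  - "standard" requested without the emulator          -> warns once, then proceeds
const graph = createGraphClient(db, "graph", { queryMode: "pipeline" });
```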
 // src/id.ts
 var import_nanoid = require("nanoid");
 function generateId() {
@@ -3024,35 +3109,6 @@ var QueryClient = class {
   }
 };

-// src/record.ts
-var import_firestore3 = require("@google-cloud/firestore");
-function buildNodeRecord(aType, uid, data) {
-  const now = import_firestore3.FieldValue.serverTimestamp();
-  return {
-    aType,
-    aUid: uid,
-    axbType: NODE_RELATION,
-    bType: aType,
-    bUid: uid,
-    data,
-    createdAt: now,
-    updatedAt: now
-  };
-}
-function buildEdgeRecord(aType, aUid, axbType, bType, bUid, data) {
-  const now = import_firestore3.FieldValue.serverTimestamp();
-  return {
-    aType,
-    aUid,
-    axbType,
-    bType,
-    bUid,
-    data,
-    createdAt: now,
-    updatedAt: now
-  };
-}
-
 // src/scope-path.ts
 function parseStorageScope(scope) {
   if (scope === "") return [];
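The removed record builders spell out the storage model: a node is persisted as a self-edge, with `axbType` set to the `NODE_RELATION` sentinel and both endpoints pointing at the node itself, so nodes and edges share a single document shape. Illustration (values per the removed code):

```ts
// buildNodeRecord("user", "u1", { name: "Ada" }) produced:
// {
//   aType: "user", aUid: "u1",
//   axbType: NODE_RELATION,        // sentinel relation: "this document is a node"
//   bType: "user", bUid: "u1",     // self-reference, aUid === bUid
//   data: { name: "Ada" },
//   createdAt: <serverTimestamp>,
//   updatedAt: <serverTimestamp>
// }
```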
@@ -3106,6 +3162,69 @@ function appendStorageScope(parentScope, uid, name) {
 // src/index.ts
 init_serialization();

+// src/internal/firestore-traverse-compiler.ts
+var MAX_PIPELINE_DEPTH = 5;
+function compileEngineTraversal(params, opts) {
+  const maxDepth = opts?.maxDepth ?? MAX_PIPELINE_DEPTH;
+  const maxReads = opts?.maxReads ?? params.maxReads;
+  if (!Array.isArray(params.hops) || params.hops.length === 0) {
+    return { eligible: false, reason: "engine traversal requires at least one hop" };
+  }
+  if (params.hops.length > maxDepth) {
+    return {
+      eligible: false,
+      reason: `engine traversal depth ${params.hops.length} exceeds MAX_PIPELINE_DEPTH (${maxDepth})`
+    };
+  }
+  if (!Array.isArray(params.sources)) {
+    return { eligible: false, reason: "engine traversal requires a sources array" };
+  }
+  const normalizedHops = [];
+  for (let i = 0; i < params.hops.length; i++) {
+    const hop = params.hops[i];
+    if (!hop.axbType || hop.axbType.length === 0) {
+      return {
+        eligible: false,
+        reason: `engine traversal hop ${i} is missing axbType`
+      };
+    }
+    if (typeof hop.limitPerSource !== "number" || hop.limitPerSource <= 0 || !Number.isFinite(hop.limitPerSource)) {
+      return {
+        eligible: false,
+        reason: `engine traversal hop ${i} (${hop.axbType}) requires a positive limitPerSource`
+      };
+    }
+    normalizedHops.push({
+      ...hop,
+      axbType: hop.axbType,
+      direction: hop.direction ?? "forward",
+      limitPerSource: hop.limitPerSource
+    });
+  }
+  let estimatedReads = Math.max(1, params.sources.length);
+  for (const hop of normalizedHops) {
+    estimatedReads *= hop.limitPerSource;
+    if (estimatedReads > Number.MAX_SAFE_INTEGER) {
+      estimatedReads = Number.MAX_SAFE_INTEGER;
+      break;
+    }
+  }
+  if (maxReads !== void 0 && estimatedReads > maxReads) {
+    return {
+      eligible: false,
+      reason: `engine traversal worst-case response size ${estimatedReads} exceeds maxReads budget ${maxReads}`
+    };
+  }
+  return {
+    eligible: true,
+    normalized: {
+      sources: params.sources,
+      hops: normalizedHops,
+      estimatedReads
+    }
+  };
+}
+
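The new compiler gates server-side traversal on a worst-case read estimate: `max(1, sources.length)` multiplied by every hop's `limitPerSource`, refused whenever a `maxReads` budget is supplied and exceeded. A quick check against the function as added above:

```ts
// Two sources, hops capped at 10 and 5 -> worst case 2 * 10 * 5 = 100 docs.
const spec = {
  sources: ["u1", "u2"],
  hops: [
    { axbType: "follows", limitPerSource: 10 },
    { axbType: "authored", limitPerSource: 5 }
  ]
};
compileEngineTraversal(spec);
//   -> { eligible: true, normalized: { ..., estimatedReads: 100 } }
compileEngineTraversal(spec, { maxReads: 50 });
//   -> { eligible: false, reason: "...worst-case response size 100 exceeds maxReads budget 50" }
compileEngineTraversal({ sources: ["u1"], hops: new Array(6).fill({ axbType: "x", limitPerSource: 1 }) });
//   -> { eligible: false, reason: "...depth 6 exceeds MAX_PIPELINE_DEPTH (5)" }
```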
 // src/traverse.ts
 var DEFAULT_LIMIT = 10;
 var DEFAULT_MAX_READS = 100;
@@ -3114,6 +3233,16 @@ var _crossGraphWarned = false;
 function isGraphClient(reader) {
   return "subgraph" in reader && typeof reader.subgraph === "function";
 }
+function readerSupportsExpand(reader) {
+  if (!isGraphClient(reader)) return false;
+  const client = reader;
+  return "capabilities" in client && typeof client.capabilities?.has === "function" && client.capabilities.has("query.join") && typeof client.expand === "function";
+}
+function readerSupportsEngineTraversal(reader) {
+  if (!isGraphClient(reader)) return false;
+  const client = reader;
+  return "capabilities" in client && typeof client.capabilities?.has === "function" && client.capabilities.has("traversal.serverSide") && typeof client.runEngineTraversal === "function";
+}
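Both gates are purely structural (duck typing): a reader qualifies if `isGraphClient` passes, its `capabilities` container has a working `has()` that returns the right token, and the matching method exists. A minimal object that satisfies `readerSupportsExpand` (illustrative only; real clients come from the library's factories):

```ts
const fakeReader = {
  subgraph() {},                            // satisfies isGraphClient()
  capabilities: new Set(["query.join"]),    // Set#has covers the duck-typed check
  async expand(_params: unknown) { return { edges: [] }; }
};
// readerSupportsExpand(fakeReader)          -> true
// readerSupportsEngineTraversal(fakeReader) -> false ("traversal.serverSide" token
//                                              and runEngineTraversal are missing)
```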
 var Semaphore = class {
   constructor(slots) {
     this.slots = slots;
@@ -3156,7 +3285,15 @@ var TraversalBuilderImpl = class {
     const maxReads = options?.maxReads ?? DEFAULT_MAX_READS;
     const concurrency = options?.concurrency ?? DEFAULT_CONCURRENCY;
     const returnIntermediates = options?.returnIntermediates ?? false;
+    const engineMode = options?.engineTraversal ?? "auto";
     const semaphore = new Semaphore(concurrency);
+    if (engineMode !== "off") {
+      const engineResult = await this.tryEngineTraversal({
+        engineMode,
+        returnIntermediates
+      });
+      if (engineResult) return engineResult;
+    }
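The added option gives the traversal runner three observable modes. Restated compactly (the type alias is mine, not a package export; the behavior is read off the added lines above and `tryEngineTraversal` below):

```ts
// Assumed alias; not exported by the package.
type EngineTraversalMode = "auto" | "force" | "off";

// Dispatch per the added lines above:
//   "off"   -> skip tryEngineTraversal entirely; per-hop loop only.
//   "auto"  -> (default) try the engine; undefined falls through silently.
//   "force" -> try the engine; ineligibility or engine errors are thrown.
```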
     let totalReads = 0;
     let truncated = false;
     let sources = [
@@ -3181,6 +3318,62 @@
       const resolvedTargetGraph = this.resolveTargetGraph(hop);
       const direction = hop.direction ?? "forward";
       const isCrossGraph = direction === "forward" && !!resolvedTargetGraph;
+      const sharedReader = sources.every((s) => s.reader === sources[0].reader) ? sources[0].reader : null;
+      const canFastPath = !isCrossGraph && sharedReader && readerSupportsExpand(sharedReader);
+      if (canFastPath && sharedReader) {
+        if (totalReads >= maxReads) {
+          hopTruncated = true;
+        } else {
+          totalReads++;
+          const limit = hop.limit ?? DEFAULT_LIMIT;
+          const expandParams = {
+            sources: sources.map((s) => s.uid),
+            axbType: hop.axbType,
+            direction
+          };
+          if (hop.aType) expandParams.aType = hop.aType;
+          if (hop.bType) expandParams.bType = hop.bType;
+          if (hop.orderBy) expandParams.orderBy = hop.orderBy;
+          if (!hop.filter) {
+            expandParams.limitPerSource = limit;
+          }
+          const result = await sharedReader.expand(expandParams);
+          let edges2 = result.edges;
+          if (hop.filter) {
+            edges2 = edges2.filter(hop.filter);
+            const counts = /* @__PURE__ */ new Map();
+            const kept = [];
+            for (const e of edges2) {
+              const sourceUid = direction === "forward" ? e.aUid : e.bUid;
+              const c = counts.get(sourceUid) ?? 0;
+              if (c < limit) {
+                counts.set(sourceUid, c + 1);
+                kept.push(e);
+              }
+            }
+            edges2 = kept;
+          }
+          for (const edge of edges2) {
+            hopEdges.push({ edge, reader: sharedReader });
+          }
+        }
+        const fastEdges = hopEdges.map((h) => h.edge);
+        hopResults.push({
+          axbType: hop.axbType,
+          depth,
+          edges: returnIntermediates ? [...fastEdges] : fastEdges,
+          sourceCount,
+          truncated: hopTruncated
+        });
+        if (hopTruncated) truncated = true;
+        const seen2 = /* @__PURE__ */ new Map();
+        for (const { edge, reader: edgeReader } of hopEdges) {
+          const nextUid = direction === "forward" ? edge.bUid : edge.aUid;
+          if (!seen2.has(nextUid)) seen2.set(nextUid, edgeReader);
+        }
+        sources = [...seen2.entries()].map(([uid, reader]) => ({ uid, reader }));
+        continue;
+      }
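One subtlety in the fast path: when a hop carries a JS `filter` callback, `limitPerSource` cannot be pushed down to the backend, because the cap must apply to edges that survive the filter. The added code therefore expands without a per-source limit, filters, then re-applies the cap with a counting map. That trimming step in isolation:

```ts
// Standalone restatement of the post-filter trim: keep at most `limit`
// edges per source uid, preserving encounter order.
function trimPerSource<E>(edges: E[], limit: number, sourceUidOf: (e: E) => string): E[] {
  const counts = new Map<string, number>();
  const kept: E[] = [];
  for (const e of edges) {
    const uid = sourceUidOf(e);
    const c = counts.get(uid) ?? 0;
    if (c < limit) {
      counts.set(uid, c + 1);
      kept.push(e);
    }
  }
  return kept;
}

// Forward hops key on e.aUid, reverse hops on e.bUid, as in the code above.
```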
       const tasks = sources.map(({ uid, reader: sourceReader }) => async () => {
         if (totalReads >= maxReads) {
           hopTruncated = true;
@@ -3275,6 +3468,88 @@
       truncated
     };
   }
+  /**
+   * Try to dispatch the entire hop chain as one engine-traversal call.
+   * Returns a `TraversalResult` on success, or `undefined` if the spec is
+   * ineligible and the caller should fall through to the per-hop loop.
+   *
+   * `'force'` mode throws on any ineligibility instead of returning
+   * `undefined` — the caller intentionally opted out of fallback.
+   */
+  async tryEngineTraversal(args) {
+    const { engineMode, returnIntermediates } = args;
+    const refuse = (reason) => {
+      if (engineMode === "force") {
+        throw new FiregraphError(`engineTraversal: 'force' but ${reason}`, "UNSUPPORTED_OPERATION");
+      }
+      return void 0;
+    };
+    if (!readerSupportsEngineTraversal(this.reader)) {
+      return refuse("reader does not declare traversal.serverSide capability");
+    }
+    const client = this.reader;
+    const engineHops = [];
+    for (let i = 0; i < this.hops.length; i++) {
+      const hop = this.hops[i];
+      if (hop.filter) {
+        return refuse(`hop ${i} (${hop.axbType}) carries a JS filter callback`);
+      }
+      const targetGraph = this.resolveTargetGraph(hop);
+      const direction = hop.direction ?? "forward";
+      if (targetGraph) {
+        return refuse(`hop ${i} (${hop.axbType}) is cross-graph (targetGraph=${targetGraph})`);
+      }
+      const limit = hop.limit ?? DEFAULT_LIMIT;
+      const engineHop = {
+        axbType: hop.axbType,
+        direction,
+        limitPerSource: limit
+      };
+      if (hop.aType) engineHop.aType = hop.aType;
+      if (hop.bType) engineHop.bType = hop.bType;
+      if (hop.orderBy) engineHop.orderBy = hop.orderBy;
+      engineHops.push(engineHop);
+    }
+    const params = {
+      sources: [this.startUid],
+      hops: engineHops
+    };
+    const compiled = compileEngineTraversal(params);
+    if (!compiled.eligible) {
+      return refuse(compiled.reason);
+    }
+    let engineResult;
+    try {
+      engineResult = await client.runEngineTraversal(params);
+    } catch (err) {
+      if (engineMode === "force") throw err;
+      return void 0;
+    }
+    const hopResults = [];
+    for (let i = 0; i < this.hops.length; i++) {
+      const definedHop = this.hops[i];
+      const engineHopResult = engineResult.hops[i] ?? { edges: [], sourceCount: 0 };
+      const edges = engineHopResult.edges;
+      const hopTruncated = edges.length >= engineHops[i].limitPerSource;
+      hopResults.push({
+        axbType: definedHop.axbType,
+        depth: i,
+        edges: returnIntermediates ? [...edges] : edges,
+        sourceCount: engineHopResult.sourceCount,
+        truncated: hopTruncated
+      });
+    }
+    const lastHop = hopResults[hopResults.length - 1];
+    return {
+      nodes: lastHop.edges,
+      hops: hopResults,
+      // One server-side round trip — same accounting as the `expand()`
+      // fast path. The tree response can carry up to `estimatedReads`
+      // docs total, but the budget is in round trips, not docs.
+      totalReads: 1,
+      truncated: hopResults.some((h) => h.truncated)
+    };
+  }
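`refuse` is the single seam separating the two soft-failure behaviors: under `"auto"` every ineligibility collapses to `undefined` and the per-hop loop takes over, while `"force"` surfaces the first reason as a `FiregraphError` with code `UNSUPPORTED_OPERATION`. A minimal restatement of that pattern:

```ts
// Mirrors the refuse() closure in the added method above; FiregraphError is
// the error class used throughout this bundle.
function makeRefuse(engineMode: "auto" | "force") {
  return (reason: string): undefined => {
    if (engineMode === "force") {
      throw new FiregraphError(`engineTraversal: 'force' but ${reason}`, "UNSUPPORTED_OPERATION");
    }
    return void 0; // "auto": caller falls back to the per-hop loop
  };
}

// Example reasons emitted above: a hop carrying a JS filter callback,
// a cross-graph hop, or an ineligible compile (depth / maxReads budget).
```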
   /**
    * Resolve the targetGraph for a hop. Priority:
    * 1. Explicit `hop.targetGraph` (user override)
@@ -3397,6 +3672,7 @@ function defineViews(input) {
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   BOOTSTRAP_ENTRIES,
+  CapabilityNotSupportedError,
   CrossBackendTransactionError,
   DEFAULT_CORE_INDEXES,
   DEFAULT_QUERY_LIMIT,
@@ -3423,9 +3699,7 @@ function defineViews(input) {
   appendStorageScope,
   applyMigrationChain,
   buildEdgeQueryPlan,
-  buildEdgeRecord,
   buildNodeQueryPlan,
-  buildNodeRecord,
   compileMigrationFn,
   compileMigrations,
   compileSchema,
@@ -3441,6 +3715,7 @@ function defineViews(input) {
   defaultExecutor,
   defineConfig,
   defineViews,
+  deleteField,
   deserializeFirestoreTypes,
   destroySandboxWorker,
   discoverEntities,