@typicalday/firegraph 0.11.2 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +355 -78
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +365 -5
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +209 -7
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/{chunk-5753Y42M.js → chunk-C2QMD7RY.js} +6 -10
- package/dist/chunk-C2QMD7RY.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-EQJUUVFG.js +14 -0
- package/dist/chunk-EQJUUVFG.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/chunk-TK64DNVK.js +256 -0
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-NJSOD64C.js → chunk-WRTFC5NG.js} +438 -30
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +1386 -74
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +217 -13
- package/dist/cloudflare/index.d.ts +217 -13
- package/dist/cloudflare/index.js +639 -180
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +809 -534
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +24 -100
- package/dist/index.d.ts +24 -100
- package/dist/index.js +184 -531
- package/dist/index.js.map +1 -1
- package/dist/registry-Bc7h6WTM.d.cts +64 -0
- package/dist/registry-C2KUPVZj.d.ts +64 -0
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/{serialization-ZZ7RSDRX.js → serialization-OE2PFZMY.js} +6 -4
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-U-MLShlg.d.ts +0 -97
- package/dist/backend-np4gEVhB.d.cts +0 -97
- package/dist/chunk-5753Y42M.js.map +0 -1
- package/dist/chunk-NJSOD64C.js.map +0 -1
- package/dist/chunk-R7CRGYY4.js +0 -94
- package/dist/chunk-R7CRGYY4.js.map +0 -1
- package/dist/types-BGWxcpI_.d.cts +0 -736
- package/dist/types-BGWxcpI_.d.ts +0 -736
- /package/dist/{serialization-ZZ7RSDRX.js.map → serialization-OE2PFZMY.js.map} +0 -0
|
@@ -30,6 +30,21 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
30
30
|
));
|
|
31
31
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
32
32
|
|
|
33
|
+
// src/internal/serialization-tag.ts
|
|
34
|
+
function isTaggedValue(value) {
|
|
35
|
+
if (value === null || typeof value !== "object") return false;
|
|
36
|
+
const tag = value[SERIALIZATION_TAG];
|
|
37
|
+
return typeof tag === "string" && KNOWN_TYPES.has(tag);
|
|
38
|
+
}
|
|
39
|
+
var SERIALIZATION_TAG, KNOWN_TYPES;
|
|
40
|
+
var init_serialization_tag = __esm({
|
|
41
|
+
"src/internal/serialization-tag.ts"() {
|
|
42
|
+
"use strict";
|
|
43
|
+
SERIALIZATION_TAG = "__firegraph_ser__";
|
|
44
|
+
KNOWN_TYPES = /* @__PURE__ */ new Set(["Timestamp", "GeoPoint", "VectorValue", "DocumentReference"]);
|
|
45
|
+
}
|
|
46
|
+
});
|
|
47
|
+
|
|
33
48
|
// src/serialization.ts
|
|
34
49
|
var serialization_exports = {};
|
|
35
50
|
__export(serialization_exports, {
|
|
@@ -38,11 +53,6 @@ __export(serialization_exports, {
|
|
|
38
53
|
isTaggedValue: () => isTaggedValue,
|
|
39
54
|
serializeFirestoreTypes: () => serializeFirestoreTypes
|
|
40
55
|
});
|
|
41
|
-
function isTaggedValue(value) {
|
|
42
|
-
if (value === null || typeof value !== "object") return false;
|
|
43
|
-
const tag = value[SERIALIZATION_TAG];
|
|
44
|
-
return typeof tag === "string" && KNOWN_TYPES.has(tag);
|
|
45
|
-
}
|
|
46
56
|
function isTimestamp(value) {
|
|
47
57
|
return value instanceof import_firestore.Timestamp;
|
|
48
58
|
}
|
|
@@ -143,13 +153,13 @@ function deserializeValue(value, db) {
|
|
|
143
153
|
}
|
|
144
154
|
return result;
|
|
145
155
|
}
|
|
146
|
-
var import_firestore,
|
|
156
|
+
var import_firestore, _docRefWarned;
|
|
147
157
|
var init_serialization = __esm({
|
|
148
158
|
"src/serialization.ts"() {
|
|
149
159
|
"use strict";
|
|
150
160
|
import_firestore = require("@google-cloud/firestore");
|
|
151
|
-
|
|
152
|
-
|
|
161
|
+
init_serialization_tag();
|
|
162
|
+
init_serialization_tag();
|
|
153
163
|
_docRefWarned = false;
|
|
154
164
|
}
|
|
155
165
|
});
|
|
@@ -157,11 +167,20 @@ var init_serialization = __esm({
|
|
|
157
167
|
// src/cloudflare/index.ts
|
|
158
168
|
var cloudflare_exports = {};
|
|
159
169
|
__export(cloudflare_exports, {
|
|
170
|
+
CapabilityNotSupportedError: () => CapabilityNotSupportedError,
|
|
160
171
|
DORPCBackend: () => DORPCBackend,
|
|
161
172
|
FiregraphDO: () => FiregraphDO,
|
|
173
|
+
META_EDGE_TYPE: () => META_EDGE_TYPE,
|
|
174
|
+
META_NODE_TYPE: () => META_NODE_TYPE,
|
|
162
175
|
buildDOSchemaStatements: () => buildDOSchemaStatements,
|
|
176
|
+
createCapabilities: () => createCapabilities,
|
|
163
177
|
createDOClient: () => createDOClient,
|
|
164
|
-
|
|
178
|
+
createMergedRegistry: () => createMergedRegistry,
|
|
179
|
+
createRegistry: () => createRegistry,
|
|
180
|
+
createSiblingClient: () => createSiblingClient,
|
|
181
|
+
deleteField: () => deleteField,
|
|
182
|
+
generateId: () => generateId,
|
|
183
|
+
intersectCapabilities: () => intersectCapabilities
|
|
165
184
|
});
|
|
166
185
|
module.exports = __toCommonJS(cloudflare_exports);
|
|
167
186
|
|
|
@@ -219,6 +238,34 @@ var MigrationError = class extends FiregraphError {
|
|
|
219
238
|
this.name = "MigrationError";
|
|
220
239
|
}
|
|
221
240
|
};
|
|
241
|
+
var CapabilityNotSupportedError = class extends FiregraphError {
|
|
242
|
+
constructor(capability, backendDescription) {
|
|
243
|
+
super(
|
|
244
|
+
`Capability "${capability}" is not supported by ${backendDescription}.`,
|
|
245
|
+
"CAPABILITY_NOT_SUPPORTED"
|
|
246
|
+
);
|
|
247
|
+
this.capability = capability;
|
|
248
|
+
this.name = "CapabilityNotSupportedError";
|
|
249
|
+
}
|
|
250
|
+
};
|
|
251
|
+
|
|
252
|
+
// src/internal/backend.ts
|
|
253
|
+
function createCapabilities(caps) {
|
|
254
|
+
return {
|
|
255
|
+
has: (capability) => caps.has(capability),
|
|
256
|
+
values: () => caps.values()
|
|
257
|
+
};
|
|
258
|
+
}
|
|
259
|
+
function intersectCapabilities(parts) {
|
|
260
|
+
if (parts.length === 0) return createCapabilities(/* @__PURE__ */ new Set());
|
|
261
|
+
const sets = parts.map((p) => new Set(p.values()));
|
|
262
|
+
const [first, ...rest] = sets;
|
|
263
|
+
const intersection = /* @__PURE__ */ new Set();
|
|
264
|
+
for (const c of first) {
|
|
265
|
+
if (rest.every((s) => s.has(c))) intersection.add(c);
|
|
266
|
+
}
|
|
267
|
+
return createCapabilities(intersection);
|
|
268
|
+
}
|
|
222
269
|
|
|
223
270
|
// src/internal/constants.ts
|
|
224
271
|
var NODE_RELATION = "is";
|
|
@@ -234,6 +281,296 @@ var BUILTIN_FIELDS = /* @__PURE__ */ new Set([
|
|
|
234
281
|
]);
|
|
235
282
|
var SHARD_SEPARATOR = ":";
|
|
236
283
|
|
|
284
|
+
// src/internal/sqlite-data-ops.ts
|
|
285
|
+
var FIRESTORE_TYPE_NAMES = /* @__PURE__ */ new Set([
|
|
286
|
+
"Timestamp",
|
|
287
|
+
"GeoPoint",
|
|
288
|
+
"VectorValue",
|
|
289
|
+
"DocumentReference",
|
|
290
|
+
"FieldValue"
|
|
291
|
+
]);
|
|
292
|
+
function isFirestoreSpecialType(value) {
|
|
293
|
+
const ctorName = value.constructor?.name;
|
|
294
|
+
if (ctorName && FIRESTORE_TYPE_NAMES.has(ctorName)) return ctorName;
|
|
295
|
+
return null;
|
|
296
|
+
}
|
|
297
|
+
var JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
|
|
298
|
+
function validateJsonPathKey(key, backendLabel) {
|
|
299
|
+
if (key.length === 0) {
|
|
300
|
+
throw new FiregraphError(
|
|
301
|
+
`${backendLabel}: empty JSON path component is not allowed`,
|
|
302
|
+
"INVALID_QUERY"
|
|
303
|
+
);
|
|
304
|
+
}
|
|
305
|
+
if (!JSON_PATH_KEY_RE.test(key)) {
|
|
306
|
+
throw new FiregraphError(
|
|
307
|
+
`${backendLabel}: data field path component "${key}" is not a safe JSON-path identifier. Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceNode/replaceEdge (full-data overwrite) for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,
|
|
308
|
+
"INVALID_QUERY"
|
|
309
|
+
);
|
|
310
|
+
}
|
|
311
|
+
}
|
|
312
|
+
function jsonBind(value, backendLabel) {
|
|
313
|
+
if (value === void 0) return "null";
|
|
314
|
+
if (value !== null && typeof value === "object") {
|
|
315
|
+
const firestoreType = isFirestoreSpecialType(value);
|
|
316
|
+
if (firestoreType) {
|
|
317
|
+
throw new FiregraphError(
|
|
318
|
+
`${backendLabel} cannot persist a Firestore ${firestoreType} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp).`,
|
|
319
|
+
"INVALID_ARGUMENT"
|
|
320
|
+
);
|
|
321
|
+
}
|
|
322
|
+
}
|
|
323
|
+
return JSON.stringify(value);
|
|
324
|
+
}
|
|
325
|
+
function compileDataOpsExpr(ops, base, params, backendLabel) {
|
|
326
|
+
if (ops.length === 0) return null;
|
|
327
|
+
const deletes = [];
|
|
328
|
+
const sets = [];
|
|
329
|
+
for (const op of ops) (op.delete ? deletes : sets).push(op);
|
|
330
|
+
let expr = base;
|
|
331
|
+
if (deletes.length > 0) {
|
|
332
|
+
const placeholders = deletes.map(() => "?").join(", ");
|
|
333
|
+
expr = `json_remove(${expr}, ${placeholders})`;
|
|
334
|
+
for (const op of deletes) {
|
|
335
|
+
for (const seg of op.path) validateJsonPathKey(seg, backendLabel);
|
|
336
|
+
params.push(`$.${op.path.join(".")}`);
|
|
337
|
+
}
|
|
338
|
+
}
|
|
339
|
+
if (sets.length > 0) {
|
|
340
|
+
const pieces = sets.map(() => "?, json(?)").join(", ");
|
|
341
|
+
expr = `json_set(${expr}, ${pieces})`;
|
|
342
|
+
for (const op of sets) {
|
|
343
|
+
for (const seg of op.path) validateJsonPathKey(seg, backendLabel);
|
|
344
|
+
params.push(`$.${op.path.join(".")}`);
|
|
345
|
+
params.push(jsonBind(op.value, backendLabel));
|
|
346
|
+
}
|
|
347
|
+
}
|
|
348
|
+
return expr;
|
|
349
|
+
}
|
|
350
|
+
|
|
351
|
+
// src/internal/sqlite-payload-guard.ts
|
|
352
|
+
init_serialization_tag();
|
|
353
|
+
|
|
354
|
+
// src/internal/write-plan.ts
|
|
355
|
+
init_serialization_tag();
|
|
356
|
+
var DELETE_FIELD = /* @__PURE__ */ Symbol.for("firegraph.deleteField");
|
|
357
|
+
function deleteField() {
|
|
358
|
+
return DELETE_FIELD;
|
|
359
|
+
}
|
|
360
|
+
function isDeleteSentinel(value) {
|
|
361
|
+
return value === DELETE_FIELD;
|
|
362
|
+
}
|
|
363
|
+
var FIRESTORE_TERMINAL_CTOR = /* @__PURE__ */ new Set([
|
|
364
|
+
"Timestamp",
|
|
365
|
+
"GeoPoint",
|
|
366
|
+
"VectorValue",
|
|
367
|
+
"DocumentReference",
|
|
368
|
+
"FieldValue",
|
|
369
|
+
"NumericIncrementTransform",
|
|
370
|
+
"ArrayUnionTransform",
|
|
371
|
+
"ArrayRemoveTransform",
|
|
372
|
+
"ServerTimestampTransform",
|
|
373
|
+
"DeleteTransform"
|
|
374
|
+
]);
|
|
375
|
+
function isTerminalValue(value) {
|
|
376
|
+
if (value === null) return true;
|
|
377
|
+
const t = typeof value;
|
|
378
|
+
if (t !== "object") return true;
|
|
379
|
+
if (Array.isArray(value)) return true;
|
|
380
|
+
if (isTaggedValue(value)) return true;
|
|
381
|
+
const proto = Object.getPrototypeOf(value);
|
|
382
|
+
if (proto === null || proto === Object.prototype) return false;
|
|
383
|
+
const ctor = value.constructor;
|
|
384
|
+
if (ctor && typeof ctor.name === "string" && FIRESTORE_TERMINAL_CTOR.has(ctor.name)) return true;
|
|
385
|
+
return true;
|
|
386
|
+
}
|
|
387
|
+
var SAFE_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
|
|
388
|
+
function assertUpdatePayloadExclusive(update) {
|
|
389
|
+
if (update.replaceData !== void 0 && update.dataOps !== void 0) {
|
|
390
|
+
throw new Error(
|
|
391
|
+
"firegraph: UpdatePayload cannot specify both `replaceData` and `dataOps`. Use one or the other \u2014 `replaceData` is the migration-write-back form, `dataOps` is the standard partial-update form."
|
|
392
|
+
);
|
|
393
|
+
}
|
|
394
|
+
}
|
|
395
|
+
function assertNoDeleteSentinels(data, callerLabel) {
|
|
396
|
+
walkForDeleteSentinels(data, [], { kind: "root" }, ({ path }) => {
|
|
397
|
+
const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
|
|
398
|
+
throw new Error(
|
|
399
|
+
`firegraph: ${callerLabel} payload contains a deleteField() sentinel at ${where}. deleteField() is only valid inside updateNode/updateEdge \u2014 full-data writes (put*, replace*) cannot delete individual fields. Use updateNode with a deleteField() value, or omit the field from the replace payload.`
|
|
400
|
+
);
|
|
401
|
+
});
|
|
402
|
+
}
|
|
403
|
+
function walkForDeleteSentinels(node, path, parent, visit) {
|
|
404
|
+
if (node === null || node === void 0) return;
|
|
405
|
+
if (isDeleteSentinel(node)) {
|
|
406
|
+
visit({ path, parent });
|
|
407
|
+
return;
|
|
408
|
+
}
|
|
409
|
+
if (typeof node !== "object") return;
|
|
410
|
+
if (isTaggedValue(node)) return;
|
|
411
|
+
if (Array.isArray(node)) {
|
|
412
|
+
for (let i = 0; i < node.length; i++) {
|
|
413
|
+
walkForDeleteSentinels(node[i], [...path, String(i)], { kind: "array", index: i }, visit);
|
|
414
|
+
}
|
|
415
|
+
return;
|
|
416
|
+
}
|
|
417
|
+
const proto = Object.getPrototypeOf(node);
|
|
418
|
+
if (proto !== null && proto !== Object.prototype) return;
|
|
419
|
+
const obj = node;
|
|
420
|
+
for (const key of Object.keys(obj)) {
|
|
421
|
+
walkForDeleteSentinels(obj[key], [...path, key], { kind: "object" }, visit);
|
|
422
|
+
}
|
|
423
|
+
}
|
|
424
|
+
function assertSafePath(path) {
|
|
425
|
+
for (const seg of path) {
|
|
426
|
+
if (!SAFE_KEY_RE.test(seg)) {
|
|
427
|
+
throw new Error(
|
|
428
|
+
`firegraph: unsafe object key ${JSON.stringify(seg)} at path ${path.map((p) => JSON.stringify(p)).join(" > ")}. Keys used inside update payloads must match /^[A-Za-z_][A-Za-z0-9_-]*$/ so they can be embedded safely in SQLite JSON paths.`
|
|
429
|
+
);
|
|
430
|
+
}
|
|
431
|
+
}
|
|
432
|
+
}
|
|
433
|
+
function flattenPatch(data) {
|
|
434
|
+
const ops = [];
|
|
435
|
+
walk(data, [], ops);
|
|
436
|
+
return ops;
|
|
437
|
+
}
|
|
438
|
+
function assertNoDeleteSentinelsInArrayValue(arr, arrayPath) {
|
|
439
|
+
walkForDeleteSentinels(arr, arrayPath, { kind: "root" }, ({ parent }) => {
|
|
440
|
+
const arrayPathStr = arrayPath.length === 0 ? "<root>" : arrayPath.map((p) => JSON.stringify(p)).join(" > ");
|
|
441
|
+
if (parent.kind === "array") {
|
|
442
|
+
throw new Error(
|
|
443
|
+
`firegraph: deleteField() sentinel at index ${parent.index} inside an array at path ${arrayPathStr}. Arrays are terminal in update payloads (replaced as a unit), so the sentinel would be silently dropped by JSON serialization. To remove the field entirely, pass deleteField() in place of the whole array.`
|
|
444
|
+
);
|
|
445
|
+
}
|
|
446
|
+
throw new Error(
|
|
447
|
+
`firegraph: deleteField() sentinel inside an array element at path ${arrayPathStr}. Arrays are terminal in update payloads \u2014 the sentinel would be silently dropped by JSON serialization.`
|
|
448
|
+
);
|
|
449
|
+
});
|
|
450
|
+
}
|
|
451
|
+
function walk(node, path, out) {
|
|
452
|
+
if (node === void 0) return;
|
|
453
|
+
if (isDeleteSentinel(node)) {
|
|
454
|
+
if (path.length === 0) {
|
|
455
|
+
throw new Error("firegraph: deleteField() cannot be the entire update payload.");
|
|
456
|
+
}
|
|
457
|
+
assertSafePath(path);
|
|
458
|
+
out.push({ path: [...path], value: void 0, delete: true });
|
|
459
|
+
return;
|
|
460
|
+
}
|
|
461
|
+
if (isTerminalValue(node)) {
|
|
462
|
+
if (path.length === 0) {
|
|
463
|
+
throw new Error(
|
|
464
|
+
"firegraph: update payload must be a plain object. Got " + (node === null ? "null" : Array.isArray(node) ? "array" : typeof node) + "."
|
|
465
|
+
);
|
|
466
|
+
}
|
|
467
|
+
if (Array.isArray(node)) {
|
|
468
|
+
assertNoDeleteSentinelsInArrayValue(node, path);
|
|
469
|
+
}
|
|
470
|
+
assertSafePath(path);
|
|
471
|
+
out.push({ path: [...path], value: node, delete: false });
|
|
472
|
+
return;
|
|
473
|
+
}
|
|
474
|
+
const obj = node;
|
|
475
|
+
const keys = Object.keys(obj);
|
|
476
|
+
if (keys.length === 0) {
|
|
477
|
+
if (path.length > 0) {
|
|
478
|
+
assertSafePath(path);
|
|
479
|
+
out.push({ path: [...path], value: {}, delete: false });
|
|
480
|
+
}
|
|
481
|
+
return;
|
|
482
|
+
}
|
|
483
|
+
for (const key of keys) {
|
|
484
|
+
if (key === SERIALIZATION_TAG) {
|
|
485
|
+
const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
|
|
486
|
+
throw new Error(
|
|
487
|
+
`firegraph: update payload contains a literal \`${SERIALIZATION_TAG}\` key at ${where}. That key is reserved for firegraph's serialization envelope and cannot appear on a plain object in user data. Use a different field name, or pass a recognized tagged value through replaceNode/replaceEdge instead.`
|
|
488
|
+
);
|
|
489
|
+
}
|
|
490
|
+
walk(obj[key], [...path, key], out);
|
|
491
|
+
}
|
|
492
|
+
}
|
|
493
|
+
|
|
494
|
+
// src/internal/sqlite-payload-guard.ts
|
|
495
|
+
var FIRESTORE_TYPE_NAMES2 = /* @__PURE__ */ new Set([
|
|
496
|
+
"Timestamp",
|
|
497
|
+
"GeoPoint",
|
|
498
|
+
"VectorValue",
|
|
499
|
+
"DocumentReference",
|
|
500
|
+
"FieldValue"
|
|
501
|
+
]);
|
|
502
|
+
function assertJsonSafePayload(data, label) {
|
|
503
|
+
walk2(data, [], label);
|
|
504
|
+
}
|
|
505
|
+
function walk2(node, path, label) {
|
|
506
|
+
if (node === null || node === void 0) return;
|
|
507
|
+
if (isDeleteSentinel(node)) {
|
|
508
|
+
throw new FiregraphError(
|
|
509
|
+
`${label} backend cannot persist a deleteField() sentinel inside a full-data payload (replaceNode/replaceEdge or first-insert). The sentinel is only valid inside an updateNode/updateEdge dataOps patch. Path: ${formatPath(path)}.`,
|
|
510
|
+
"INVALID_ARGUMENT"
|
|
511
|
+
);
|
|
512
|
+
}
|
|
513
|
+
const t = typeof node;
|
|
514
|
+
if (t === "symbol" || t === "function") {
|
|
515
|
+
throw new FiregraphError(
|
|
516
|
+
`${label} backend cannot persist a value of type ${t}. JSON.stringify drops it silently. Path: ${formatPath(path)}.`,
|
|
517
|
+
"INVALID_ARGUMENT"
|
|
518
|
+
);
|
|
519
|
+
}
|
|
520
|
+
if (t === "bigint") {
|
|
521
|
+
throw new FiregraphError(
|
|
522
|
+
`${label} backend cannot persist a value of type bigint. JSON.stringify cannot serialize this type (throws TypeError). Path: ${formatPath(path)}.`,
|
|
523
|
+
"INVALID_ARGUMENT"
|
|
524
|
+
);
|
|
525
|
+
}
|
|
526
|
+
if (t !== "object") return;
|
|
527
|
+
if (Array.isArray(node)) {
|
|
528
|
+
for (let i = 0; i < node.length; i++) {
|
|
529
|
+
walk2(node[i], [...path, String(i)], label);
|
|
530
|
+
}
|
|
531
|
+
return;
|
|
532
|
+
}
|
|
533
|
+
const obj = node;
|
|
534
|
+
if (Object.prototype.hasOwnProperty.call(obj, SERIALIZATION_TAG)) {
|
|
535
|
+
const tagValue = obj[SERIALIZATION_TAG];
|
|
536
|
+
throw new FiregraphError(
|
|
537
|
+
`${label} backend cannot persist an object with a \`${SERIALIZATION_TAG}\` key (value: ${formatTagValue(tagValue)}). Recognised tags are valid only on the Firestore backend (migration-sandbox output); a literal \`${SERIALIZATION_TAG}\` field in user data is reserved and not allowed. Path: ${formatPath(path)}.`,
|
|
538
|
+
"INVALID_ARGUMENT"
|
|
539
|
+
);
|
|
540
|
+
}
|
|
541
|
+
const proto = Object.getPrototypeOf(node);
|
|
542
|
+
if (proto !== null && proto !== Object.prototype) {
|
|
543
|
+
const ctor = node.constructor;
|
|
544
|
+
const ctorName = ctor && typeof ctor.name === "string" ? ctor.name : "<anonymous>";
|
|
545
|
+
if (FIRESTORE_TYPE_NAMES2.has(ctorName)) {
|
|
546
|
+
throw new FiregraphError(
|
|
547
|
+
`${label} backend cannot persist a Firestore ${ctorName} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp, \`{lat,lng}\` for GeoPoint). Path: ${formatPath(path)}.`,
|
|
548
|
+
"INVALID_ARGUMENT"
|
|
549
|
+
);
|
|
550
|
+
}
|
|
551
|
+
if (node instanceof Date) return;
|
|
552
|
+
throw new FiregraphError(
|
|
553
|
+
`${label} backend cannot persist a class instance of type ${ctorName}. Only plain objects, arrays, and primitives round-trip safely through JSON storage. Path: ${formatPath(path)}.`,
|
|
554
|
+
"INVALID_ARGUMENT"
|
|
555
|
+
);
|
|
556
|
+
}
|
|
557
|
+
for (const key of Object.keys(obj)) {
|
|
558
|
+
walk2(obj[key], [...path, key], label);
|
|
559
|
+
}
|
|
560
|
+
}
|
|
561
|
+
function formatPath(path) {
|
|
562
|
+
return path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
|
|
563
|
+
}
|
|
564
|
+
function formatTagValue(value) {
|
|
565
|
+
if (value === null) return "null";
|
|
566
|
+
if (value === void 0) return "undefined";
|
|
567
|
+
if (typeof value === "string") return JSON.stringify(value);
|
|
568
|
+
if (typeof value === "number" || typeof value === "boolean" || typeof value === "bigint") {
|
|
569
|
+
return String(value);
|
|
570
|
+
}
|
|
571
|
+
return typeof value;
|
|
572
|
+
}
|
|
573
|
+
|
|
237
574
|
// src/timestamp.ts
|
|
238
575
|
var GraphTimestampImpl = class _GraphTimestampImpl {
|
|
239
576
|
constructor(seconds, nanoseconds) {
|
|
@@ -273,7 +610,7 @@ var DEFAULT_CORE_INDEXES = Object.freeze([
|
|
|
273
610
|
|
|
274
611
|
// src/internal/sqlite-index-ddl.ts
|
|
275
612
|
var IDENT_RE = /^[A-Za-z_][A-Za-z0-9_]*$/;
|
|
276
|
-
var
|
|
613
|
+
var JSON_PATH_KEY_RE2 = /^[A-Za-z_][A-Za-z0-9_-]*$/;
|
|
277
614
|
function quoteIdent(name) {
|
|
278
615
|
if (!IDENT_RE.test(name)) {
|
|
279
616
|
throw new FiregraphError(
|
|
@@ -321,7 +658,7 @@ function compileFieldExpr(path, fieldToColumn) {
|
|
|
321
658
|
const suffix = path.slice(5);
|
|
322
659
|
const parts = suffix.split(".");
|
|
323
660
|
for (const part of parts) {
|
|
324
|
-
if (!
|
|
661
|
+
if (!JSON_PATH_KEY_RE2.test(part)) {
|
|
325
662
|
throw new FiregraphError(
|
|
326
663
|
`IndexSpec data path "${path}" has invalid component "${part}". Each component must match /^[A-Za-z_][A-Za-z0-9_-]*$/.`,
|
|
327
664
|
"INVALID_INDEX"
|
|
@@ -390,6 +727,9 @@ function quoteDOIdent(name) {
|
|
|
390
727
|
validateDOTableName(name);
|
|
391
728
|
return `"${name}"`;
|
|
392
729
|
}
|
|
730
|
+
function quoteDOColumnAlias(label) {
|
|
731
|
+
return `"${label.replace(/"/g, '""')}"`;
|
|
732
|
+
}
|
|
393
733
|
function buildDOSchemaStatements(table, options = {}) {
|
|
394
734
|
const t = quoteDOIdent(table);
|
|
395
735
|
const statements = [
|
|
@@ -416,6 +756,8 @@ function buildDOSchemaStatements(table, options = {}) {
|
|
|
416
756
|
}
|
|
417
757
|
|
|
418
758
|
// src/cloudflare/sql.ts
|
|
759
|
+
var DO_BACKEND_LABEL = "DO SQLite";
|
|
760
|
+
var DO_BACKEND_ERR_LABEL = "DO SQLite backend";
|
|
419
761
|
function compileFieldRef(field) {
|
|
420
762
|
const column = DO_FIELD_TO_COLUMN[field];
|
|
421
763
|
if (column) {
|
|
@@ -424,7 +766,7 @@ function compileFieldRef(field) {
|
|
|
424
766
|
if (field.startsWith("data.")) {
|
|
425
767
|
const suffix = field.slice(5);
|
|
426
768
|
for (const part of suffix.split(".")) {
|
|
427
|
-
validateJsonPathKey(part);
|
|
769
|
+
validateJsonPathKey(part, DO_BACKEND_ERR_LABEL);
|
|
428
770
|
}
|
|
429
771
|
return { expr: `json_extract("data", '$.${suffix}')` };
|
|
430
772
|
}
|
|
@@ -436,18 +778,6 @@ function compileFieldRef(field) {
|
|
|
436
778
|
"INVALID_QUERY"
|
|
437
779
|
);
|
|
438
780
|
}
|
|
439
|
-
var FIRESTORE_TYPE_NAMES = /* @__PURE__ */ new Set([
|
|
440
|
-
"Timestamp",
|
|
441
|
-
"GeoPoint",
|
|
442
|
-
"VectorValue",
|
|
443
|
-
"DocumentReference",
|
|
444
|
-
"FieldValue"
|
|
445
|
-
]);
|
|
446
|
-
function isFirestoreSpecialType(value) {
|
|
447
|
-
const ctorName = value.constructor?.name;
|
|
448
|
-
if (ctorName && FIRESTORE_TYPE_NAMES.has(ctorName)) return ctorName;
|
|
449
|
-
return null;
|
|
450
|
-
}
|
|
451
781
|
function bindValue(value) {
|
|
452
782
|
if (value === null || value === void 0) return null;
|
|
453
783
|
if (typeof value === "string" || typeof value === "number" || typeof value === "bigint" || typeof value === "boolean") {
|
|
@@ -466,21 +796,6 @@ function bindValue(value) {
|
|
|
466
796
|
}
|
|
467
797
|
return String(value);
|
|
468
798
|
}
|
|
469
|
-
var JSON_PATH_KEY_RE2 = /^[A-Za-z_][A-Za-z0-9_-]*$/;
|
|
470
|
-
function validateJsonPathKey(key) {
|
|
471
|
-
if (key.length === 0) {
|
|
472
|
-
throw new FiregraphError(
|
|
473
|
-
"DO SQLite backend: empty JSON path component is not allowed",
|
|
474
|
-
"INVALID_QUERY"
|
|
475
|
-
);
|
|
476
|
-
}
|
|
477
|
-
if (!JSON_PATH_KEY_RE2.test(key)) {
|
|
478
|
-
throw new FiregraphError(
|
|
479
|
-
`DO SQLite backend: data field path component "${key}" is not a safe JSON-path identifier. Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceData (full-data overwrite) for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,
|
|
480
|
-
"INVALID_QUERY"
|
|
481
|
-
);
|
|
482
|
-
}
|
|
483
|
-
}
|
|
484
799
|
function compileFilter(filter, params) {
|
|
485
800
|
const { expr } = compileFieldRef(filter.field);
|
|
486
801
|
switch (filter.op) {
|
|
@@ -561,17 +876,244 @@ function compileDOSelect(table, filters, options) {
|
|
|
561
876
|
sql += compileLimit(options, params);
|
|
562
877
|
return { sql, params };
|
|
563
878
|
}
|
|
879
|
+
function compileDOExpand(table, params) {
|
|
880
|
+
if (params.sources.length === 0) {
|
|
881
|
+
throw new FiregraphError(
|
|
882
|
+
"compileDOExpand requires a non-empty sources list \u2014 empty IN () is invalid SQL.",
|
|
883
|
+
"INVALID_QUERY"
|
|
884
|
+
);
|
|
885
|
+
}
|
|
886
|
+
const direction = params.direction ?? "forward";
|
|
887
|
+
const aUidCol = compileFieldRef("aUid").expr;
|
|
888
|
+
const bUidCol = compileFieldRef("bUid").expr;
|
|
889
|
+
const aTypeCol = compileFieldRef("aType").expr;
|
|
890
|
+
const bTypeCol = compileFieldRef("bType").expr;
|
|
891
|
+
const axbTypeCol = compileFieldRef("axbType").expr;
|
|
892
|
+
const sourceColumn = direction === "forward" ? aUidCol : bUidCol;
|
|
893
|
+
const sqlParams = [params.axbType];
|
|
894
|
+
const conditions = [`${axbTypeCol} = ?`];
|
|
895
|
+
const placeholders = params.sources.map(() => "?").join(", ");
|
|
896
|
+
conditions.push(`${sourceColumn} IN (${placeholders})`);
|
|
897
|
+
for (const uid of params.sources) sqlParams.push(uid);
|
|
898
|
+
if (params.aType !== void 0) {
|
|
899
|
+
conditions.push(`${aTypeCol} = ?`);
|
|
900
|
+
sqlParams.push(params.aType);
|
|
901
|
+
}
|
|
902
|
+
if (params.bType !== void 0) {
|
|
903
|
+
conditions.push(`${bTypeCol} = ?`);
|
|
904
|
+
sqlParams.push(params.bType);
|
|
905
|
+
}
|
|
906
|
+
if (params.axbType === NODE_RELATION) {
|
|
907
|
+
conditions.push(`${aUidCol} != ${bUidCol}`);
|
|
908
|
+
}
|
|
909
|
+
let sql = `SELECT * FROM ${quoteDOIdent(table)} WHERE ${conditions.join(" AND ")}`;
|
|
910
|
+
if (params.orderBy) {
|
|
911
|
+
sql += compileOrderBy({ orderBy: params.orderBy }, sqlParams);
|
|
912
|
+
}
|
|
913
|
+
if (params.limitPerSource !== void 0) {
|
|
914
|
+
const totalLimit = params.sources.length * params.limitPerSource;
|
|
915
|
+
sql += ` LIMIT ?`;
|
|
916
|
+
sqlParams.push(totalLimit);
|
|
917
|
+
}
|
|
918
|
+
return { sql, params: sqlParams };
|
|
919
|
+
}
|
|
920
|
+
function compileDOExpandHydrate(table, targetUids) {
|
|
921
|
+
if (targetUids.length === 0) {
|
|
922
|
+
throw new FiregraphError(
|
|
923
|
+
"compileDOExpandHydrate requires a non-empty target list \u2014 empty IN () is invalid SQL.",
|
|
924
|
+
"INVALID_QUERY"
|
|
925
|
+
);
|
|
926
|
+
}
|
|
927
|
+
const placeholders = targetUids.map(() => "?").join(", ");
|
|
928
|
+
const sqlParams = [NODE_RELATION];
|
|
929
|
+
for (const uid of targetUids) sqlParams.push(uid);
|
|
930
|
+
const aUidCol = compileFieldRef("aUid").expr;
|
|
931
|
+
const bUidCol = compileFieldRef("bUid").expr;
|
|
932
|
+
const axbTypeCol = compileFieldRef("axbType").expr;
|
|
933
|
+
return {
|
|
934
|
+
sql: `SELECT * FROM ${quoteDOIdent(table)} WHERE ${axbTypeCol} = ? AND ${aUidCol} = ${bUidCol} AND ${bUidCol} IN (${placeholders})`,
|
|
935
|
+
params: sqlParams
|
|
936
|
+
};
|
|
937
|
+
}
|
|
564
938
|
function compileDOSelectByDocId(table, docId) {
|
|
565
939
|
return {
|
|
566
940
|
sql: `SELECT * FROM ${quoteDOIdent(table)} WHERE "doc_id" = ? LIMIT 1`,
|
|
567
941
|
params: [docId]
|
|
568
942
|
};
|
|
569
943
|
}
|
|
570
|
-
function
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
944
|
+
function normalizeDOProjectionField(field) {
|
|
945
|
+
if (field in DO_FIELD_TO_COLUMN) return field;
|
|
946
|
+
if (field === "data" || field.startsWith("data.")) return field;
|
|
947
|
+
return `data.${field}`;
|
|
948
|
+
}
|
|
949
|
+
/**
 * Compile a projected edge SELECT for the Durable Object SQLite backend.
 *
 * Emits one `SELECT <expr> AS <alias>, … FROM <table> [WHERE …][ORDER][LIMIT]`
 * statement. Each selected field keeps its ORIGINAL spelling as the column
 * alias so the caller can read `row[field]` back verbatim; JSON-path fields
 * additionally get a `json_type(...)` companion column so the decoder can
 * distinguish objects/arrays (stored as JSON text) from scalar extractions.
 *
 * @param {string}   table   Physical table name (quoted via quoteDOIdent).
 * @param {string[]} select  Fields to project; must be non-empty.
 * @param {Array}    filters Backend filter descriptors, AND-combined.
 * @param {object}   options Order/limit options forwarded to compileOrderBy
 *                           and compileLimit.
 * @returns {{ stmt: { sql: string, params: unknown[] }, columns: Array }}
 *          The statement plus per-column decode metadata consumed by
 *          `decodeDOProjectedRow`.
 * @throws {FiregraphError} INVALID_QUERY when `select` is empty.
 */
function compileDOFindEdgesProjected(table, select, filters, options) {
  if (select.length === 0) {
    throw new FiregraphError(
      "compileDOFindEdgesProjected requires a non-empty select list \u2014 an empty projection has no SQL representation distinct from `findEdges`.",
      "INVALID_QUERY"
    );
  }
  // De-duplicate while preserving first-seen order; duplicate aliases would
  // make the decoded row ambiguous.
  const seen = /* @__PURE__ */ new Set();
  const uniqueFields = [];
  for (const f of select) {
    if (!seen.has(f)) {
      seen.add(f);
      uniqueFields.push(f);
    }
  }
  const projections = [];
  const columns = [];
  for (let idx = 0; idx < uniqueFields.length; idx++) {
    const field = uniqueFields[idx];
    const canonical = normalizeDOProjectionField(field);
    const { expr } = compileFieldRef(canonical);
    const alias = quoteDOColumnAlias(field);
    projections.push(`${expr} AS ${alias}`);
    let kind;
    let typeAliasName;
    if (canonical === "data") {
      kind = "data";
    } else if (canonical.startsWith("data.")) {
      kind = "json";
      // Synthesize the companion-column alias. Guard against the (unlikely)
      // case where a caller literally selected a field named `__fg_t_<n>`:
      // without this check the statement would emit two columns under the
      // same alias and the decoder would read the user's column as type
      // metadata. `columns` carries the final name, so the decoder follows
      // whatever alias we settle on.
      typeAliasName = `__fg_t_${idx}`;
      while (seen.has(typeAliasName)) typeAliasName += "_";
      seen.add(typeAliasName);
      const typeAlias = quoteDOColumnAlias(typeAliasName);
      projections.push(`json_type("data", '$.${canonical.slice(5)}') AS ${typeAlias}`);
    } else {
      // Built-in column: pick the decode strategy by canonical name.
      if (canonical === "v") kind = "builtin-int";
      else if (canonical === "createdAt" || canonical === "updatedAt") kind = "builtin-timestamp";
      else kind = "builtin-text";
    }
    columns.push({ field, kind, typeAlias: typeAliasName });
  }
  const params = [];
  const conditions = [];
  for (const f of filters) {
    conditions.push(compileFilter(f, params));
  }
  const where = conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "";
  let sql = `SELECT ${projections.join(", ")} FROM ${quoteDOIdent(table)}${where}`;
  sql += compileOrderBy(options, params);
  sql += compileLimit(options, params);
  return { stmt: { sql, params }, columns };
}
|
|
999
|
+
/**
 * Decode one raw SQLite row from a projected SELECT back into JS values,
 * driven by the per-column metadata produced by `compileDOFindEdgesProjected`.
 *
 * - builtin-text: NULL stays null, everything else coerced to string.
 * - builtin-int: NULL stays null; bigint/number/strings coerced via Number.
 * - builtin-timestamp: rehydrated into a GraphTimestampImpl.
 * - data: NULL/empty decodes to `{}`, otherwise JSON-parsed.
 * - json: consults the `json_type` companion column (`typeAlias`) to decide
 *   whether the raw value is JSON text (object/array) or a scalar.
 *
 * @param {Record<string, unknown>} row     Raw row keyed by column alias.
 * @param {Array} columns Decode metadata (field, kind, typeAlias).
 * @returns {Record<string, unknown>} Decoded row keyed by original field name.
 */
function decodeDOProjectedRow(row, columns) {
  const out = {};
  for (const col of columns) {
    const value = row[col.field];
    if (col.kind === "builtin-text") {
      out[col.field] = value === null || value === void 0 ? null : String(value);
    } else if (col.kind === "builtin-int") {
      // bigint, number, and anything else all funnel through Number();
      // Number(x) is the identity for plain numbers.
      out[col.field] = value === null || value === void 0 ? null : Number(value);
    } else if (col.kind === "builtin-timestamp") {
      out[col.field] = GraphTimestampImpl.fromMillis(toMillis(value));
    } else if (col.kind === "data") {
      const isEmpty = value === null || value === void 0 || value === "";
      out[col.field] = isEmpty ? {} : JSON.parse(value);
    } else if (col.kind === "json") {
      const jsonType = row[col.typeAlias];
      if (value === null || value === void 0) {
        out[col.field] = null;
      } else if (jsonType === "object" || jsonType === "array") {
        out[col.field] = typeof value === "string" ? JSON.parse(value) : value;
      } else if (jsonType === "integer" && typeof value === "bigint") {
        out[col.field] = Number(value);
      } else {
        out[col.field] = value;
      }
    }
  }
  return out;
}
|
|
1047
|
+
/**
 * Compile an aggregate SELECT for the Durable Object SQLite backend.
 *
 * Each entry in `spec` maps an output alias to an `{ op, field }` pair.
 * `count` projects `COUNT(*)`; sum/avg/min/max wrap the compiled field
 * expression in `CAST(... AS REAL)` so JSON-extracted values aggregate
 * numerically. Filters are AND-combined into a single WHERE clause.
 *
 * @param {string} table   Physical table name (quoted via quoteDOIdent).
 * @param {object} spec    Alias → { op, field } aggregation map; non-empty.
 * @param {Array}  filters Backend filter descriptors.
 * @returns {{ stmt: { sql: string, params: unknown[] }, aliases: string[] }}
 * @throws {FiregraphError} INVALID_QUERY on an empty spec, a `count` with a
 *         field, a non-count op without a field, or an unknown op.
 */
function compileDOAggregate(table, spec, filters) {
  const aliases = Object.keys(spec);
  if (aliases.length === 0) {
    throw new FiregraphError(
      "aggregate() requires at least one aggregation in the `aggregates` map.",
      "INVALID_QUERY"
    );
  }
  // Map of supported numeric ops to their SQL function names; a Map avoids
  // accidental prototype hits for hostile op strings.
  const SQL_FN = new Map([
    ["sum", "SUM"],
    ["avg", "AVG"],
    ["min", "MIN"],
    ["max", "MAX"]
  ]);
  const projections = [];
  for (const alias of aliases) {
    const { op, field } = spec[alias];
    validateJsonPathKey(alias, DO_BACKEND_ERR_LABEL);
    if (op === "count") {
      if (field !== void 0) {
        throw new FiregraphError(
          `Aggregate '${alias}' op 'count' must not specify a field \u2014 count operates on rows, not a column expression.`,
          "INVALID_QUERY"
        );
      }
      projections.push(`COUNT(*) AS ${quoteDOIdent(alias)}`);
      continue;
    }
    if (!field) {
      throw new FiregraphError(
        `Aggregate '${alias}' op '${op}' requires a field.`,
        "INVALID_QUERY"
      );
    }
    const { expr } = compileFieldRef(field);
    const numericExpr = `CAST(${expr} AS REAL)`;
    const fn = SQL_FN.get(op);
    if (fn === void 0) {
      throw new FiregraphError(
        `DO SQLite backend does not support aggregate op: ${String(op)}`,
        "INVALID_QUERY"
      );
    }
    projections.push(`${fn}(${numericExpr}) AS ${quoteDOIdent(alias)}`);
  }
  const params = [];
  const conditions = [];
  for (const f of filters) {
    conditions.push(compileFilter(f, params));
  }
  const where = conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "";
  const sql = `SELECT ${projections.join(", ")} FROM ${quoteDOIdent(table)}${where}`;
  return { stmt: { sql, params }, aliases };
}
|
|
1096
|
+
function compileDOSet(table, docId, record, nowMillis, mode) {
|
|
1097
|
+
assertJsonSafePayload(record.data, DO_BACKEND_LABEL);
|
|
1098
|
+
if (mode === "replace") {
|
|
1099
|
+
const sql2 = `INSERT OR REPLACE INTO ${quoteDOIdent(table)} (
|
|
1100
|
+
doc_id, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
|
|
1101
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`;
|
|
1102
|
+
const params = [
|
|
1103
|
+
docId,
|
|
1104
|
+
record.aType,
|
|
1105
|
+
record.aUid,
|
|
1106
|
+
record.axbType,
|
|
1107
|
+
record.bType,
|
|
1108
|
+
record.bUid,
|
|
1109
|
+
JSON.stringify(record.data ?? {}),
|
|
1110
|
+
record.v ?? null,
|
|
1111
|
+
nowMillis,
|
|
1112
|
+
nowMillis
|
|
1113
|
+
];
|
|
1114
|
+
return { sql: sql2, params };
|
|
1115
|
+
}
|
|
1116
|
+
const insertParams = [
|
|
575
1117
|
docId,
|
|
576
1118
|
record.aType,
|
|
577
1119
|
record.aUid,
|
|
@@ -583,22 +1125,44 @@ function compileDOSet(table, docId, record, nowMillis) {
|
|
|
583
1125
|
nowMillis,
|
|
584
1126
|
nowMillis
|
|
585
1127
|
];
|
|
586
|
-
|
|
1128
|
+
const ops = flattenPatch(record.data ?? {});
|
|
1129
|
+
const updateParams = [];
|
|
1130
|
+
const dataExpr = compileDataOpsExpr(ops, `COALESCE("data", '{}')`, updateParams, DO_BACKEND_ERR_LABEL) ?? `COALESCE("data", '{}')`;
|
|
1131
|
+
const sql = `INSERT INTO ${quoteDOIdent(table)} (
|
|
1132
|
+
doc_id, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
|
|
1133
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
1134
|
+
ON CONFLICT(doc_id) DO UPDATE SET
|
|
1135
|
+
"a_type" = excluded."a_type",
|
|
1136
|
+
"a_uid" = excluded."a_uid",
|
|
1137
|
+
"axb_type" = excluded."axb_type",
|
|
1138
|
+
"b_type" = excluded."b_type",
|
|
1139
|
+
"b_uid" = excluded."b_uid",
|
|
1140
|
+
"data" = ${dataExpr},
|
|
1141
|
+
"v" = COALESCE(excluded."v", "v"),
|
|
1142
|
+
"created_at" = excluded."created_at",
|
|
1143
|
+
"updated_at" = excluded."updated_at"`;
|
|
1144
|
+
return { sql, params: [...insertParams, ...updateParams] };
|
|
587
1145
|
}
|
|
588
1146
|
function compileDOUpdate(table, docId, update, nowMillis) {
|
|
1147
|
+
assertUpdatePayloadExclusive(update);
|
|
589
1148
|
const setClauses = [];
|
|
590
1149
|
const params = [];
|
|
591
1150
|
if (update.replaceData) {
|
|
1151
|
+
assertJsonSafePayload(update.replaceData, DO_BACKEND_LABEL);
|
|
592
1152
|
setClauses.push(`"data" = ?`);
|
|
593
1153
|
params.push(JSON.stringify(update.replaceData));
|
|
594
|
-
} else if (update.
|
|
595
|
-
const
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
params
|
|
1154
|
+
} else if (update.dataOps && update.dataOps.length > 0) {
|
|
1155
|
+
for (const op of update.dataOps) {
|
|
1156
|
+
if (!op.delete) assertJsonSafePayload(op.value, DO_BACKEND_LABEL);
|
|
1157
|
+
}
|
|
1158
|
+
const expr = compileDataOpsExpr(
|
|
1159
|
+
update.dataOps,
|
|
1160
|
+
`COALESCE("data", '{}')`,
|
|
1161
|
+
params,
|
|
1162
|
+
DO_BACKEND_ERR_LABEL
|
|
1163
|
+
);
|
|
1164
|
+
if (expr !== null) {
|
|
1165
|
+
setClauses.push(`"data" = ${expr}`);
|
|
602
1166
|
}
|
|
603
1167
|
}
|
|
604
1168
|
if (update.v !== void 0) {
|
|
@@ -619,6 +1183,55 @@ function compileDODelete(table, docId) {
|
|
|
619
1183
|
params: [docId]
|
|
620
1184
|
};
|
|
621
1185
|
}
|
|
1186
|
+
/**
 * Compile a single-statement bulk DELETE for the DO SQLite backend.
 *
 * Filters are AND-combined; with no filters the statement deletes every row
 * in the table (higher layers apply scan protection before reaching here).
 *
 * @param {string} table   Physical table name (quoted via quoteDOIdent).
 * @param {Array}  filters Backend filter descriptors.
 * @returns {{ sql: string, params: unknown[] }}
 */
function compileDOBulkDelete(table, filters) {
  const params = [];
  const predicates = [];
  for (const filter of filters) {
    predicates.push(compileFilter(filter, params));
  }
  let sql = `DELETE FROM ${quoteDOIdent(table)}`;
  if (predicates.length > 0) {
    sql += ` WHERE ${predicates.join(" AND ")}`;
  }
  return { sql, params };
}
|
|
1198
|
+
/**
 * Compile a single-statement bulk UPDATE for the DO SQLite backend.
 *
 * The patch is flattened to leaf operations and compiled into one JSON-merge
 * expression over `COALESCE("data", '{}')`; `updated_at` is stamped with
 * `nowMillis` in the same statement. Parameter order is SET params first,
 * then WHERE params, matching the clause order in the SQL.
 *
 * @param {string} table     Physical table name (quoted via quoteDOIdent).
 * @param {Array}  filters   Backend filter descriptors, AND-combined.
 * @param {object} patchData Deep-merge patch; must flatten to >= 1 leaf.
 * @param {number} nowMillis Timestamp written to `updated_at`.
 * @returns {{ sql: string, params: unknown[] }}
 * @throws {FiregraphError} INVALID_QUERY on an empty patch;
 *         INVALID_ARGUMENT if compilation yields no expression (invariant).
 */
function compileDOBulkUpdate(table, filters, patchData, nowMillis) {
  const dataOps = flattenPatch(patchData);
  if (dataOps.length === 0) {
    throw new FiregraphError(
      "bulkUpdate() patch.data must contain at least one leaf \u2014 an empty patch would only rewrite `updated_at`, which is almost certainly a bug. Use `setDoc` with merge mode if you want to stamp without editing data.",
      "INVALID_QUERY"
    );
  }
  // Delete sentinels carry no payload; validate only real values.
  for (const op of dataOps) {
    if (!op.delete) assertJsonSafePayload(op.value, DO_BACKEND_LABEL);
  }
  const setParams = [];
  const dataExpr = compileDataOpsExpr(
    dataOps,
    `COALESCE("data", '{}')`,
    setParams,
    DO_BACKEND_ERR_LABEL
  );
  if (dataExpr === null) {
    throw new FiregraphError(
      "bulkUpdate() patch produced no SQL operations \u2014 internal invariant violated.",
      "INVALID_ARGUMENT"
    );
  }
  const setClause = `"data" = ${dataExpr}, "updated_at" = ?`;
  setParams.push(nowMillis);
  const whereParams = [];
  const predicates = [];
  for (const f of filters) {
    predicates.push(compileFilter(f, whereParams));
  }
  const whereClause = predicates.length > 0 ? ` WHERE ${predicates.join(" AND ")}` : "";
  return {
    sql: `UPDATE ${quoteDOIdent(table)} SET ${setClause}${whereClause}`,
    params: [...setParams, ...whereParams]
  };
}
|
|
622
1235
|
function compileDODeleteAll(table) {
|
|
623
1236
|
return {
|
|
624
1237
|
sql: `DELETE FROM ${quoteDOIdent(table)}`,
|
|
@@ -694,8 +1307,8 @@ var DORPCBatchBackend = class {
|
|
|
694
1307
|
this.getStub = getStub;
|
|
695
1308
|
}
|
|
696
1309
|
ops = [];
|
|
697
|
-
setDoc(docId, record) {
|
|
698
|
-
this.ops.push({ kind: "set", docId, record });
|
|
1310
|
+
setDoc(docId, record, mode) {
|
|
1311
|
+
this.ops.push({ kind: "set", docId, record, mode });
|
|
699
1312
|
}
|
|
700
1313
|
updateDoc(docId, update) {
|
|
701
1314
|
this.ops.push({ kind: "update", docId, update });
|
|
@@ -710,7 +1323,18 @@ var DORPCBatchBackend = class {
|
|
|
710
1323
|
await this.getStub()._fgBatch(ops);
|
|
711
1324
|
}
|
|
712
1325
|
};
|
|
1326
|
+
var DO_CAPS = /* @__PURE__ */ new Set([
|
|
1327
|
+
"core.read",
|
|
1328
|
+
"core.write",
|
|
1329
|
+
"core.batch",
|
|
1330
|
+
"core.subgraph",
|
|
1331
|
+
"query.aggregate",
|
|
1332
|
+
"query.dml",
|
|
1333
|
+
"query.join",
|
|
1334
|
+
"query.select"
|
|
1335
|
+
]);
|
|
713
1336
|
var DORPCBackend = class _DORPCBackend {
|
|
1337
|
+
capabilities = createCapabilities(DO_CAPS);
|
|
714
1338
|
collectionPath = "firegraph";
|
|
715
1339
|
scopePath;
|
|
716
1340
|
/** @internal */
|
|
@@ -744,9 +1368,37 @@ var DORPCBackend = class _DORPCBackend {
|
|
|
744
1368
|
const wires = await this.stub._fgQuery(filters, options);
|
|
745
1369
|
return wires.map(hydrateDORecord);
|
|
746
1370
|
}
|
|
1371
|
+
// --- Aggregate ---
|
|
1372
|
+
/**
|
|
1373
|
+
* Run an aggregate query inside the backing DO. The DO returns a row of
|
|
1374
|
+
* `{ alias: number | null }` (null = SQLite NULL for SUM/MIN/MAX over an
|
|
1375
|
+
* empty set, or the count being literally 0); this method resolves NULL
|
|
1376
|
+
* to 0 for SUM/MIN/MAX and to NaN for AVG, matching the SQLite backend
|
|
1377
|
+
* and the Firestore Standard helper.
|
|
1378
|
+
*/
|
|
1379
|
+
async aggregate(spec, filters) {
|
|
1380
|
+
const stub = this.stub;
|
|
1381
|
+
if (!stub._fgAggregate) {
|
|
1382
|
+
throw new FiregraphError(
|
|
1383
|
+
"aggregate() not supported by this Durable Object stub. The wrapped stub does not implement `_fgAggregate`. If you control the stub wrapper, forward `_fgAggregate` to the underlying DO.",
|
|
1384
|
+
"UNSUPPORTED_OPERATION"
|
|
1385
|
+
);
|
|
1386
|
+
}
|
|
1387
|
+
const wire = await stub._fgAggregate(spec, filters);
|
|
1388
|
+
const out = {};
|
|
1389
|
+
for (const [alias, { op }] of Object.entries(spec)) {
|
|
1390
|
+
const v = wire[alias];
|
|
1391
|
+
if (v === null || v === void 0) {
|
|
1392
|
+
out[alias] = op === "avg" ? Number.NaN : 0;
|
|
1393
|
+
} else {
|
|
1394
|
+
out[alias] = v;
|
|
1395
|
+
}
|
|
1396
|
+
}
|
|
1397
|
+
return out;
|
|
1398
|
+
}
|
|
747
1399
|
// --- Writes ---
|
|
748
|
-
async setDoc(docId, record) {
|
|
749
|
-
return this.stub._fgSetDoc(docId, record);
|
|
1400
|
+
async setDoc(docId, record, mode) {
|
|
1401
|
+
return this.stub._fgSetDoc(docId, record, mode);
|
|
750
1402
|
}
|
|
751
1403
|
async updateDoc(docId, update) {
|
|
752
1404
|
return this.stub._fgUpdateDoc(docId, update);
|
|
@@ -800,6 +1452,105 @@ var DORPCBackend = class _DORPCBackend {
|
|
|
800
1452
|
void _reader;
|
|
801
1453
|
return this.stub._fgBulkRemoveEdges(params, options);
|
|
802
1454
|
}
|
|
1455
|
+
// --- Server-side DML (capability: query.dml) ---
|
|
1456
|
+
/**
|
|
1457
|
+
* Single-statement bulk DELETE inside the backing DO. The DO compiles
|
|
1458
|
+
* the filter list to one `DELETE … WHERE …` statement and returns a
|
|
1459
|
+
* `BulkResult` whose `deleted` is the affected-row count.
|
|
1460
|
+
*
|
|
1461
|
+
* Defensive `_fgBulkDelete` presence check mirrors `aggregate()`: the
|
|
1462
|
+
* RPC method is optional on `FiregraphStub` so external worker code with
|
|
1463
|
+
* a hand-rolled stub wrapper still type-checks. Surface a clear
|
|
1464
|
+
* `UNSUPPORTED_OPERATION` rather than `TypeError: stub._fgBulkDelete is
|
|
1465
|
+
* not a function` when the wrapper hasn't forwarded the method.
|
|
1466
|
+
*/
|
|
1467
|
+
async bulkDelete(filters, options) {
|
|
1468
|
+
const stub = this.stub;
|
|
1469
|
+
if (!stub._fgBulkDelete) {
|
|
1470
|
+
throw new FiregraphError(
|
|
1471
|
+
"bulkDelete() not supported by this Durable Object stub. The wrapped stub does not implement `_fgBulkDelete`. If you control the stub wrapper, forward `_fgBulkDelete` to the underlying DO.",
|
|
1472
|
+
"UNSUPPORTED_OPERATION"
|
|
1473
|
+
);
|
|
1474
|
+
}
|
|
1475
|
+
return stub._fgBulkDelete(filters, options);
|
|
1476
|
+
}
|
|
1477
|
+
/**
|
|
1478
|
+
* Single-statement bulk UPDATE inside the backing DO. Same contract as
|
|
1479
|
+
* `bulkDelete` for the missing-method case; the DO compiles the patch to
|
|
1480
|
+
* one `UPDATE … SET data = json_patch(...) WHERE …` statement.
|
|
1481
|
+
*/
|
|
1482
|
+
async bulkUpdate(filters, patch, options) {
|
|
1483
|
+
const stub = this.stub;
|
|
1484
|
+
if (!stub._fgBulkUpdate) {
|
|
1485
|
+
throw new FiregraphError(
|
|
1486
|
+
"bulkUpdate() not supported by this Durable Object stub. The wrapped stub does not implement `_fgBulkUpdate`. If you control the stub wrapper, forward `_fgBulkUpdate` to the underlying DO.",
|
|
1487
|
+
"UNSUPPORTED_OPERATION"
|
|
1488
|
+
);
|
|
1489
|
+
}
|
|
1490
|
+
return stub._fgBulkUpdate(filters, patch, options);
|
|
1491
|
+
}
|
|
1492
|
+
/**
|
|
1493
|
+
* Multi-source fan-out — `query.join` capability. Routes the call through
|
|
1494
|
+
* the DO's `_fgExpand` RPC, which compiles to one `SELECT … WHERE
|
|
1495
|
+
* "aUid" IN (?, …)` statement (plus, when `params.hydrate === true`, a
|
|
1496
|
+
* second IN-clause statement against the node rows).
|
|
1497
|
+
*
|
|
1498
|
+
* Defensive `_fgExpand` presence check matches the bulk-DML pattern: the
|
|
1499
|
+
* RPC method is optional on `FiregraphStub` so external worker code with
|
|
1500
|
+
* a hand-rolled stub wrapper still type-checks. We surface a clear
|
|
1501
|
+
* `UNSUPPORTED_OPERATION` rather than `TypeError: stub._fgExpand is not a
|
|
1502
|
+
* function` if the wrapper hasn't forwarded the method.
|
|
1503
|
+
*/
|
|
1504
|
+
async expand(params) {
|
|
1505
|
+
const stub = this.stub;
|
|
1506
|
+
if (!stub._fgExpand) {
|
|
1507
|
+
throw new FiregraphError(
|
|
1508
|
+
"expand() not supported by this Durable Object stub. The wrapped stub does not implement `_fgExpand`. If you control the stub wrapper, forward `_fgExpand` to the underlying DO.",
|
|
1509
|
+
"UNSUPPORTED_OPERATION"
|
|
1510
|
+
);
|
|
1511
|
+
}
|
|
1512
|
+
if (params.sources.length === 0) {
|
|
1513
|
+
return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
|
|
1514
|
+
}
|
|
1515
|
+
const wire = await stub._fgExpand(params);
|
|
1516
|
+
const edges = wire.edges.map(hydrateDORecord);
|
|
1517
|
+
if (!params.hydrate) {
|
|
1518
|
+
return { edges };
|
|
1519
|
+
}
|
|
1520
|
+
const targets = (wire.targets ?? []).map((row) => row ? hydrateDORecord(row) : null);
|
|
1521
|
+
return { edges, targets };
|
|
1522
|
+
}
|
|
1523
|
+
// --- Server-side projection (capability: query.select) ---
|
|
1524
|
+
/**
|
|
1525
|
+
* Server-side projection — `query.select` capability. Forwards the call to
|
|
1526
|
+
* the DO's `_fgFindEdgesProjected` RPC, which compiles to a single
|
|
1527
|
+
* `SELECT json_extract(...) AS …, json_type(...) AS …__t FROM <table>
|
|
1528
|
+
* WHERE …` statement. The DO returns raw rows + per-column metadata; this
|
|
1529
|
+
* method decodes each row locally via `decodeDOProjectedRow`.
|
|
1530
|
+
*
|
|
1531
|
+
* Decoding lives on this side (not inside the DO) because
|
|
1532
|
+
* `GraphTimestampImpl` is a class — its prototype does not survive
|
|
1533
|
+
* workerd's structured-clone boundary — so timestamp rehydration must
|
|
1534
|
+
* happen wherever the rows are consumed by the GraphClient.
|
|
1535
|
+
*
|
|
1536
|
+
* Defensive `_fgFindEdgesProjected` presence check matches the `expand` /
|
|
1537
|
+
* bulk-DML / aggregate pattern: the RPC method is optional on
|
|
1538
|
+
* `FiregraphStub` so external worker code with a hand-rolled stub wrapper
|
|
1539
|
+
* still type-checks. Surface a clear `UNSUPPORTED_OPERATION` rather than
|
|
1540
|
+
* `TypeError: stub._fgFindEdgesProjected is not a function` if the
|
|
1541
|
+
* wrapper hasn't forwarded the method.
|
|
1542
|
+
*/
|
|
1543
|
+
async findEdgesProjected(select, filters, options) {
|
|
1544
|
+
const stub = this.stub;
|
|
1545
|
+
if (!stub._fgFindEdgesProjected) {
|
|
1546
|
+
throw new FiregraphError(
|
|
1547
|
+
"findEdgesProjected() not supported by this Durable Object stub. The wrapped stub does not implement `_fgFindEdgesProjected`. If you control the stub wrapper, forward `_fgFindEdgesProjected` to the underlying DO.",
|
|
1548
|
+
"UNSUPPORTED_OPERATION"
|
|
1549
|
+
);
|
|
1550
|
+
}
|
|
1551
|
+
const { rows, columns } = await stub._fgFindEdgesProjected(select, filters, options);
|
|
1552
|
+
return rows.map((row) => decodeDOProjectedRow(row, columns));
|
|
1553
|
+
}
|
|
803
1554
|
// --- Cross-scope queries ---
|
|
804
1555
|
//
|
|
805
1556
|
// `findEdgesGlobal` is deliberately NOT defined on this class. The
|
|
@@ -875,6 +1626,19 @@ var GraphBatchImpl = class {
|
|
|
875
1626
|
this.scopePath = scopePath;
|
|
876
1627
|
}
|
|
877
1628
|
async putNode(aType, uid, data) {
|
|
1629
|
+
this.writeNode(aType, uid, data, "merge");
|
|
1630
|
+
}
|
|
1631
|
+
async putEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
1632
|
+
this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
|
|
1633
|
+
}
|
|
1634
|
+
async replaceNode(aType, uid, data) {
|
|
1635
|
+
this.writeNode(aType, uid, data, "replace");
|
|
1636
|
+
}
|
|
1637
|
+
async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
1638
|
+
this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
|
|
1639
|
+
}
|
|
1640
|
+
writeNode(aType, uid, data, mode) {
|
|
1641
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
|
|
878
1642
|
if (this.registry) {
|
|
879
1643
|
this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
|
|
880
1644
|
}
|
|
@@ -886,9 +1650,10 @@ var GraphBatchImpl = class {
|
|
|
886
1650
|
record.v = entry.schemaVersion;
|
|
887
1651
|
}
|
|
888
1652
|
}
|
|
889
|
-
this.backend.setDoc(docId, record);
|
|
1653
|
+
this.backend.setDoc(docId, record, mode);
|
|
890
1654
|
}
|
|
891
|
-
|
|
1655
|
+
writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
|
|
1656
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
|
|
892
1657
|
if (this.registry) {
|
|
893
1658
|
this.registry.validate(aType, axbType, bType, data, this.scopePath);
|
|
894
1659
|
}
|
|
@@ -900,11 +1665,15 @@ var GraphBatchImpl = class {
|
|
|
900
1665
|
record.v = entry.schemaVersion;
|
|
901
1666
|
}
|
|
902
1667
|
}
|
|
903
|
-
this.backend.setDoc(docId, record);
|
|
1668
|
+
this.backend.setDoc(docId, record, mode);
|
|
904
1669
|
}
|
|
905
1670
|
async updateNode(uid, data) {
|
|
906
1671
|
const docId = computeNodeDocId(uid);
|
|
907
|
-
this.backend.updateDoc(docId, {
|
|
1672
|
+
this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
1673
|
+
}
|
|
1674
|
+
async updateEdge(aUid, axbType, bUid, data) {
|
|
1675
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1676
|
+
this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
908
1677
|
}
|
|
909
1678
|
async removeNode(uid) {
|
|
910
1679
|
const docId = computeNodeDocId(uid);
|
|
@@ -1853,6 +2622,19 @@ var GraphTransactionImpl = class {
|
|
|
1853
2622
|
return results.map((r) => r.record);
|
|
1854
2623
|
}
|
|
1855
2624
|
async putNode(aType, uid, data) {
|
|
2625
|
+
await this.writeNode(aType, uid, data, "merge");
|
|
2626
|
+
}
|
|
2627
|
+
async putEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
2628
|
+
await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
|
|
2629
|
+
}
|
|
2630
|
+
async replaceNode(aType, uid, data) {
|
|
2631
|
+
await this.writeNode(aType, uid, data, "replace");
|
|
2632
|
+
}
|
|
2633
|
+
async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
2634
|
+
await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
|
|
2635
|
+
}
|
|
2636
|
+
async writeNode(aType, uid, data, mode) {
|
|
2637
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
|
|
1856
2638
|
if (this.registry) {
|
|
1857
2639
|
this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
|
|
1858
2640
|
}
|
|
@@ -1864,9 +2646,10 @@ var GraphTransactionImpl = class {
|
|
|
1864
2646
|
record.v = entry.schemaVersion;
|
|
1865
2647
|
}
|
|
1866
2648
|
}
|
|
1867
|
-
await this.backend.setDoc(docId, record);
|
|
2649
|
+
await this.backend.setDoc(docId, record, mode);
|
|
1868
2650
|
}
|
|
1869
|
-
async
|
|
2651
|
+
async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
|
|
2652
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
|
|
1870
2653
|
if (this.registry) {
|
|
1871
2654
|
this.registry.validate(aType, axbType, bType, data, this.scopePath);
|
|
1872
2655
|
}
|
|
@@ -1878,11 +2661,15 @@ var GraphTransactionImpl = class {
|
|
|
1878
2661
|
record.v = entry.schemaVersion;
|
|
1879
2662
|
}
|
|
1880
2663
|
}
|
|
1881
|
-
await this.backend.setDoc(docId, record);
|
|
2664
|
+
await this.backend.setDoc(docId, record, mode);
|
|
1882
2665
|
}
|
|
1883
2666
|
async updateNode(uid, data) {
|
|
1884
2667
|
const docId = computeNodeDocId(uid);
|
|
1885
|
-
await this.backend.updateDoc(docId, {
|
|
2668
|
+
await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
2669
|
+
}
|
|
2670
|
+
async updateEdge(aUid, axbType, bUid, data) {
|
|
2671
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
2672
|
+
await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
1886
2673
|
}
|
|
1887
2674
|
async removeNode(uid) {
|
|
1888
2675
|
const docId = computeNodeDocId(uid);
|
|
@@ -1920,6 +2707,15 @@ var GraphClientImpl = class _GraphClientImpl {
|
|
|
1920
2707
|
this.scanProtection = options?.scanProtection ?? "error";
|
|
1921
2708
|
}
|
|
1922
2709
|
scanProtection;
|
|
2710
|
+
/**
|
|
2711
|
+
* Capability set of the underlying backend. Mirrors `backend.capabilities`
|
|
2712
|
+
* verbatim so callers can portability-check (`client.capabilities.has(
|
|
2713
|
+
* 'query.join')`) without reaching for the backend handle. Static for the
|
|
2714
|
+
* lifetime of the client.
|
|
2715
|
+
*/
|
|
2716
|
+
get capabilities() {
|
|
2717
|
+
return this.backend.capabilities;
|
|
2718
|
+
}
|
|
1923
2719
|
// Static mode
|
|
1924
2720
|
staticRegistry;
|
|
1925
2721
|
// Dynamic mode
|
|
@@ -2081,6 +2877,19 @@ var GraphClientImpl = class _GraphClientImpl {
|
|
|
2081
2877
|
// GraphWriter
|
|
2082
2878
|
// ---------------------------------------------------------------------------
|
|
2083
2879
|
async putNode(aType, uid, data) {
|
|
2880
|
+
await this.writeNode(aType, uid, data, "merge");
|
|
2881
|
+
}
|
|
2882
|
+
async putEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
2883
|
+
await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
|
|
2884
|
+
}
|
|
2885
|
+
async replaceNode(aType, uid, data) {
|
|
2886
|
+
await this.writeNode(aType, uid, data, "replace");
|
|
2887
|
+
}
|
|
2888
|
+
async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
2889
|
+
await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
|
|
2890
|
+
}
|
|
2891
|
+
async writeNode(aType, uid, data, mode) {
|
|
2892
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
|
|
2084
2893
|
const registry = this.getRegistryForType(aType);
|
|
2085
2894
|
if (registry) {
|
|
2086
2895
|
registry.validate(aType, NODE_RELATION, aType, data, this.backend.scopePath);
|
|
@@ -2094,9 +2903,10 @@ var GraphClientImpl = class _GraphClientImpl {
|
|
|
2094
2903
|
record.v = entry.schemaVersion;
|
|
2095
2904
|
}
|
|
2096
2905
|
}
|
|
2097
|
-
await backend.setDoc(docId, record);
|
|
2906
|
+
await backend.setDoc(docId, record, mode);
|
|
2098
2907
|
}
|
|
2099
|
-
async
|
|
2908
|
+
async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
|
|
2909
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
|
|
2100
2910
|
const registry = this.getRegistryForType(aType);
|
|
2101
2911
|
if (registry) {
|
|
2102
2912
|
registry.validate(aType, axbType, bType, data, this.backend.scopePath);
|
|
@@ -2110,11 +2920,15 @@ var GraphClientImpl = class _GraphClientImpl {
|
|
|
2110
2920
|
record.v = entry.schemaVersion;
|
|
2111
2921
|
}
|
|
2112
2922
|
}
|
|
2113
|
-
await backend.setDoc(docId, record);
|
|
2923
|
+
await backend.setDoc(docId, record, mode);
|
|
2114
2924
|
}
|
|
2115
2925
|
async updateNode(uid, data) {
|
|
2116
2926
|
const docId = computeNodeDocId(uid);
|
|
2117
|
-
await this.backend.updateDoc(docId, {
|
|
2927
|
+
await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
2928
|
+
}
|
|
2929
|
+
async updateEdge(aUid, axbType, bUid, data) {
|
|
2930
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
2931
|
+
await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
2118
2932
|
}
|
|
2119
2933
|
async removeNode(uid) {
|
|
2120
2934
|
const docId = computeNodeDocId(uid);
|
|
@@ -2196,6 +3010,33 @@ var GraphClientImpl = class _GraphClientImpl {
|
|
|
2196
3010
|
return this.applyMigrations(records);
|
|
2197
3011
|
}
|
|
2198
3012
|
// ---------------------------------------------------------------------------
|
|
3013
|
+
// Aggregate query (capability: query.aggregate)
|
|
3014
|
+
// ---------------------------------------------------------------------------
|
|
3015
|
+
async aggregate(params) {
|
|
3016
|
+
if (!this.backend.aggregate) {
|
|
3017
|
+
throw new FiregraphError(
|
|
3018
|
+
"aggregate() is not supported by the current storage backend.",
|
|
3019
|
+
"UNSUPPORTED_OPERATION"
|
|
3020
|
+
);
|
|
3021
|
+
}
|
|
3022
|
+
const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
|
|
3023
|
+
if (!hasAnyFilter) {
|
|
3024
|
+
this.checkQuerySafety([], params.allowCollectionScan);
|
|
3025
|
+
const result2 = await this.backend.aggregate(params.aggregates, []);
|
|
3026
|
+
return result2;
|
|
3027
|
+
}
|
|
3028
|
+
const plan = buildEdgeQueryPlan(params);
|
|
3029
|
+
if (plan.strategy === "get") {
|
|
3030
|
+
throw new FiregraphError(
|
|
3031
|
+
"aggregate() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
|
|
3032
|
+
"INVALID_QUERY"
|
|
3033
|
+
);
|
|
3034
|
+
}
|
|
3035
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
3036
|
+
const result = await this.backend.aggregate(params.aggregates, plan.filters);
|
|
3037
|
+
return result;
|
|
3038
|
+
}
|
|
3039
|
+
// ---------------------------------------------------------------------------
|
|
2199
3040
|
// Bulk operations
|
|
2200
3041
|
// ---------------------------------------------------------------------------
|
|
2201
3042
|
async removeNodeCascade(uid, options) {
|
|
@@ -2205,6 +3046,327 @@ var GraphClientImpl = class _GraphClientImpl {
|
|
|
2205
3046
|
return this.backend.bulkRemoveEdges(params, this, options);
|
|
2206
3047
|
}
|
|
2207
3048
|
// ---------------------------------------------------------------------------
|
|
3049
|
+
// Server-side DML (capability: query.dml)
|
|
3050
|
+
// ---------------------------------------------------------------------------
|
|
3051
|
+
/**
|
|
3052
|
+
* Single-statement bulk DELETE. Translates `params` to a filter list via
|
|
3053
|
+
* `buildEdgeQueryPlan` (the same plan `findEdges` uses) and dispatches to
|
|
3054
|
+
* `backend.bulkDelete`. The fetch-then-delete loop in `bulkRemoveEdges`
|
|
3055
|
+
* is the cap-less fallback; this method is the fast path on backends
|
|
3056
|
+
* declaring `query.dml`.
|
|
3057
|
+
*
|
|
3058
|
+
* Scan-protection rules match `findEdges`: a query with no identifying
|
|
3059
|
+
* fields requires `allowCollectionScan: true` to pass. A bare-empty
|
|
3060
|
+
* filter set (no `aType`, `aUid`, etc., no `where`) is allowed at this
|
|
3061
|
+
* layer — shared SQLite bounds the blast radius via its leading `scope`
|
|
3062
|
+
* predicate — but the DO RPC backend rejects empty filters at the wire
|
|
3063
|
+
* boundary as defense-in-depth. To wipe a routed subgraph DO, use
|
|
3064
|
+
* `removeNodeCascade` on the parent node instead.
|
|
3065
|
+
*/
|
|
3066
|
+
async bulkDelete(params, options) {
|
|
3067
|
+
if (!this.backend.bulkDelete) {
|
|
3068
|
+
throw new FiregraphError(
|
|
3069
|
+
"bulkDelete() is not supported by the current storage backend. Fall back to bulkRemoveEdges() for backends without query.dml (e.g. Firestore Standard).",
|
|
3070
|
+
"UNSUPPORTED_OPERATION"
|
|
3071
|
+
);
|
|
3072
|
+
}
|
|
3073
|
+
const filters = this.buildDmlFilters(params);
|
|
3074
|
+
return this.backend.bulkDelete(filters, options);
|
|
3075
|
+
}
|
|
3076
|
+
/**
|
|
3077
|
+
* Single-statement bulk UPDATE. Same translation path as `bulkDelete`,
|
|
3078
|
+
* but the patch is deep-merged into each matching row's `data` via the
|
|
3079
|
+
* shared `flattenPatch` pipeline. Identifying columns are immutable
|
|
3080
|
+
* through this path (see `BulkUpdatePatch` JSDoc).
|
|
3081
|
+
*
|
|
3082
|
+
* Empty-patch rejection happens inside the backend (`compileBulkUpdate`)
|
|
3083
|
+
* — a `data: {}` payload would only rewrite `updated_at`, which is
|
|
3084
|
+
* almost certainly a bug.
|
|
3085
|
+
*/
|
|
3086
|
+
async bulkUpdate(params, patch, options) {
|
|
3087
|
+
if (!this.backend.bulkUpdate) {
|
|
3088
|
+
throw new FiregraphError(
|
|
3089
|
+
"bulkUpdate() is not supported by the current storage backend.",
|
|
3090
|
+
"UNSUPPORTED_OPERATION"
|
|
3091
|
+
);
|
|
3092
|
+
}
|
|
3093
|
+
const filters = this.buildDmlFilters(params);
|
|
3094
|
+
return this.backend.bulkUpdate(filters, patch, options);
|
|
3095
|
+
}
|
|
3096
|
+
// ---------------------------------------------------------------------------
|
|
3097
|
+
// Multi-source fan-out (capability: query.join)
|
|
3098
|
+
// ---------------------------------------------------------------------------
|
|
3099
|
+
/**
|
|
3100
|
+
* Fan out from `params.sources` over a single edge type in one round trip.
|
|
3101
|
+
* On backends without `query.join`, throws `UNSUPPORTED_OPERATION` — the
|
|
3102
|
+
* cap-less fallback is the per-source `findEdges` loop, which lives in
|
|
3103
|
+
* `traverse.ts` (the higher-level traversal walker) rather than here.
|
|
3104
|
+
*
|
|
3105
|
+
* `expand()` is intentionally edge-type-only — the source set is a flat
|
|
3106
|
+
* UID list and the hop matches one `axbType`. Multi-axbType expansions
|
|
3107
|
+
* become multiple `expand()` calls, one per relation.
|
|
3108
|
+
*
|
|
3109
|
+
* `params.sources.length === 0` short-circuits to an empty result. The
|
|
3110
|
+
* backend never sees the call. (`compileExpand` itself rejects empty
|
|
3111
|
+
* because `IN ()` is not valid SQL.)
|
|
3112
|
+
*/
|
|
3113
|
+
async expand(params) {
|
|
3114
|
+
if (!this.backend.expand) {
|
|
3115
|
+
throw new FiregraphError(
|
|
3116
|
+
"expand() is not supported by the current storage backend. Backends without `query.join` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent (just slower).",
|
|
3117
|
+
"UNSUPPORTED_OPERATION"
|
|
3118
|
+
);
|
|
3119
|
+
}
|
|
3120
|
+
if (params.sources.length === 0) {
|
|
3121
|
+
return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
|
|
3122
|
+
}
|
|
3123
|
+
return this.backend.expand(params);
|
|
3124
|
+
}
|
|
3125
|
+
// ---------------------------------------------------------------------------
|
|
3126
|
+
// Engine-level multi-hop traversal (capability: traversal.serverSide)
|
|
3127
|
+
// ---------------------------------------------------------------------------
|
|
3128
|
+
/**
|
|
3129
|
+
* Compile a multi-hop traversal spec into one server-side nested
|
|
3130
|
+
* Pipeline and dispatch a single round trip.
|
|
3131
|
+
*
|
|
3132
|
+
* Backends declaring `traversal.serverSide` (Firestore Enterprise
|
|
3133
|
+
* today) install this method; everywhere else, it throws
|
|
3134
|
+
* `UNSUPPORTED_OPERATION`. The capability gate matches the type-level
|
|
3135
|
+
* surface — `GraphClient<C>` only exposes `runEngineTraversal` when
|
|
3136
|
+
* `'traversal.serverSide' extends C`.
|
|
3137
|
+
*
|
|
3138
|
+
* Most callers should not invoke this method directly; the
|
|
3139
|
+
* `createTraversal(...).run()` builder routes through it
|
|
3140
|
+
* automatically when `engineTraversal: 'auto'` (the default) and
|
|
3141
|
+
* the spec is eligible per `firestore-traverse-compiler.ts`. Calling
|
|
3142
|
+
* directly is appropriate for benchmarking or for callers that have
|
|
3143
|
+
* already shaped their hop chain into the strict
|
|
3144
|
+
* `EngineTraversalParams` shape.
|
|
3145
|
+
*
|
|
3146
|
+
* `params.sources.length === 0` short-circuits to empty per-hop
|
|
3147
|
+
* arrays. The backend never sees the call.
|
|
3148
|
+
*/
|
|
3149
|
+
async runEngineTraversal(params) {
|
|
3150
|
+
if (!this.backend.runEngineTraversal) {
|
|
3151
|
+
throw new FiregraphError(
|
|
3152
|
+
"runEngineTraversal() is not supported by the current storage backend. Backends without `traversal.serverSide` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent for in-graph specs (different round-trip profile).",
|
|
3153
|
+
"UNSUPPORTED_OPERATION"
|
|
3154
|
+
);
|
|
3155
|
+
}
|
|
3156
|
+
if (params.sources.length === 0) {
|
|
3157
|
+
return {
|
|
3158
|
+
hops: params.hops.map(() => ({ edges: [], sourceCount: 0 })),
|
|
3159
|
+
totalReads: 0
|
|
3160
|
+
};
|
|
3161
|
+
}
|
|
3162
|
+
return this.backend.runEngineTraversal(params);
|
|
3163
|
+
}
|
|
3164
|
+
// ---------------------------------------------------------------------------
|
|
3165
|
+
// Server-side projection (capability: query.select)
|
|
3166
|
+
// ---------------------------------------------------------------------------
|
|
3167
|
+
/**
|
|
3168
|
+
* Server-side projection — fetch only the requested fields from each
|
|
3169
|
+
* matching edge. The backend translates the call into a projecting query
|
|
3170
|
+
* (`SELECT json_extract(...)` on SQLite/DO, `Query.select(...)` on
|
|
3171
|
+
* Firestore Standard, classic projection on Enterprise) so the wire
|
|
3172
|
+
* payload is reduced to just the requested fields.
|
|
3173
|
+
*
|
|
3174
|
+
* Resolution rules for `select` (mirrored across all backends):
|
|
3175
|
+
*
|
|
3176
|
+
* - Built-in envelope fields (`aType`, `aUid`, `axbType`, `bType`,
|
|
3177
|
+
* `bUid`, `createdAt`, `updatedAt`, `v`) → resolve to the typed
|
|
3178
|
+
* column / Firestore field directly.
|
|
3179
|
+
* - `'data'` literal → returns the whole user payload.
|
|
3180
|
+
* - `'data.<x>'` → explicit nested path, returned at the same shape.
|
|
3181
|
+
* - bare name → rewritten to `data.<name>` (the canonical "give me a
|
|
3182
|
+
* few keys out of the JSON payload" shape).
|
|
3183
|
+
*
|
|
3184
|
+
* Empty `select: []` is rejected with `INVALID_QUERY`. Duplicate entries
|
|
3185
|
+
* are de-duped (first-occurrence order preserved); the result row carries
|
|
3186
|
+
* one slot per unique field.
|
|
3187
|
+
*
|
|
3188
|
+
* Migrations are *not* applied to the result. The caller asked for a
|
|
3189
|
+
* partial shape, and rehydrating it through the migration pipeline would
|
|
3190
|
+
* require synthesising every absent field — see
|
|
3191
|
+
* `StorageBackend.findEdgesProjected` for the rationale.
|
|
3192
|
+
*
|
|
3193
|
+
* Scan protection follows the `findEdges` rules: a query with no
|
|
3194
|
+
* identifying fields requires `allowCollectionScan: true` to pass. The
|
|
3195
|
+
* cap-less fallback would be `findEdges` + JS-side projection, but that
|
|
3196
|
+
* defeats the wire-payload reduction; backends without `query.select`
|
|
3197
|
+
* throw `UNSUPPORTED_OPERATION` rather than silently materialising full
|
|
3198
|
+
* rows.
|
|
3199
|
+
*/
|
|
3200
|
+
async findEdgesProjected(params) {
|
|
3201
|
+
if (!this.backend.findEdgesProjected) {
|
|
3202
|
+
throw new FiregraphError(
|
|
3203
|
+
"findEdgesProjected() is not supported by the current storage backend. There is no client-side fallback because the wire-payload reduction is the entire point of the API \u2014 use findEdges() and project in JS if the backend does not declare `query.select`.",
|
|
3204
|
+
"UNSUPPORTED_OPERATION"
|
|
3205
|
+
);
|
|
3206
|
+
}
|
|
3207
|
+
if (params.select.length === 0) {
|
|
3208
|
+
throw new FiregraphError(
|
|
3209
|
+
"findEdgesProjected() requires a non-empty `select` list.",
|
|
3210
|
+
"INVALID_QUERY"
|
|
3211
|
+
);
|
|
3212
|
+
}
|
|
3213
|
+
const plan = buildEdgeQueryPlan(params);
|
|
3214
|
+
let filters;
|
|
3215
|
+
let options;
|
|
3216
|
+
if (plan.strategy === "get") {
|
|
3217
|
+
filters = [
|
|
3218
|
+
{ field: "aUid", op: "==", value: params.aUid },
|
|
3219
|
+
{ field: "axbType", op: "==", value: params.axbType },
|
|
3220
|
+
{ field: "bUid", op: "==", value: params.bUid }
|
|
3221
|
+
];
|
|
3222
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
3223
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
3224
|
+
options = void 0;
|
|
3225
|
+
} else {
|
|
3226
|
+
filters = plan.filters;
|
|
3227
|
+
options = plan.options;
|
|
3228
|
+
}
|
|
3229
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
3230
|
+
const rows = await this.backend.findEdgesProjected(params.select, filters, options);
|
|
3231
|
+
return rows;
|
|
3232
|
+
}
|
|
3233
|
+
/**
|
|
3234
|
+
* Native vector / nearest-neighbour search (capability `search.vector`).
|
|
3235
|
+
*
|
|
3236
|
+
* Resolves to the top-K records by similarity, sorted nearest-first
|
|
3237
|
+
* (`EUCLIDEAN` / `COSINE`) or highest-first (`DOT_PRODUCT`). The wrapper
|
|
3238
|
+
* is intentionally thin: capability check, scan-protection, then forward
|
|
3239
|
+
* `params` verbatim to the backend. All field-path normalisation and
|
|
3240
|
+
* SDK-shape validation lives in the shared
|
|
3241
|
+
* `runFirestoreFindNearest` helper that both Firestore editions call —
|
|
3242
|
+
* keeping it there means the validation surface stays in lockstep with
|
|
3243
|
+
* the SDK call site, regardless of which backend is plugged in.
|
|
3244
|
+
*
|
|
3245
|
+
* Migrations are NOT applied. The vector index walked the raw stored
|
|
3246
|
+
* shape; rehydrating each row through the migration pipeline would
|
|
3247
|
+
* change the candidate set the index already chose. If you need
|
|
3248
|
+
* migrated shape, follow up with `getNode` / `findEdges` on the
|
|
3249
|
+
* returned UIDs — those paths apply migrations normally.
|
|
3250
|
+
*
|
|
3251
|
+
* Scan-protection mirrors `findEdges`: if no identifying filters
|
|
3252
|
+
* (`aType` / `axbType` / `bType`) and no `where` clauses are supplied,
|
|
3253
|
+
* the request must opt in via `allowCollectionScan: true`. The ANN
|
|
3254
|
+
* query still walks the candidate set the WHERE clause produces, so
|
|
3255
|
+
* an unfiltered nearest-neighbour search over a million-row collection
|
|
3256
|
+
* is the same scan trap as an unfiltered `findEdges`.
|
|
3257
|
+
*
|
|
3258
|
+
* Backends without `search.vector` throw `UNSUPPORTED_OPERATION` —
|
|
3259
|
+
* there is no client-side fallback because emulating ANN over the
|
|
3260
|
+
* generic backend surface (`findEdges` + JS-side cosine) doesn't scale
|
|
3261
|
+
* past trivial datasets and would give callers the wrong mental model
|
|
3262
|
+
* about cost.
|
|
3263
|
+
*/
|
|
3264
|
+
async findNearest(params) {
|
|
3265
|
+
if (!this.backend.findNearest) {
|
|
3266
|
+
throw new FiregraphError(
|
|
3267
|
+
"findNearest() is not supported by the current storage backend. Vector search requires a backend that declares `search.vector` (currently Firestore Standard and Enterprise). There is no client-side fallback because emulating ANN on top of the generic backend surface does not scale beyond toy datasets.",
|
|
3268
|
+
"UNSUPPORTED_OPERATION"
|
|
3269
|
+
);
|
|
3270
|
+
}
|
|
3271
|
+
const filters = [];
|
|
3272
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
3273
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
3274
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
3275
|
+
if (params.where) filters.push(...params.where);
|
|
3276
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
3277
|
+
return this.backend.findNearest(params);
|
|
3278
|
+
}
|
|
3279
|
+
/**
|
|
3280
|
+
* Native full-text search (capability `search.fullText`).
|
|
3281
|
+
*
|
|
3282
|
+
* Returns the top-N records by relevance, ordered by the search
|
|
3283
|
+
* index's score. Only Firestore Enterprise declares this capability
|
|
3284
|
+
* today — the underlying Pipelines `search({ query: documentMatches(...) })`
|
|
3285
|
+
* stage requires Enterprise's FTS index. Standard does not declare
|
|
3286
|
+
* the cap (FTS is an Enterprise-only product feature, not a
|
|
3287
|
+
* typed-API gap), and the SQLite-shaped backends have no native
|
|
3288
|
+
* FTS index. Backends without `search.fullText` throw
|
|
3289
|
+
* `UNSUPPORTED_OPERATION` from this wrapper.
|
|
3290
|
+
*
|
|
3291
|
+
* Scan-protection mirrors `findNearest`: a search with no
|
|
3292
|
+
* identifying filters (`aType` / `axbType` / `bType`) walks every
|
|
3293
|
+
* row the index scored, so the request must opt in via
|
|
3294
|
+
* `allowCollectionScan: true`.
|
|
3295
|
+
*
|
|
3296
|
+
* Migrations are NOT applied. The FTS index walked the raw stored
|
|
3297
|
+
* shape; rehydrating each row through the migration pipeline would
|
|
3298
|
+
* change the candidate set the index already scored. If you need
|
|
3299
|
+
* migrated shape, follow up with `getNode` / `findEdges` on the
|
|
3300
|
+
* returned UIDs.
|
|
3301
|
+
*/
|
|
3302
|
+
async fullTextSearch(params) {
|
|
3303
|
+
if (!this.backend.fullTextSearch) {
|
|
3304
|
+
throw new FiregraphError(
|
|
3305
|
+
"fullTextSearch() is not supported by the current storage backend. Full-text search requires a backend that declares `search.fullText` (currently Firestore Enterprise only \u2014 FTS is an Enterprise product feature). There is no client-side fallback because emulating FTS over the generic backend surface would not scale beyond toy datasets.",
|
|
3306
|
+
"UNSUPPORTED_OPERATION"
|
|
3307
|
+
);
|
|
3308
|
+
}
|
|
3309
|
+
const filters = [];
|
|
3310
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
3311
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
3312
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
3313
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
3314
|
+
return this.backend.fullTextSearch(params);
|
|
3315
|
+
}
|
|
3316
|
+
/**
|
|
3317
|
+
* Native geospatial distance search (capability `search.geo`).
|
|
3318
|
+
*
|
|
3319
|
+
* Returns rows whose `geoField` lies within `radiusMeters` of
|
|
3320
|
+
* `point`, ordered nearest-first by default. Only Firestore
|
|
3321
|
+
* Enterprise declares this capability — same Enterprise-only
|
|
3322
|
+
* gating as `fullTextSearch`. Backends without `search.geo` throw
|
|
3323
|
+
* `UNSUPPORTED_OPERATION` from this wrapper.
|
|
3324
|
+
*
|
|
3325
|
+
* Scan-protection mirrors `findNearest` and `fullTextSearch`.
|
|
3326
|
+
*
|
|
3327
|
+
* Migrations are NOT applied — same rationale as the other search
|
|
3328
|
+
* extensions.
|
|
3329
|
+
*/
|
|
3330
|
+
async geoSearch(params) {
|
|
3331
|
+
if (!this.backend.geoSearch) {
|
|
3332
|
+
throw new FiregraphError(
|
|
3333
|
+
"geoSearch() is not supported by the current storage backend. Geospatial search requires a backend that declares `search.geo` (currently Firestore Enterprise only \u2014 geo queries are an Enterprise product feature). There is no client-side fallback because emulating geo over the generic backend surface (haversine over `findEdges`) would not scale beyond trivial datasets.",
|
|
3334
|
+
"UNSUPPORTED_OPERATION"
|
|
3335
|
+
);
|
|
3336
|
+
}
|
|
3337
|
+
const filters = [];
|
|
3338
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
3339
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
3340
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
3341
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
3342
|
+
return this.backend.geoSearch(params);
|
|
3343
|
+
}
|
|
3344
|
+
/**
|
|
3345
|
+
* Translate a `FindEdgesParams` into the `QueryFilter[]` shape the
|
|
3346
|
+
* backend `bulkDelete` / `bulkUpdate` methods expect. Mirrors the
|
|
3347
|
+
* `aggregate()` plan: a bare-empty params object becomes an empty
|
|
3348
|
+
* filter list (after a scan-protection check); a GET-shape (all three
|
|
3349
|
+
* identifiers) is rejected so we never silently turn a single-row
|
|
3350
|
+
* lookup into a server-side DML; otherwise we run `buildEdgeQueryPlan`
|
|
3351
|
+
* and surface its filters.
|
|
3352
|
+
*/
|
|
3353
|
+
buildDmlFilters(params) {
|
|
3354
|
+
const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
|
|
3355
|
+
if (!hasAnyFilter) {
|
|
3356
|
+
this.checkQuerySafety([], params.allowCollectionScan);
|
|
3357
|
+
return [];
|
|
3358
|
+
}
|
|
3359
|
+
const plan = buildEdgeQueryPlan(params);
|
|
3360
|
+
if (plan.strategy === "get") {
|
|
3361
|
+
throw new FiregraphError(
|
|
3362
|
+
"bulkDelete() / bulkUpdate() require a query, not a direct document lookup. Use removeEdge() / updateEdge() for single-row operations, or omit one of aUid/axbType/bUid to force a query strategy.",
|
|
3363
|
+
"INVALID_QUERY"
|
|
3364
|
+
);
|
|
3365
|
+
}
|
|
3366
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
3367
|
+
return plan.filters;
|
|
3368
|
+
}
|
|
3369
|
+
// ---------------------------------------------------------------------------
|
|
2208
3370
|
// Dynamic registry methods
|
|
2209
3371
|
// ---------------------------------------------------------------------------
|
|
2210
3372
|
async defineNodeType(name, jsonSchema, description, options) {
|
|
@@ -2344,9 +3506,10 @@ var GraphClientImpl = class _GraphClientImpl {
|
|
|
2344
3506
|
};
|
|
2345
3507
|
}
|
|
2346
3508
|
};
|
|
2347
|
-
function
|
|
3509
|
+
function createGraphClient(backend, options, metaBackend) {
|
|
2348
3510
|
return new GraphClientImpl(backend, options, metaBackend);
|
|
2349
3511
|
}
|
|
3512
|
+
var createGraphClientFromBackend = createGraphClient;
|
|
2350
3513
|
|
|
2351
3514
|
// src/cloudflare/client.ts
|
|
2352
3515
|
function createDOClient(namespace, rootKey, options = {}) {
|
|
@@ -2492,11 +3655,32 @@ var FiregraphDO = class extends import_cloudflare_workers.DurableObject {
|
|
|
2492
3655
|
const rows = this.execAll(stmt);
|
|
2493
3656
|
return rows.map(rowToDORecord);
|
|
2494
3657
|
}
|
|
3658
|
+
/**
|
|
3659
|
+
* Aggregate query (capability `query.aggregate`). Compiles a single
|
|
3660
|
+
* `SELECT` projecting one column per alias; SQLite handles count, sum,
|
|
3661
|
+
* avg, min, max natively. Empty-set fix-ups (NULL → 0 for sum/min/max,
|
|
3662
|
+
* NaN for avg) happen on the client side in `DORPCBackend.aggregate` so
|
|
3663
|
+
* the wire payload stays a plain row of (alias → number | null).
|
|
3664
|
+
*/
|
|
3665
|
+
async _fgAggregate(spec, filters) {
|
|
3666
|
+
const { stmt, aliases } = compileDOAggregate(this.table, spec, filters);
|
|
3667
|
+
const rows = this.execAll(stmt);
|
|
3668
|
+
const row = rows[0] ?? {};
|
|
3669
|
+
const out = {};
|
|
3670
|
+
for (const alias of aliases) {
|
|
3671
|
+
const v = row[alias];
|
|
3672
|
+
if (v === null || v === void 0) out[alias] = null;
|
|
3673
|
+
else if (typeof v === "bigint") out[alias] = Number(v);
|
|
3674
|
+
else if (typeof v === "number") out[alias] = v;
|
|
3675
|
+
else out[alias] = Number(v);
|
|
3676
|
+
}
|
|
3677
|
+
return out;
|
|
3678
|
+
}
|
|
2495
3679
|
// ---------------------------------------------------------------------------
|
|
2496
3680
|
// RPC: writes
|
|
2497
3681
|
// ---------------------------------------------------------------------------
|
|
2498
|
-
async _fgSetDoc(docId, record) {
|
|
2499
|
-
const stmt = compileDOSet(this.table, docId, record, Date.now());
|
|
3682
|
+
async _fgSetDoc(docId, record, mode) {
|
|
3683
|
+
const stmt = compileDOSet(this.table, docId, record, Date.now(), mode);
|
|
2500
3684
|
this.execRun(stmt);
|
|
2501
3685
|
}
|
|
2502
3686
|
async _fgUpdateDoc(docId, update) {
|
|
@@ -2526,7 +3710,7 @@ var FiregraphDO = class extends import_cloudflare_workers.DurableObject {
|
|
|
2526
3710
|
const statements = ops.map((op) => {
|
|
2527
3711
|
switch (op.kind) {
|
|
2528
3712
|
case "set":
|
|
2529
|
-
return compileDOSet(this.table, op.docId, op.record, now);
|
|
3713
|
+
return compileDOSet(this.table, op.docId, op.record, now, op.mode);
|
|
2530
3714
|
case "update":
|
|
2531
3715
|
return compileDOUpdate(this.table, op.docId, op.update, now);
|
|
2532
3716
|
case "delete":
|
|
@@ -2642,6 +3826,119 @@ var FiregraphDO = class extends import_cloudflare_workers.DurableObject {
|
|
|
2642
3826
|
}
|
|
2643
3827
|
}
|
|
2644
3828
|
// ---------------------------------------------------------------------------
|
|
3829
|
+
// RPC: server-side DML (capability `query.dml`)
|
|
3830
|
+
//
|
|
3831
|
+
// Single-statement DELETE/UPDATE WHERE that the SQLite engine handles in
|
|
3832
|
+
// one shot — the cap-less alternative is `_fgBulkRemoveEdges` which fetches
|
|
3833
|
+
// doc IDs first, then deletes them one-by-one inside a transaction. The
|
|
3834
|
+
// DML path skips the round-trip and lets SQLite optimize the WHERE.
|
|
3835
|
+
//
|
|
3836
|
+
// RETURNING "doc_id" gives us an authoritative affected-row count; SQLite
|
|
3837
|
+
// ≥3.35 supports it for both DELETE and UPDATE and DO SQLite is always
|
|
3838
|
+
// recent enough.
|
|
3839
|
+
//
|
|
3840
|
+
// Retry policy: unlike `SqliteBackendImpl.bulkDelete` / `bulkUpdate`, which
|
|
3841
|
+
// wrap a chunked retry/backoff loop around each batch (D1's 1000-statement
|
|
3842
|
+
// cap forces chunking, so a single transient failure shouldn't kill the
|
|
3843
|
+
// whole job), the DO path runs a single un-chunked statement against
|
|
3844
|
+
// `state.storage.sql` synchronously. There's nothing to retry inside the
|
|
3845
|
+
// DO — the engine commits or it doesn't. If a caller wants retry semantics
|
|
3846
|
+
// on the wire, they wrap the `bulkDelete` / `bulkUpdate` call themselves.
|
|
3847
|
+
// ---------------------------------------------------------------------------
|
|
3848
|
+
async _fgBulkDelete(filters, _options) {
|
|
3849
|
+
void _options;
|
|
3850
|
+
if (filters.length === 0) {
|
|
3851
|
+
throw new FiregraphError(
|
|
3852
|
+
"bulkDelete() requires at least one filter when targeting a Durable Object backend. An empty filter list would wipe every row in the DO. To wipe a routed subgraph DO, use `removeNodeCascade` on the parent node or `_fgDestroy` directly on the stub.",
|
|
3853
|
+
"INVALID_ARGUMENT"
|
|
3854
|
+
);
|
|
3855
|
+
}
|
|
3856
|
+
const stmt = compileDOBulkDelete(this.table, filters);
|
|
3857
|
+
return this.execDmlWithReturning(stmt);
|
|
3858
|
+
}
|
|
3859
|
+
async _fgBulkUpdate(filters, patch, _options) {
|
|
3860
|
+
void _options;
|
|
3861
|
+
const stmt = compileDOBulkUpdate(this.table, filters, patch.data, Date.now());
|
|
3862
|
+
return this.execDmlWithReturning(stmt);
|
|
3863
|
+
}
|
|
3864
|
+
// ---------------------------------------------------------------------------
|
|
3865
|
+
// RPC: multi-source fan-out (`query.join`)
|
|
3866
|
+
//
|
|
3867
|
+
// One `SELECT … WHERE "aUid" IN (?, ?, …)` (or `"bUid"` for reverse)
|
|
3868
|
+
// collapses N per-source `findEdges` round trips into one. When the
|
|
3869
|
+
// caller asks for hydration, a second IN-clause statement fetches the
|
|
3870
|
+
// target node rows; the DO does the alignment in JS so the wire payload
|
|
3871
|
+
// is two `DORecordWire[]` arrays instead of a JOIN-shaped row that
|
|
3872
|
+
// would force a custom client-side decoder.
|
|
3873
|
+
// ---------------------------------------------------------------------------
|
|
3874
|
+
async _fgExpand(params) {
|
|
3875
|
+
if (params.sources.length === 0) {
|
|
3876
|
+
return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
|
|
3877
|
+
}
|
|
3878
|
+
const stmt = compileDOExpand(this.table, params);
|
|
3879
|
+
const rows = this.state.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
|
|
3880
|
+
const edges = rows.map((row) => rowToDORecord(row));
|
|
3881
|
+
if (!params.hydrate) {
|
|
3882
|
+
return { edges };
|
|
3883
|
+
}
|
|
3884
|
+
const direction = params.direction ?? "forward";
|
|
3885
|
+
const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
|
|
3886
|
+
const uniqueTargets = [...new Set(targetUids)];
|
|
3887
|
+
if (uniqueTargets.length === 0) {
|
|
3888
|
+
return { edges, targets: [] };
|
|
3889
|
+
}
|
|
3890
|
+
const hydrateStmt = compileDOExpandHydrate(this.table, uniqueTargets);
|
|
3891
|
+
const hydrateRows = this.state.storage.sql.exec(hydrateStmt.sql, ...hydrateStmt.params).toArray();
|
|
3892
|
+
const byUid = /* @__PURE__ */ new Map();
|
|
3893
|
+
for (const row of hydrateRows) {
|
|
3894
|
+
const node = rowToDORecord(row);
|
|
3895
|
+
byUid.set(node.bUid, node);
|
|
3896
|
+
}
|
|
3897
|
+
const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
|
|
3898
|
+
return { edges, targets };
|
|
3899
|
+
}
|
|
3900
|
+
// ---------------------------------------------------------------------------
|
|
3901
|
+
// RPC: server-side projection (`query.select`)
|
|
3902
|
+
//
|
|
3903
|
+
// One `SELECT json_extract(data, '$.f1'), …` returns the projected fields.
|
|
3904
|
+
// The DO leaves decoding to the client because timestamp values need to
|
|
3905
|
+
// rewrap as `GraphTimestampImpl` (a class instance, lost by structured
|
|
3906
|
+
// clone) — instead of inventing per-field timestamp sentinels, we send the
|
|
3907
|
+
// raw rows and the column spec, and let `DORPCBackend.findEdgesProjected`
|
|
3908
|
+
// call `decodeDOProjectedRow` once. The spec is small (≤ ~100 bytes for
|
|
3909
|
+
// a typical projection); structured clone copes happily.
|
|
3910
|
+
// ---------------------------------------------------------------------------
|
|
3911
|
+
async _fgFindEdgesProjected(select, filters, options) {
|
|
3912
|
+
const { stmt, columns } = compileDOFindEdgesProjected(this.table, select, filters, options);
|
|
3913
|
+
const rows = this.state.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
|
|
3914
|
+
return { rows, columns };
|
|
3915
|
+
}
|
|
3916
|
+
/**
|
|
3917
|
+
* Run a DML statement with `RETURNING "doc_id"` so the affected-row count
|
|
3918
|
+
* comes back authoritatively. Errors are caught and surfaced via the
|
|
3919
|
+
* `BulkResult.errors` array (single batch, batchIndex 0) so the wire
|
|
3920
|
+
* payload stays a regular `BulkResult` and the client doesn't have to
|
|
3921
|
+
* differentiate "RPC threw" from "single-statement failure."
|
|
3922
|
+
*/
|
|
3923
|
+
execDmlWithReturning(stmt) {
|
|
3924
|
+
const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
|
|
3925
|
+
try {
|
|
3926
|
+
const rows = this.state.storage.sql.exec(sqlWithReturning, ...stmt.params).toArray();
|
|
3927
|
+
return { deleted: rows.length, batches: 1, errors: [] };
|
|
3928
|
+
} catch (err) {
|
|
3929
|
+
const error = err instanceof Error ? err : new Error(String(err));
|
|
3930
|
+
return {
|
|
3931
|
+
deleted: 0,
|
|
3932
|
+
batches: 0,
|
|
3933
|
+
// Like `_fgBulkRemoveEdges`'s catch arm: a single failed statement
|
|
3934
|
+
// is one batch, and the operationCount is "unknown" for a server-
|
|
3935
|
+
// side DML — we report 0 as the lower bound. Callers that care
|
|
3936
|
+
// about partial state should re-query and reconcile.
|
|
3937
|
+
errors: [{ batchIndex: 0, error, operationCount: 0 }]
|
|
3938
|
+
};
|
|
3939
|
+
}
|
|
3940
|
+
}
|
|
3941
|
+
// ---------------------------------------------------------------------------
|
|
2645
3942
|
// RPC: admin
|
|
2646
3943
|
// ---------------------------------------------------------------------------
|
|
2647
3944
|
/**
|
|
@@ -2672,12 +3969,27 @@ var FiregraphDO = class extends import_cloudflare_workers.DurableObject {
|
|
|
2672
3969
|
this.state.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
|
|
2673
3970
|
}
|
|
2674
3971
|
};
|
|
3972
|
+
|
|
3973
|
+
// src/id.ts
|
|
3974
|
+
var import_nanoid = require("nanoid");
|
|
3975
|
+
function generateId() {
|
|
3976
|
+
return (0, import_nanoid.nanoid)();
|
|
3977
|
+
}
|
|
2675
3978
|
// Annotate the CommonJS export names for ESM import in node:
|
|
2676
3979
|
0 && (module.exports = {
|
|
3980
|
+
CapabilityNotSupportedError,
|
|
2677
3981
|
DORPCBackend,
|
|
2678
3982
|
FiregraphDO,
|
|
3983
|
+
META_EDGE_TYPE,
|
|
3984
|
+
META_NODE_TYPE,
|
|
2679
3985
|
buildDOSchemaStatements,
|
|
3986
|
+
createCapabilities,
|
|
2680
3987
|
createDOClient,
|
|
2681
|
-
|
|
3988
|
+
createMergedRegistry,
|
|
3989
|
+
createRegistry,
|
|
3990
|
+
createSiblingClient,
|
|
3991
|
+
deleteField,
|
|
3992
|
+
generateId,
|
|
3993
|
+
intersectCapabilities
|
|
2682
3994
|
});
|
|
2683
3995
|
//# sourceMappingURL=index.cjs.map
|