@typicalday/firegraph 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +317 -73
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +222 -3
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +197 -4
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +930 -3
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +213 -12
- package/dist/cloudflare/index.d.ts +213 -12
- package/dist/cloudflare/index.js +562 -281
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +590 -550
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +9 -37
- package/dist/index.d.ts +9 -37
- package/dist/index.js +178 -555
- package/dist/index.js.map +1 -1
- package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
- package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-BsR0lnFL.d.ts +0 -200
- package/dist/backend-Ct-fLlkG.d.cts +0 -200
- package/dist/chunk-AWW4MUJ5.js.map +0 -1
- package/dist/chunk-HONQY4HF.js.map +0 -1
- package/dist/types-DxYLy8Ol.d.cts +0 -770
- package/dist/types-DxYLy8Ol.d.ts +0 -770
|
@@ -0,0 +1,3117 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __create = Object.create;
|
|
3
|
+
var __defProp = Object.defineProperty;
|
|
4
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
5
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
|
+
var __getProtoOf = Object.getPrototypeOf;
|
|
7
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
+
var __esm = (fn, res) => function __init() {
|
|
9
|
+
return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
|
|
10
|
+
};
|
|
11
|
+
var __export = (target, all) => {
|
|
12
|
+
for (var name in all)
|
|
13
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
14
|
+
};
|
|
15
|
+
var __copyProps = (to, from, except, desc) => {
|
|
16
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
17
|
+
for (let key of __getOwnPropNames(from))
|
|
18
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
19
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
20
|
+
}
|
|
21
|
+
return to;
|
|
22
|
+
};
|
|
23
|
+
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
24
|
+
// If the importer is in node compatibility mode or this is not an ESM
|
|
25
|
+
// file that has been converted to a CommonJS file using a Babel-
|
|
26
|
+
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
27
|
+
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
28
|
+
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
29
|
+
mod
|
|
30
|
+
));
|
|
31
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
32
|
+
|
|
33
|
+
// src/internal/serialization-tag.ts
|
|
34
|
+
// Returns true when `value` is a firegraph serialization envelope: a plain
// object that carries the reserved tag key with one of the known type names.
function isTaggedValue(value) {
  const isObject = value !== null && typeof value === "object";
  if (!isObject) return false;
  const tag = value[SERIALIZATION_TAG];
  if (typeof tag !== "string") return false;
  return KNOWN_TYPES.has(tag);
}
var SERIALIZATION_TAG, KNOWN_TYPES;
// Lazy module body for src/internal/serialization-tag.ts (runs once via __esm).
var init_serialization_tag = __esm({
  "src/internal/serialization-tag.ts"() {
    "use strict";
    // Reserved key identifying serialized Firestore values in plain JSON.
    SERIALIZATION_TAG = "__firegraph_ser__";
    KNOWN_TYPES = /* @__PURE__ */ new Set(["Timestamp", "GeoPoint", "VectorValue", "DocumentReference"]);
  }
});
|
|
47
|
+
|
|
48
|
+
// src/serialization.ts
|
|
49
|
+
var serialization_exports = {};
__export(serialization_exports, {
  SERIALIZATION_TAG: () => SERIALIZATION_TAG,
  deserializeFirestoreTypes: () => deserializeFirestoreTypes,
  isTaggedValue: () => isTaggedValue,
  serializeFirestoreTypes: () => serializeFirestoreTypes
});
// ---- Runtime type guards for Firestore SDK values ----
function isTimestamp(value) {
  return value instanceof import_firestore.Timestamp;
}
function isGeoPoint(value) {
  return value instanceof import_firestore.GeoPoint;
}
// Duck-typed check (shape + constructor name) rather than instanceof, so
// references produced by any Firestore SDK instance are recognized.
function isDocumentReference(value) {
  if (value === null || typeof value !== "object") return false;
  const candidate = value;
  if (typeof candidate.path !== "string") return false;
  if (candidate.firestore === void 0) return false;
  if (typeof candidate.id !== "string") return false;
  return candidate.constructor?.name === "DocumentReference";
}
// Duck-typed: relies on the constructor name and the private `_values` array.
function isVectorValue(value) {
  if (value === null || typeof value !== "object") return false;
  const candidate = value;
  if (candidate.constructor?.name !== "VectorValue") return false;
  return Array.isArray(candidate._values);
}
|
|
72
|
+
// Recursively converts Firestore SDK values (Timestamp, GeoPoint,
// VectorValue, DocumentReference) inside `data` into plain JSON-safe
// tagged envelopes keyed by SERIALIZATION_TAG. Everything else is copied
// structurally; primitives pass through unchanged.
function serializeFirestoreTypes(data) {
  return serializeValue(data);
}
function serializeValue(value) {
  if (value === null || value === void 0) return value;
  if (typeof value !== "object") return value;
  if (isTimestamp(value)) {
    return {
      [SERIALIZATION_TAG]: "Timestamp",
      seconds: value.seconds,
      nanoseconds: value.nanoseconds
    };
  }
  if (isGeoPoint(value)) {
    return {
      [SERIALIZATION_TAG]: "GeoPoint",
      latitude: value.latitude,
      longitude: value.longitude
    };
  }
  if (isDocumentReference(value)) {
    // Only the path is kept; the reference is rebuilt on deserialization.
    return { [SERIALIZATION_TAG]: "DocumentReference", path: value.path };
  }
  if (isVectorValue(value)) {
    const vector = value;
    // Prefer the public accessor; fall back to the private field.
    const raw = typeof vector.toArray === "function" ? vector.toArray() : vector._values;
    return { [SERIALIZATION_TAG]: "VectorValue", values: [...raw] };
  }
  if (Array.isArray(value)) {
    return value.map((element) => serializeValue(element));
  }
  const plain = {};
  for (const key of Object.keys(value)) {
    plain[key] = serializeValue(value[key]);
  }
  return plain;
}
|
|
109
|
+
// Recursively rebuilds Firestore SDK values from the tagged envelopes that
// serializeFirestoreTypes produced. `db` (optional Firestore instance) is
// required to reconstruct DocumentReference values; without it the tagged
// object is returned as-is and a one-time warning is emitted.
function deserializeFirestoreTypes(data, db) {
  return deserializeValue(data, db);
}
function deserializeValue(value, db) {
  if (value === null || value === void 0) return value;
  if (typeof value !== "object") return value;
  // Already-live SDK instances pass through untouched.
  if (isTimestamp(value) || isGeoPoint(value) || isDocumentReference(value) || isVectorValue(value)) {
    return value;
  }
  if (Array.isArray(value)) {
    return value.map((element) => deserializeValue(element, db));
  }
  const obj = value;
  if (!isTaggedValue(obj)) {
    // Plain object: rebuild each property recursively.
    const rebuilt = {};
    for (const key of Object.keys(obj)) {
      rebuilt[key] = deserializeValue(obj[key], db);
    }
    return rebuilt;
  }
  // Tagged envelope: reconstruct the SDK value. Malformed envelopes are
  // returned unchanged rather than throwing.
  switch (obj[SERIALIZATION_TAG]) {
    case "Timestamp":
      return typeof obj.seconds === "number" && typeof obj.nanoseconds === "number"
        ? new import_firestore.Timestamp(obj.seconds, obj.nanoseconds)
        : obj;
    case "GeoPoint":
      return typeof obj.latitude === "number" && typeof obj.longitude === "number"
        ? new import_firestore.GeoPoint(obj.latitude, obj.longitude)
        : obj;
    case "VectorValue":
      return Array.isArray(obj.values) ? import_firestore.FieldValue.vector(obj.values) : obj;
    case "DocumentReference": {
      if (typeof obj.path !== "string") return obj;
      if (db) return db.doc(obj.path);
      if (!_docRefWarned) {
        _docRefWarned = true;
        console.warn(
          "[firegraph] DocumentReference encountered during migration deserialization but no Firestore instance available. The reference will remain as a tagged object with its path. Enable write-back for full reconstruction."
        );
      }
      return obj;
    }
    default:
      return obj;
  }
}
|
|
156
|
+
var import_firestore, _docRefWarned;
|
|
157
|
+
var init_serialization = __esm({
|
|
158
|
+
"src/serialization.ts"() {
|
|
159
|
+
"use strict";
|
|
160
|
+
import_firestore = require("@google-cloud/firestore");
|
|
161
|
+
init_serialization_tag();
|
|
162
|
+
init_serialization_tag();
|
|
163
|
+
_docRefWarned = false;
|
|
164
|
+
}
|
|
165
|
+
});
|
|
166
|
+
|
|
167
|
+
// src/firestore-standard/index.ts
|
|
168
|
+
// Public surface of the firestore-standard entry point; every export is a
// lazy getter so declaration order below does not matter.
var firestore_standard_exports = {};
__export(firestore_standard_exports, {
  META_EDGE_TYPE: () => META_EDGE_TYPE,
  META_NODE_TYPE: () => META_NODE_TYPE,
  createFirestoreStandardBackend: () => createFirestoreStandardBackend,
  createGraphClient: () => createGraphClient,
  createMergedRegistry: () => createMergedRegistry,
  createRegistry: () => createRegistry,
  generateId: () => generateId
});
// CommonJS entry point: re-export everything with __esModule set.
module.exports = __toCommonJS(firestore_standard_exports);
|
|
179
|
+
|
|
180
|
+
// src/docid.ts
|
|
181
|
+
var import_node_crypto = require("crypto");
|
|
182
|
+
|
|
183
|
+
// src/internal/constants.ts
|
|
184
|
+
var NODE_RELATION = "is";
|
|
185
|
+
var DEFAULT_QUERY_LIMIT = 500;
|
|
186
|
+
var BUILTIN_FIELDS = /* @__PURE__ */ new Set([
|
|
187
|
+
"aType",
|
|
188
|
+
"aUid",
|
|
189
|
+
"axbType",
|
|
190
|
+
"bType",
|
|
191
|
+
"bUid",
|
|
192
|
+
"createdAt",
|
|
193
|
+
"updatedAt"
|
|
194
|
+
]);
|
|
195
|
+
var SHARD_SEPARATOR = ":";
|
|
196
|
+
|
|
197
|
+
// src/docid.ts
|
|
198
|
+
function computeNodeDocId(uid) {
|
|
199
|
+
return uid;
|
|
200
|
+
}
|
|
201
|
+
function computeEdgeDocId(aUid, axbType, bUid) {
|
|
202
|
+
const composite = `${aUid}${SHARD_SEPARATOR}${axbType}${SHARD_SEPARATOR}${bUid}`;
|
|
203
|
+
const hash = (0, import_node_crypto.createHash)("sha256").update(composite).digest("hex");
|
|
204
|
+
const shard = hash[0];
|
|
205
|
+
return `${shard}${SHARD_SEPARATOR}${aUid}${SHARD_SEPARATOR}${axbType}${SHARD_SEPARATOR}${bUid}`;
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
// src/internal/write-plan.ts
|
|
209
|
+
init_serialization_tag();
// Sentinel marking "delete this field" inside update payloads. Created via
// Symbol.for so every copy of this module shares the same symbol identity.
var DELETE_FIELD = /* @__PURE__ */ Symbol.for("firegraph.deleteField");
// True only for the shared delete sentinel itself.
function isDeleteSentinel(value) {
  return DELETE_FIELD === value;
}
|
|
214
|
+
// Constructor names of Firestore SDK value types and field transforms that
// are always written as a unit (never descended into).
var FIRESTORE_TERMINAL_CTOR = /* @__PURE__ */ new Set([
  "Timestamp",
  "GeoPoint",
  "VectorValue",
  "DocumentReference",
  "FieldValue",
  "NumericIncrementTransform",
  "ArrayUnionTransform",
  "ArrayRemoveTransform",
  "ServerTimestampTransform",
  "DeleteTransform"
]);
// A value is "terminal" when update flattening must stop recursing:
// primitives, null, arrays, tagged envelopes, and any non-plain object.
// Only plain objects ({}-literal or null-prototype) are non-terminal.
function isTerminalValue(value) {
  if (value === null) return true;
  if (typeof value !== "object") return true;
  if (Array.isArray(value) || isTaggedValue(value)) return true;
  const proto = Object.getPrototypeOf(value);
  const isPlain = proto === null || proto === Object.prototype;
  if (isPlain) return false;
  // NOTE(review): every non-plain object is terminal regardless of this
  // lookup — the FIRESTORE_TERMINAL_CTOR check documents the expected class
  // names but never changes the result (the fall-through also returns true).
  const ctor = value.constructor;
  if (ctor && typeof ctor.name === "string" && FIRESTORE_TERMINAL_CTOR.has(ctor.name)) return true;
  return true;
}
|
|
238
|
+
// Keys embedded in update paths must be simple identifiers so they can be
// spliced into SQLite JSON paths without escaping.
var SAFE_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
// `replaceData` and `dataOps` are mutually exclusive update forms; reject
// payloads that carry both at once.
function assertUpdatePayloadExclusive(update) {
  const hasReplace = update.replaceData !== void 0;
  const hasOps = update.dataOps !== void 0;
  if (hasReplace && hasOps) {
    throw new Error(
      "firegraph: UpdatePayload cannot specify both `replaceData` and `dataOps`. Use one or the other \u2014 `replaceData` is the migration-write-back form, `dataOps` is the standard partial-update form."
    );
  }
}
|
|
246
|
+
// Rejects any deleteField() sentinel anywhere in a full-data write payload;
// `callerLabel` names the API (putNode/replaceNode/...) for the message.
function assertNoDeleteSentinels(data, callerLabel) {
  const describe = (path) =>
    path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
  walkForDeleteSentinels(data, [], { kind: "root" }, ({ path }) => {
    const where = describe(path);
    throw new Error(
      `firegraph: ${callerLabel} payload contains a deleteField() sentinel at ${where}. deleteField() is only valid inside updateNode/updateEdge \u2014 full-data writes (put*, replace*) cannot delete individual fields. Use updateNode with a deleteField() value, or omit the field from the replace payload.`
    );
  });
}
|
|
254
|
+
// Depth-first scan of an update payload that invokes `visit` once per
// deleteField() sentinel, reporting the path to it and the kind of container
// holding it. Tagged envelopes and non-plain objects are not descended into.
function walkForDeleteSentinels(node, path, parent, visit) {
  if (node === null || node === void 0) return;
  if (isDeleteSentinel(node)) {
    visit({ path, parent });
    return;
  }
  if (typeof node !== "object" || isTaggedValue(node)) return;
  if (Array.isArray(node)) {
    node.forEach((element, index) => {
      walkForDeleteSentinels(element, [...path, String(index)], { kind: "array", index }, visit);
    });
    return;
  }
  const proto = Object.getPrototypeOf(node);
  const isPlain = proto === null || proto === Object.prototype;
  if (!isPlain) return;
  for (const [key, child] of Object.entries(node)) {
    walkForDeleteSentinels(child, [...path, key], { kind: "object" }, visit);
  }
}
|
|
275
|
+
// Throws unless every segment of `path` matches SAFE_KEY_RE; unsafe keys
// cannot be embedded in SQLite JSON paths.
function assertSafePath(path) {
  for (const segment of path) {
    if (SAFE_KEY_RE.test(segment)) continue;
    const rendered = path.map((p) => JSON.stringify(p)).join(" > ");
    throw new Error(
      `firegraph: unsafe object key ${JSON.stringify(segment)} at path ${rendered}. Keys used inside update payloads must match /^[A-Za-z_][A-Za-z0-9_-]*$/ so they can be embedded safely in SQLite JSON paths.`
    );
  }
}
// Flattens a nested partial-update object into a list of
// { path, value, delete } operations (see walk for the flattening rules).
function flattenPatch(data) {
  const ops = [];
  walk(data, [], ops);
  return ops;
}
|
|
289
|
+
// Guards a terminal array value in an update payload: any deleteField()
// sentinel found inside it would be silently dropped by JSON serialization,
// so it is rejected with a message tailored to where the sentinel sits.
function assertNoDeleteSentinelsInArrayValue(arr, arrayPath) {
  walkForDeleteSentinels(arr, arrayPath, { kind: "root" }, ({ parent }) => {
    const where = arrayPath.length === 0 ? "<root>" : arrayPath.map((p) => JSON.stringify(p)).join(" > ");
    const message = parent.kind === "array"
      ? `firegraph: deleteField() sentinel at index ${parent.index} inside an array at path ${where}. Arrays are terminal in update payloads (replaced as a unit), so the sentinel would be silently dropped by JSON serialization. To remove the field entirely, pass deleteField() in place of the whole array.`
      : `firegraph: deleteField() sentinel inside an array element at path ${where}. Arrays are terminal in update payloads \u2014 the sentinel would be silently dropped by JSON serialization.`;
    throw new Error(message);
  });
}
|
|
302
|
+
// Core flattening recursion for update payloads. Emits one op per terminal
// leaf as { path, value, delete }. Rules:
//   - deleteField() sentinels become delete ops (not allowed at the root);
//   - terminal values (primitives, arrays, tagged envelopes, class
//     instances) are written as a unit;
//   - empty plain objects are preserved as explicit {} writes;
//   - the reserved serialization tag key is rejected anywhere in the payload.
function walk(node, path, out) {
  if (node === void 0) return;
  if (isDeleteSentinel(node)) {
    if (path.length === 0) {
      throw new Error("firegraph: deleteField() cannot be the entire update payload.");
    }
    assertSafePath(path);
    out.push({ path: [...path], value: void 0, delete: true });
    return;
  }
  if (isTerminalValue(node)) {
    if (path.length === 0) {
      const kindName = node === null ? "null" : Array.isArray(node) ? "array" : typeof node;
      throw new Error("firegraph: update payload must be a plain object. Got " + kindName + ".");
    }
    // Arrays are replaced wholesale; sentinels buried inside them would be
    // silently dropped, so reject those eagerly.
    if (Array.isArray(node)) {
      assertNoDeleteSentinelsInArrayValue(node, path);
    }
    assertSafePath(path);
    out.push({ path: [...path], value: node, delete: false });
    return;
  }
  const keys = Object.keys(node);
  if (keys.length === 0) {
    // Preserve explicitly-empty objects (a bare {} root is a no-op).
    if (path.length > 0) {
      assertSafePath(path);
      out.push({ path: [...path], value: {}, delete: false });
    }
    return;
  }
  for (const key of keys) {
    if (key === SERIALIZATION_TAG) {
      const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
      throw new Error(
        `firegraph: update payload contains a literal \`${SERIALIZATION_TAG}\` key at ${where}. That key is reserved for firegraph's serialization envelope and cannot appear on a plain object in user data. Use a different field name, or pass a recognized tagged value through replaceNode/replaceEdge instead.`
      );
    }
    walk(node[key], [...path, key], out);
  }
}
|
|
344
|
+
|
|
345
|
+
// src/batch.ts
|
|
346
|
+
// src/batch.ts
// A node is stored as a self-edge: (aType) -[is]-> (aType) with aUid === bUid.
function buildWritableNodeRecord(aType, uid, data) {
  return {
    aType,
    aUid: uid,
    axbType: NODE_RELATION,
    bType: aType,
    bUid: uid,
    data
  };
}
// Edge records carry the full triple plus both endpoint uids verbatim.
function buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data) {
  return { aType, aUid, axbType, bType, bUid, data };
}
|
|
352
|
+
// Batch facade that translates graph-level operations (nodes/edges) into
// backend document writes, all flushed together by commit(). When a
// registry is supplied, payloads are validated and stamped with the entry's
// schema version before being queued.
var GraphBatchImpl = class {
  constructor(backend, registry, scopePath = "") {
    this.backend = backend;
    this.registry = registry;
    this.scopePath = scopePath;
  }
  // --- full-data writes (merge vs replace) ------------------------------
  async putNode(aType, uid, data) {
    this.writeNode(aType, uid, data, "merge");
  }
  async putEdge(aType, aUid, axbType, bType, bUid, data) {
    this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
  }
  async replaceNode(aType, uid, data) {
    this.writeNode(aType, uid, data, "replace");
  }
  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
    this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
  }
  // Shared node-write path: reject delete sentinels, validate against the
  // registry (if any), stamp the schema version, queue the document write.
  writeNode(aType, uid, data, mode) {
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
    this.registry?.validate(aType, NODE_RELATION, aType, data, this.scopePath);
    const record = buildWritableNodeRecord(aType, uid, data);
    const entry = this.registry?.lookup(aType, NODE_RELATION, aType);
    if (entry?.schemaVersion && entry.schemaVersion > 0) {
      record.v = entry.schemaVersion;
    }
    this.backend.setDoc(computeNodeDocId(uid), record, mode);
  }
  // Shared edge-write path; mirrors writeNode for arbitrary triples.
  writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
    this.registry?.validate(aType, axbType, bType, data, this.scopePath);
    const record = buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data);
    const entry = this.registry?.lookup(aType, axbType, bType);
    if (entry?.schemaVersion && entry.schemaVersion > 0) {
      record.v = entry.schemaVersion;
    }
    this.backend.setDoc(computeEdgeDocId(aUid, axbType, bUid), record, mode);
  }
  // --- partial updates --------------------------------------------------
  async updateNode(uid, data) {
    this.backend.updateDoc(computeNodeDocId(uid), { dataOps: flattenPatch(data) });
  }
  async updateEdge(aUid, axbType, bUid, data) {
    this.backend.updateDoc(computeEdgeDocId(aUid, axbType, bUid), { dataOps: flattenPatch(data) });
  }
  // --- deletions --------------------------------------------------------
  async removeNode(uid) {
    this.backend.deleteDoc(computeNodeDocId(uid));
  }
  async removeEdge(aUid, axbType, bUid) {
    this.backend.deleteDoc(computeEdgeDocId(aUid, axbType, bUid));
  }
  // Flushes every queued operation through the backend.
  async commit() {
    await this.backend.commit();
  }
};
|
|
420
|
+
|
|
421
|
+
// src/dynamic-registry.ts
|
|
422
|
+
var import_node_crypto3 = require("crypto");
|
|
423
|
+
|
|
424
|
+
// src/errors.ts
|
|
425
|
+
// src/errors.ts
// Base class for all firegraph errors. `code` is a stable machine-readable
// discriminator callers can switch on instead of parsing messages.
var FiregraphError = class extends Error {
  constructor(message, code) {
    super(message);
    this.name = "FiregraphError";
    this.code = code;
  }
};
// Schema validation failure; `details` carries the raw validator errors.
var ValidationError = class extends FiregraphError {
  constructor(message, details) {
    super(message, "VALIDATION_ERROR");
    this.name = "ValidationError";
    this.details = details;
  }
};
// Thrown when a write names a (aType, axbType, bType) triple the registry
// does not define.
var RegistryViolationError = class extends FiregraphError {
  constructor(aType, axbType, bType) {
    super(`Unregistered triple: (${aType}) -[${axbType}]-> (${bType})`, "REGISTRY_VIOLATION");
    this.name = "RegistryViolationError";
  }
};
var InvalidQueryError = class extends FiregraphError {
  constructor(message) {
    super(message, "INVALID_QUERY");
    this.name = "InvalidQueryError";
  }
};
var DynamicRegistryError = class extends FiregraphError {
  constructor(message) {
    super(message, "DYNAMIC_REGISTRY_ERROR");
    this.name = "DynamicRegistryError";
  }
};
var QuerySafetyError = class extends FiregraphError {
  constructor(message) {
    super(message, "QUERY_SAFETY");
    this.name = "QuerySafetyError";
  }
};
// Thrown when a registered triple is used outside the scopes it allows;
// an empty scopePath renders as "root".
var RegistryScopeError = class extends FiregraphError {
  constructor(aType, axbType, bType, scopePath, allowedIn) {
    super(
      `Type (${aType}) -[${axbType}]-> (${bType}) is not allowed at scope "${scopePath || "root"}". Allowed in: [${allowedIn.join(", ")}]`,
      "REGISTRY_SCOPE"
    );
    this.name = "RegistryScopeError";
  }
};
var MigrationError = class extends FiregraphError {
  constructor(message) {
    super(message, "MIGRATION_ERROR");
    this.name = "MigrationError";
  }
};
|
|
478
|
+
|
|
479
|
+
// src/json-schema.ts
|
|
480
|
+
var import_json_schema = require("@cfworker/json-schema");
|
|
481
|
+
// Cap on how many individual schema errors are rendered into one message.
var MAX_RENDERED_ERRORS = 20;
// Compiles a JSON Schema (draft 2020-12, non-short-circuiting) into a
// validator function that throws a ValidationError listing up to
// MAX_RENDERED_ERRORS failures; `label` names the triple in the message.
function compileSchema(schema, label) {
  const validator = new import_json_schema.Validator(schema, "2020-12", false);
  return (data) => {
    const outcome = validator.validate(data);
    if (outcome.valid) return;
    const total = outcome.errors.length;
    const rendered = outcome.errors.slice(0, MAX_RENDERED_ERRORS).map(formatError).join("; ");
    const suffix = total > MAX_RENDERED_ERRORS ? ` (+${total - MAX_RENDERED_ERRORS} more)` : "";
    const prefix = label ? " for " + label : "";
    throw new ValidationError(`Data validation failed${prefix}: ${rendered}${suffix}`, outcome.errors);
  };
}
// Renders one @cfworker/json-schema error as "<path>: [keyword] <detail>";
// the leading "#" of the instance location is stripped, empty becomes "/".
function formatError(err) {
  const path = err.instanceLocation.replace(/^#/, "") || "/";
  const keyword = err.keyword ? `[${err.keyword}] ` : "";
  return err.error ? `${path}: ${keyword}${err.error}` : path;
}
|
|
503
|
+
|
|
504
|
+
// src/migration.ts
|
|
505
|
+
// src/migration.ts
// Applies an ordered chain of migration steps to `data`, carrying it from
// `currentVersion` up to `targetVersion`. Steps are sorted by fromVersion
// and applied only when they pick up exactly where the data currently is.
// Throws MigrationError when a step fails, returns a non-object, or the
// chain does not land on targetVersion. Returns the migrated data (the
// input object is shallow-copied, never mutated).
async function applyMigrationChain(data, currentVersion, targetVersion, migrations) {
  const sorted = [...migrations].sort((a, b) => a.fromVersion - b.fromVersion);
  let result = { ...data };
  let version = currentVersion;
  for (const step of sorted) {
    if (step.fromVersion !== version) continue;
    try {
      result = await step.up(result);
    } catch (err) {
      if (err instanceof MigrationError) throw err;
      // Fix: `err` may be any thrown value, not just an Error — render
      // non-Errors explicitly instead of interpolating an undefined
      // `.message` into the diagnostic.
      const reason = err instanceof Error ? err.message : String(err);
      throw new MigrationError(
        `Migration from v${step.fromVersion} to v${step.toVersion} failed: ${reason}`
      );
    }
    if (!result || typeof result !== "object") {
      throw new MigrationError(
        `Migration from v${step.fromVersion} to v${step.toVersion} returned invalid data (expected object)`
      );
    }
    version = step.toVersion;
  }
  if (version !== targetVersion) {
    throw new MigrationError(
      `Incomplete migration chain: reached v${version} but target is v${targetVersion}`
    );
  }
  return result;
}
|
|
534
|
+
// Statically checks a migration chain: every step must increase the
// version, fromVersions must be unique, and the steps (sorted) must walk
// v0 up to the maximum declared toVersion with no gaps. `label` prefixes
// all error messages. An empty chain is trivially valid.
function validateMigrationChain(migrations, label) {
  if (migrations.length === 0) return;
  const fromVersions = /* @__PURE__ */ new Set();
  for (const step of migrations) {
    if (step.toVersion <= step.fromVersion) {
      throw new MigrationError(
        `${label}: migration step has toVersion (${step.toVersion}) <= fromVersion (${step.fromVersion})`
      );
    }
    if (fromVersions.has(step.fromVersion)) {
      throw new MigrationError(
        `${label}: duplicate migration step for fromVersion ${step.fromVersion}`
      );
    }
    fromVersions.add(step.fromVersion);
  }
  const ordered = [...migrations].sort((a, b) => a.fromVersion - b.fromVersion);
  const targetVersion = Math.max(...migrations.map((m) => m.toVersion));
  let reached = 0;
  for (const step of ordered) {
    if (step.fromVersion === reached) {
      reached = step.toVersion;
    } else if (step.fromVersion > reached) {
      throw new MigrationError(
        `${label}: migration chain has a gap \u2014 no step covers v${reached} \u2192 v${step.fromVersion}`
      );
    }
  }
  if (reached !== targetVersion) {
    throw new MigrationError(
      `${label}: migration chain does not reach v${targetVersion} (stuck at v${reached})`
    );
  }
}
|
|
568
|
+
// Migrates one stored record up to its registry entry's current schema
// version. Returns { record, migrated, writeBack }; `record` is the input
// unchanged when no migration applies (no chain, no schemaVersion, or the
// stored version is already current).
async function migrateRecord(record, registry, globalWriteBack = "off") {
  const entry = registry.lookup(record.aType, record.axbType, record.bType);
  const hasChain = Boolean(entry?.migrations?.length) && Boolean(entry.schemaVersion);
  if (!hasChain) {
    return { record, migrated: false, writeBack: "off" };
  }
  const currentVersion = record.v ?? 0;
  if (currentVersion >= entry.schemaVersion) {
    return { record, migrated: false, writeBack: "off" };
  }
  const migratedData = await applyMigrationChain(
    record.data,
    currentVersion,
    entry.schemaVersion,
    entry.migrations
  );
  // Per-entry write-back preference wins over the backend-wide default.
  const writeBack = entry.migrationWriteBack ?? globalWriteBack ?? "off";
  return {
    record: { ...record, data: migratedData, v: entry.schemaVersion },
    migrated: true,
    writeBack
  };
}
// Migrates a batch of records concurrently.
async function migrateRecords(records, registry, globalWriteBack = "off") {
  return Promise.all(records.map((record) => migrateRecord(record, registry, globalWriteBack)));
}
|
|
593
|
+
|
|
594
|
+
// src/scope.ts
|
|
595
|
+
// src/scope.ts
// Glob-style scope matching. Patterns are "/"-separated segments where "*"
// matches exactly one segment, "**" matches zero or more segments, and the
// literal pattern "root" matches only the empty scope path.
function matchScope(scopePath, pattern) {
  if (pattern === "root") return scopePath === "";
  if (pattern === "**") return true;
  const pathSegments = scopePath === "" ? [] : scopePath.split("/");
  return matchSegments(pathSegments, 0, pattern.split("/"), 0);
}
// True when `scopePath` matches at least one pattern; an empty or missing
// pattern list means "allowed everywhere".
function matchScopeAny(scopePath, patterns) {
  if (!patterns || patterns.length === 0) return true;
  return patterns.some((pattern) => matchScope(scopePath, pattern));
}
// Recursive segment matcher; `pi` and `qi` index into path and pattern.
function matchSegments(path, pi, pattern, qi) {
  const pathDone = pi === path.length;
  if (qi === pattern.length) return pathDone;
  const seg = pattern[qi];
  if (seg === "**") {
    // Trailing "**" swallows the rest of the path.
    if (qi === pattern.length - 1) return true;
    // Otherwise try every possible number of skipped segments.
    for (let skip = 0; skip <= path.length - pi; skip++) {
      if (matchSegments(path, pi + skip, pattern, qi + 1)) return true;
    }
    return false;
  }
  if (pathDone) return false;
  if (seg === "*" || path[pi] === seg) {
    return matchSegments(path, pi + 1, pattern, qi + 1);
  }
  return false;
}
|
|
626
|
+
|
|
627
|
+
// src/registry.ts
|
|
628
|
+
// src/registry.ts
// Canonical map key for a registered (aType, axbType, bType) triple.
function tripleKey(aType, axbType, bType) {
  return [aType, axbType, bType].join(":");
}
// Key for an entry-shaped object carrying the triple fields.
function tripleKeyFor(e) {
  const { aType, axbType, bType } = e;
  return tripleKey(aType, axbType, bType);
}
|
|
634
|
+
/**
 * Build a registry from either a flat entry list or a discovery object
 * (expanded via discoveryToEntries). Validates targetGraph shape and
 * migration chains, compiles per-entry JSON Schema validators, and builds
 * lookup indexes by triple key, by axbType, and by aType targetGraph
 * topology. Returns an object with lookup/lookupByAxbType/
 * getSubgraphTopology/validate/entries methods.
 *
 * NOTE(review): when `input` is an array, the caller's entry objects are
 * mutated in place (schemaVersion is assigned below) — confirm callers
 * expect this.
 */
function createRegistry(input) {
  // tripleKey -> { entry, validate? }
  const map = /* @__PURE__ */ new Map();
  let entries;
  if (Array.isArray(input)) {
    entries = input;
  } else {
    entries = discoveryToEntries(input);
  }
  // Frozen snapshot returned by entries(). The freeze is shallow, so the
  // schemaVersion assignments below are still visible through it.
  const entryList = Object.freeze([...entries]);
  for (const entry of entries) {
    // targetGraph must be a single path segment.
    if (entry.targetGraph && entry.targetGraph.includes("/")) {
      throw new ValidationError(
        `Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType}) has invalid targetGraph "${entry.targetGraph}" \u2014 must be a single segment (no "/")`
      );
    }
    if (entry.migrations?.length) {
      const label = `Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`;
      validateMigrationChain(entry.migrations, label);
      // Effective schema version is the highest migration target.
      entry.schemaVersion = Math.max(...entry.migrations.map((m) => m.toVersion));
    } else {
      entry.schemaVersion = void 0;
    }
    const key = tripleKey(entry.aType, entry.axbType, entry.bType);
    // Compile a validator only when the entry declares a JSON Schema.
    const validator = entry.jsonSchema ? compileSchema(entry.jsonSchema, `(${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`) : void 0;
    map.set(key, { entry, validate: validator });
  }
  // axbType -> frozen array of entries sharing that relation type.
  const axbIndex = /* @__PURE__ */ new Map();
  const axbBuild = /* @__PURE__ */ new Map();
  for (const entry of entries) {
    const existing = axbBuild.get(entry.axbType);
    if (existing) {
      existing.push(entry);
    } else {
      axbBuild.set(entry.axbType, [entry]);
    }
  }
  for (const [key, arr] of axbBuild) {
    axbIndex.set(key, Object.freeze(arr));
  }
  // aType -> frozen array of entries, deduplicated by targetGraph: only the
  // first entry seen per (aType, targetGraph) pair is kept.
  const topologyIndex = /* @__PURE__ */ new Map();
  const topologyBuild = /* @__PURE__ */ new Map();
  const topologySeen = /* @__PURE__ */ new Map();
  for (const entry of entries) {
    if (!entry.targetGraph) continue;
    let seen = topologySeen.get(entry.aType);
    if (!seen) {
      seen = /* @__PURE__ */ new Set();
      topologySeen.set(entry.aType, seen);
    }
    if (seen.has(entry.targetGraph)) continue;
    seen.add(entry.targetGraph);
    const existing = topologyBuild.get(entry.aType);
    if (existing) {
      existing.push(entry);
    } else {
      topologyBuild.set(entry.aType, [entry]);
    }
  }
  for (const [key, arr] of topologyBuild) {
    topologyIndex.set(key, Object.freeze(arr));
  }
  return {
    // Exact triple lookup; undefined when not registered.
    lookup(aType, axbType, bType) {
      return map.get(tripleKey(aType, axbType, bType))?.entry;
    },
    // All entries sharing a relation type (empty array when none).
    lookupByAxbType(axbType) {
      return axbIndex.get(axbType) ?? [];
    },
    // Entries with a targetGraph, one per distinct graph, for this aType.
    getSubgraphTopology(aType) {
      return topologyIndex.get(aType) ?? [];
    },
    /**
     * Validate that the triple is registered, that the scope (when given)
     * is allowed, and that the data passes the compiled schema validator.
     * Throws RegistryViolationError / RegistryScopeError / ValidationError.
     */
    validate(aType, axbType, bType, data, scopePath) {
      const rec = map.get(tripleKey(aType, axbType, bType));
      if (!rec) {
        throw new RegistryViolationError(aType, axbType, bType);
      }
      // Scope is only enforced when a path is supplied AND the entry
      // declares a non-empty allowedIn list.
      if (scopePath !== void 0 && rec.entry.allowedIn && rec.entry.allowedIn.length > 0) {
        if (!matchScopeAny(scopePath, rec.entry.allowedIn)) {
          throw new RegistryScopeError(aType, axbType, bType, scopePath, rec.entry.allowedIn);
        }
      }
      if (rec.validate) {
        try {
          rec.validate(data);
        } catch (err) {
          // ValidationError from the validator passes through unchanged;
          // anything else is wrapped with the triple for context.
          if (err instanceof ValidationError) throw err;
          throw new ValidationError(
            `Data validation failed for (${aType}) -[${axbType}]-> (${bType})`,
            err
          );
        }
      }
    },
    // Frozen snapshot of all entries.
    entries() {
      return entryList;
    }
  };
}
|
|
732
|
+
/**
 * Combine two registries into a read-only view where `base` always wins:
 * base entries shadow extension entries with the same triple key, and
 * validation is routed to whichever registry owns the triple.
 */
function createMergedRegistry(base, extension) {
  const baseKeys = new Set(base.entries().map(tripleKeyFor));
  return {
    lookup(aType, axbType, bType) {
      return base.lookup(aType, axbType, bType) ?? extension.lookup(aType, axbType, bType);
    },
    lookupByAxbType(axbType) {
      const fromBase = base.lookupByAxbType(axbType);
      const fromExt = extension.lookupByAxbType(axbType);
      // Fast paths: one side empty means the other can be returned as-is.
      if (fromExt.length === 0) return fromBase;
      if (fromBase.length === 0) return fromExt;
      const known = new Set(fromBase.map(tripleKeyFor));
      const extras = fromExt.filter((entry) => !known.has(tripleKeyFor(entry)));
      return Object.freeze([...fromBase, ...extras]);
    },
    getSubgraphTopology(aType) {
      const fromBase = base.getSubgraphTopology(aType);
      const fromExt = extension.getSubgraphTopology(aType);
      if (fromExt.length === 0) return fromBase;
      if (fromBase.length === 0) return fromExt;
      // Deduplicate by targetGraph, base first.
      const graphs = new Set(fromBase.map((entry) => entry.targetGraph));
      const merged = [...fromBase];
      for (const candidate of fromExt) {
        if (graphs.has(candidate.targetGraph)) continue;
        graphs.add(candidate.targetGraph);
        merged.push(candidate);
      }
      return Object.freeze(merged);
    },
    validate(aType, axbType, bType, data, scopePath) {
      // Route to the registry that owns the triple; base takes precedence.
      const owner = baseKeys.has(tripleKey(aType, axbType, bType)) ? base : extension;
      return owner.validate(aType, axbType, bType, data, scopePath);
    },
    entries() {
      const fromExt = extension.entries();
      if (fromExt.length === 0) return base.entries();
      const extras = fromExt.filter((entry) => !baseKeys.has(tripleKeyFor(entry)));
      return Object.freeze([...base.entries(), ...extras]);
    }
  };
}
|
|
786
|
+
/**
 * Expand a discovery object (maps of node/edge type definitions) into flat
 * registry entries. Nodes become self-edges (axbType = NODE_RELATION);
 * each edge is fanned out across the cross product of its from/to types.
 * Throws ValidationError when a resolved targetGraph contains "/".
 */
function discoveryToEntries(discovery) {
  const entries = [];
  const asList = (value) => Array.isArray(value) ? value : [value];
  for (const [name, entity] of discovery.nodes) {
    entries.push({
      aType: name,
      axbType: NODE_RELATION,
      bType: name,
      jsonSchema: entity.schema,
      description: entity.description,
      titleField: entity.titleField,
      subtitleField: entity.subtitleField,
      allowedIn: entity.allowedIn,
      migrations: entity.migrations,
      migrationWriteBack: entity.migrationWriteBack,
      indexes: entity.indexes
    });
  }
  for (const [axbType, entity] of discovery.edges) {
    const topology = entity.topology;
    // Edges without a declared topology are not materialized.
    if (!topology) continue;
    // Entity-level targetGraph overrides the topology-level one.
    const resolvedTargetGraph = entity.targetGraph ?? topology.targetGraph;
    if (resolvedTargetGraph && resolvedTargetGraph.includes("/")) {
      throw new ValidationError(
        `Edge "${axbType}" has invalid targetGraph "${resolvedTargetGraph}" \u2014 must be a single segment (no "/")`
      );
    }
    for (const aType of asList(topology.from)) {
      for (const bType of asList(topology.to)) {
        entries.push({
          aType,
          axbType,
          bType,
          jsonSchema: entity.schema,
          description: entity.description,
          inverseLabel: topology.inverseLabel,
          titleField: entity.titleField,
          subtitleField: entity.subtitleField,
          allowedIn: entity.allowedIn,
          targetGraph: resolvedTargetGraph,
          migrations: entity.migrations,
          migrationWriteBack: entity.migrationWriteBack,
          indexes: entity.indexes
        });
      }
    }
  }
  return entries;
}
|
|
836
|
+
|
|
837
|
+
// src/sandbox.ts
|
|
838
|
+
var import_node_crypto2 = require("crypto");
// Bundler shim for import.meta in the CJS build.
// NOTE(review): this stays an empty object here, so import_meta.url (used as
// workerData.parentUrl in ensureWorker) is undefined unless the bundler
// patches it — confirm against the build setup.
var import_meta = {};
// Lazily created singleton sandbox worker (see ensureWorker()).
var _worker = null;
// Monotonic id for worker round-trips; paired with entries in _pending.
var _requestId = 0;
// In-flight worker requests: id -> { resolve, reject }.
var _pending = /* @__PURE__ */ new Map();
|
|
843
|
+
// Source for the sandbox worker thread, evaluated with `new Worker(src,
// { eval: true })`. It locks the realm down with SES, then compiles and runs
// migration functions inside a Compartment, communicating over parentPort
// messages ({compile} -> {compiled}, {execute} -> {result|error}).
// The array elements below are runtime code — do not edit casually.
var WORKER_SOURCE = [
  `'use strict';`,
  `var _wt = require('node:worker_threads');`,
  `var _mod = require('node:module');`,
  `var _crypto = require('node:crypto');`,
  `var parentPort = _wt.parentPort;`,
  `var workerData = _wt.workerData;`,
  ``,
  `// Load SES using the parent module's resolution context`,
  `var esmRequire = _mod.createRequire(workerData.parentUrl);`,
  `esmRequire('ses');`,
  ``,
  // Harden the worker realm before any migration code runs.
  `lockdown({`,
  ` errorTaming: 'unsafe',`,
  ` consoleTaming: 'unsafe',`,
  ` evalTaming: 'safe-eval',`,
  ` overrideTaming: 'moderate',`,
  ` stackFiltering: 'verbose'`,
  `});`,
  ``,
  `// Defense-in-depth: verify lockdown() actually hardened JSON.`,
  `if (!Object.isFrozen(JSON)) {`,
  ` throw new Error('SES lockdown failed: JSON is not frozen');`,
  `}`,
  ``,
  // Compiled-function cache, keyed by sha256 of the source.
  `var cache = new Map();`,
  ``,
  `function hashSource(s) {`,
  ` return _crypto.createHash('sha256').update(s).digest('hex');`,
  `}`,
  ``,
  // Wraps user source so it is parsed as a function expression and exchanges
  // JSON strings at the compartment boundary (no object graph sharing).
  `function buildWrapper(source) {`,
  ` return '(function() {' +`,
  ` ' var fn = (' + source + ');\\n' +`,
  ` ' if (typeof fn !== "function") return null;\\n' +`,
  ` ' return function(jsonIn) {\\n' +`,
  ` ' var data = JSON.parse(jsonIn);\\n' +`,
  ` ' var result = fn(data);\\n' +`,
  ` ' if (result !== null && typeof result === "object" && typeof result.then === "function") {\\n' +`,
  ` ' return result.then(function(r) { return JSON.stringify(r); });\\n' +`,
  ` ' }\\n' +`,
  ` ' return JSON.stringify(result);\\n' +`,
  ` ' };\\n' +`,
  ` '})()';`,
  `}`,
  ``,
  // Compile (or reuse) a migration source inside a fresh Compartment whose
  // only endowment is the frozen JSON object.
  `function compileSource(source) {`,
  ` var key = hashSource(source);`,
  ` var cached = cache.get(key);`,
  ` if (cached) return cached;`,
  ``,
  ` var compartmentFn;`,
  ` try {`,
  ` var c = new Compartment({ JSON: JSON });`,
  ` compartmentFn = c.evaluate(buildWrapper(source));`,
  ` } catch (err) {`,
  ` throw new Error('Failed to compile migration source: ' + (err.message || String(err)));`,
  ` }`,
  ``,
  ` if (typeof compartmentFn !== 'function') {`,
  ` throw new Error('Migration source did not produce a function: ' + source.slice(0, 80));`,
  ` }`,
  ``,
  ` cache.set(key, compartmentFn);`,
  ` return compartmentFn;`,
  `}`,
  ``,
  // Message loop: every request carries an id that is echoed back so the
  // parent can pair responses with pending promises.
  `parentPort.on('message', function(msg) {`,
  ` var id = msg.id;`,
  ` try {`,
  ` if (msg.type === 'compile') {`,
  ` compileSource(msg.source);`,
  ` parentPort.postMessage({ id: id, type: 'compiled' });`,
  ` return;`,
  ` }`,
  ` if (msg.type === 'execute') {`,
  ` var fn = compileSource(msg.source);`,
  ` var raw;`,
  ` try {`,
  ` raw = fn(msg.jsonData);`,
  ` } catch (err) {`,
  ` parentPort.postMessage({ id: id, type: 'error', message: 'Migration function threw: ' + (err.message || String(err)) });`,
  ` return;`,
  ` }`,
  ` if (raw !== null && typeof raw === 'object' && typeof raw.then === 'function') {`,
  ` raw.then(`,
  ` function(jsonResult) {`,
  ` if (jsonResult === undefined || jsonResult === null) {`,
  ` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,
  ` } else {`,
  ` parentPort.postMessage({ id: id, type: 'result', jsonResult: jsonResult });`,
  ` }`,
  ` },`,
  ` function(err) {`,
  ` parentPort.postMessage({ id: id, type: 'error', message: 'Async migration function threw: ' + (err.message || String(err)) });`,
  ` }`,
  ` );`,
  ` return;`,
  ` }`,
  ` if (raw === undefined || raw === null) {`,
  ` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,
  ` } else {`,
  ` parentPort.postMessage({ id: id, type: 'result', jsonResult: raw });`,
  ` }`,
  ` }`,
  ` } catch (err) {`,
  ` parentPort.postMessage({ id: id, type: 'error', message: err.message || String(err) });`,
  ` }`,
  `});`
].join("\n");
|
|
953
|
+
// Memoized Worker constructor; worker_threads is only loaded on first use.
var _WorkerCtor = null;

/** Resolve (once) and return the worker_threads Worker constructor. */
async function loadWorkerCtor() {
  if (!_WorkerCtor) {
    const { Worker } = await import("worker_threads");
    _WorkerCtor = Worker;
  }
  return _WorkerCtor;
}
|
|
960
|
+
/**
 * Create (or reuse) the singleton sandbox worker and wire its lifecycle:
 * responses settle the matching _pending promise; worker errors/exits reject
 * everything in flight and drop the worker so a later call recreates it.
 *
 * NOTE(review): two overlapping calls while `_worker` is null can each create
 * a worker (the await on loadWorkerCtor yields before _worker is assigned) —
 * confirm callers serialize, or guard with a creation promise.
 */
async function ensureWorker() {
  if (_worker) return _worker;
  const Ctor = await loadWorkerCtor();
  // eval:true runs WORKER_SOURCE directly; parentUrl lets the worker resolve
  // 'ses' relative to this module.
  _worker = new Ctor(WORKER_SOURCE, {
    eval: true,
    workerData: { parentUrl: import_meta.url }
  });
  // Don't keep the process alive just because the sandbox exists.
  _worker.unref();
  _worker.on("message", (msg) => {
    if (msg.id === void 0) return;
    const pending = _pending.get(msg.id);
    if (!pending) return;
    _pending.delete(msg.id);
    if (msg.type === "error") {
      pending.reject(new MigrationError(msg.message ?? "Unknown sandbox error"));
    } else {
      pending.resolve(msg);
    }
  });
  _worker.on("error", (err) => {
    // Worker is unusable: fail every in-flight request and force recreation.
    for (const [, p] of _pending) {
      p.reject(new MigrationError(`Sandbox worker error: ${err.message}`));
    }
    _pending.clear();
    _worker = null;
  });
  _worker.on("exit", (code) => {
    if (_pending.size > 0) {
      for (const [, p] of _pending) {
        p.reject(new MigrationError(`Sandbox worker exited with code ${code}`));
      }
      _pending.clear();
    }
    _worker = null;
  });
  return _worker;
}
|
|
997
|
+
/**
 * Post a request to the sandbox worker and resolve with its reply.
 * Each request gets a fresh id recorded in _pending; the worker's message
 * handler (installed by ensureWorker) settles the promise.
 */
async function sendToWorker(msg) {
  const worker = await ensureWorker();
  // Wrap the counter long before it could lose integer precision.
  if (_requestId >= Number.MAX_SAFE_INTEGER) _requestId = 0;
  _requestId += 1;
  const id = _requestId;
  return new Promise((resolve, reject) => {
    _pending.set(id, { resolve, reject });
    worker.postMessage({ ...msg, id });
  });
}
|
|
1006
|
+
// Per-executor compiled-function caches, keyed weakly by the executor so a
// cache is collectable once its executor is.
var compiledCache = /* @__PURE__ */ new WeakMap();

/** Get (creating on first use) the source-hash -> compiled-fn Map for an executor. */
function getExecutorCache(executor) {
  const existing = compiledCache.get(executor);
  if (existing) return existing;
  const fresh = new Map();
  compiledCache.set(executor, fresh);
  return fresh;
}
|
|
1015
|
+
/** SHA-256 hex digest of a migration source string, used as a cache key. */
function hashSource(source) {
  const hasher = (0, import_node_crypto2.createHash)("sha256");
  hasher.update(source);
  return hasher.digest("hex");
}
|
|
1018
|
+
// Memoized serialization module (loaded lazily via the bundler's chunk init).
var _serializationModule = null;

/** Load (once) and return the Firestore-types serialization helpers. */
async function loadSerialization() {
  if (!_serializationModule) {
    _serializationModule = await Promise.resolve().then(() => (init_serialization(), serialization_exports));
  }
  return _serializationModule;
}
|
|
1024
|
+
/**
 * Default migration executor: returns an async function that serializes the
 * record data to JSON, runs the migration source in the sandbox worker, and
 * deserializes the JSON result back into Firestore types. Throws
 * MigrationError when the sandbox returns nothing parseable.
 */
function defaultExecutor(source) {
  return (async (data) => {
    const { serializeFirestoreTypes: toPlain, deserializeFirestoreTypes: fromPlain } = await loadSerialization();
    const jsonData = JSON.stringify(toPlain(data));
    const response = await sendToWorker({ type: "execute", source, jsonData });
    const { jsonResult } = response;
    if (jsonResult === void 0 || jsonResult === null) {
      throw new MigrationError("Migration returned a non-JSON-serializable value");
    }
    try {
      return fromPlain(JSON.parse(jsonResult));
    } catch {
      throw new MigrationError("Migration returned a non-JSON-serializable value");
    }
  });
}
|
|
1039
|
+
/**
 * Check that a migration source compiles without executing it.
 * Custom executors are probed synchronously; the default executor delegates
 * to the sandbox worker's 'compile' request. Compile failures surface as
 * MigrationError.
 */
async function precompileSource(source, executor) {
  const usesCustomExecutor = executor && executor !== defaultExecutor;
  if (!usesCustomExecutor) {
    await sendToWorker({ type: "compile", source });
    return;
  }
  try {
    executor(source);
  } catch (err) {
    if (err instanceof MigrationError) throw err;
    throw new MigrationError(`Failed to compile migration source: ${err.message}`);
  }
}
|
|
1051
|
+
/**
 * Compile a migration source through the given executor, memoizing by the
 * SHA-256 of the source within that executor's cache. Compile failures are
 * normalized to MigrationError.
 */
function compileMigrationFn(source, executor = defaultExecutor) {
  const cache = getExecutorCache(executor);
  const key = hashSource(source);
  const hit = cache.get(key);
  if (hit) return hit;
  try {
    const compiled = executor(source);
    cache.set(key, compiled);
    return compiled;
  } catch (err) {
    if (err instanceof MigrationError) throw err;
    throw new MigrationError(`Failed to compile migration source: ${err.message}`);
  }
}
|
|
1065
|
+
/**
 * Compile every stored migration step's `up` source into a callable;
 * version bounds pass through unchanged.
 */
function compileMigrations(stored, executor) {
  return stored.map(({ fromVersion, toVersion, up }) => ({
    fromVersion,
    toVersion,
    up: compileMigrationFn(up, executor)
  }));
}
|
|
1072
|
+
|
|
1073
|
+
// src/dynamic-registry.ts
|
|
1074
|
+
// Reserved node types whose documents define the schema itself.
var META_NODE_TYPE = "nodeType";
var META_EDGE_TYPE = "edgeType";
// JSON Schema for one stored migration step: version bounds plus the `up`
// function source, which is compiled later in the sandbox.
var STORED_MIGRATION_STEP_SCHEMA = {
  type: "object",
  required: ["fromVersion", "toVersion", "up"],
  properties: {
    fromVersion: { type: "integer", minimum: 0 },
    toVersion: { type: "integer", minimum: 1 },
    up: { type: "string", minLength: 1 }
  },
  additionalProperties: false
};
// JSON Schema for documents that define a node type.
var NODE_TYPE_SCHEMA = {
  type: "object",
  required: ["name", "jsonSchema"],
  properties: {
    name: { type: "string", minLength: 1 },
    jsonSchema: { type: "object" },
    description: { type: "string" },
    titleField: { type: "string" },
    subtitleField: { type: "string" },
    viewTemplate: { type: "string" },
    viewCss: { type: "string" },
    allowedIn: { type: "array", items: { type: "string", minLength: 1 } },
    schemaVersion: { type: "integer", minimum: 0 },
    migrations: { type: "array", items: STORED_MIGRATION_STEP_SCHEMA },
    migrationWriteBack: { type: "string", enum: ["off", "eager", "background"] }
  },
  additionalProperties: false
};
// JSON Schema for documents that define an edge type. `from`/`to` accept a
// single type name or a non-empty list; targetGraph must be one path segment.
var EDGE_TYPE_SCHEMA = {
  type: "object",
  required: ["name", "from", "to"],
  properties: {
    name: { type: "string", minLength: 1 },
    from: {
      oneOf: [
        { type: "string", minLength: 1 },
        { type: "array", items: { type: "string", minLength: 1 }, minItems: 1 }
      ]
    },
    to: {
      oneOf: [
        { type: "string", minLength: 1 },
        { type: "array", items: { type: "string", minLength: 1 }, minItems: 1 }
      ]
    },
    jsonSchema: { type: "object" },
    inverseLabel: { type: "string" },
    description: { type: "string" },
    titleField: { type: "string" },
    subtitleField: { type: "string" },
    viewTemplate: { type: "string" },
    viewCss: { type: "string" },
    allowedIn: { type: "array", items: { type: "string", minLength: 1 } },
    targetGraph: { type: "string", minLength: 1, pattern: "^[^/]+$" },
    schemaVersion: { type: "integer", minimum: 0 },
    migrations: { type: "array", items: STORED_MIGRATION_STEP_SCHEMA },
    migrationWriteBack: { type: "string", enum: ["off", "eager", "background"] }
  },
  additionalProperties: false
};
// Self-describing entries present in every dynamic registry: they allow the
// meta-type documents themselves to be validated.
var BOOTSTRAP_ENTRIES = [
  {
    aType: META_NODE_TYPE,
    axbType: NODE_RELATION,
    bType: META_NODE_TYPE,
    jsonSchema: NODE_TYPE_SCHEMA,
    description: "Meta-type: defines a node type"
  },
  {
    aType: META_EDGE_TYPE,
    axbType: NODE_RELATION,
    bType: META_EDGE_TYPE,
    jsonSchema: EDGE_TYPE_SCHEMA,
    description: "Meta-type: defines an edge type"
  }
];
|
|
1152
|
+
// Cached singleton registry containing only the bootstrap meta-entries.
var _bootstrapRegistry = null;

/** Registry that can validate nodeType/edgeType meta-documents. */
function createBootstrapRegistry() {
  if (!_bootstrapRegistry) {
    _bootstrapRegistry = createRegistry([...BOOTSTRAP_ENTRIES]);
  }
  return _bootstrapRegistry;
}
|
|
1158
|
+
/**
 * Derive a stable 21-character uid from a meta type and name so the same
 * type definition always maps to the same document id.
 */
function generateDeterministicUid(metaType, name) {
  const digest = (0, import_node_crypto3.createHash)("sha256").update(`${metaType}:${name}`).digest("base64url");
  return digest.substring(0, 21);
}
|
|
1162
|
+
/**
 * Queue precompilation of every migration step found on the given meta-type
 * records; returns the pending promises in record/step order.
 */
function collectMigrationPrecompiles(records, executor) {
  const pending = [];
  for (const record of records) {
    const migrations = record.data.migrations;
    if (!migrations) continue;
    for (const step of migrations) {
      pending.push(precompileSource(step.up, executor));
    }
  }
  return pending;
}

/**
 * Build a registry from meta-type documents stored in the graph itself.
 * Reads all nodeType/edgeType records, precompiles every migration source
 * up-front (so a bad source fails fast, before any entry is built), then
 * expands the records into registry entries on top of BOOTSTRAP_ENTRIES.
 * Edge records fan out across the cross product of their from/to types,
 * sharing one compiled migration chain per record.
 */
async function createRegistryFromGraph(reader, executor) {
  const [nodeTypes, edgeTypes] = await Promise.all([
    reader.findNodes({ aType: META_NODE_TYPE }),
    reader.findNodes({ aType: META_EDGE_TYPE })
  ]);
  // Previously this loop was duplicated verbatim for node and edge types;
  // the shared helper keeps the fail-fast precompile pass in one place.
  await Promise.all([
    ...collectMigrationPrecompiles(nodeTypes, executor),
    ...collectMigrationPrecompiles(edgeTypes, executor)
  ]);
  const entries = [...BOOTSTRAP_ENTRIES];
  for (const record of nodeTypes) {
    const data = record.data;
    entries.push({
      aType: data.name,
      axbType: NODE_RELATION,
      bType: data.name,
      jsonSchema: data.jsonSchema,
      description: data.description,
      titleField: data.titleField,
      subtitleField: data.subtitleField,
      allowedIn: data.allowedIn,
      migrations: data.migrations ? compileMigrations(data.migrations, executor) : void 0,
      migrationWriteBack: data.migrationWriteBack
    });
  }
  for (const record of edgeTypes) {
    const data = record.data;
    const fromTypes = Array.isArray(data.from) ? data.from : [data.from];
    const toTypes = Array.isArray(data.to) ? data.to : [data.to];
    // Compile once per record; every fan-out entry shares the chain.
    const compiledMigrations = data.migrations ? compileMigrations(data.migrations, executor) : void 0;
    for (const aType of fromTypes) {
      for (const bType of toTypes) {
        entries.push({
          aType,
          axbType: data.name,
          bType,
          jsonSchema: data.jsonSchema,
          description: data.description,
          inverseLabel: data.inverseLabel,
          titleField: data.titleField,
          subtitleField: data.subtitleField,
          allowedIn: data.allowedIn,
          targetGraph: data.targetGraph,
          migrations: compiledMigrations,
          migrationWriteBack: data.migrationWriteBack
        });
      }
    }
  }
  return createRegistry(entries);
}
|
|
1227
|
+
|
|
1228
|
+
// src/query.ts
|
|
1229
|
+
// src/query.ts
/**
 * Plan an edge query. A fully keyed lookup (aUid + axbType + bUid, no where
 * clauses) becomes a direct document get; everything else becomes a filtered
 * query. Throws InvalidQueryError when no filter at all is present.
 */
function buildEdgeQueryPlan(params) {
  const { aType, aUid, axbType, bType, bUid, limit, orderBy, where } = params;
  const fullyKeyed = Boolean(aUid && axbType && bUid) && !(where && where.length);
  if (fullyKeyed) {
    return { strategy: "get", docId: computeEdgeDocId(aUid, axbType, bUid) };
  }
  const filters = [];
  const addEq = (field, value) => {
    if (value) filters.push({ field, op: "==", value });
  };
  addEq("aType", aType);
  addEq("aUid", aUid);
  addEq("axbType", axbType);
  addEq("bType", bType);
  addEq("bUid", bUid);
  for (const clause of where ?? []) {
    // Non-builtin fields live under the record's data map.
    const isBuiltin = BUILTIN_FIELDS.has(clause.field);
    const field = isBuiltin || clause.field.startsWith("data.") ? clause.field : `data.${clause.field}`;
    filters.push({ field, op: clause.op, value: clause.value });
  }
  if (filters.length === 0) {
    throw new InvalidQueryError("findEdges requires at least one filter parameter");
  }
  // Undefined limit gets the default cap; an explicit 0 means "no limit".
  const effectiveLimit = limit === void 0 ? DEFAULT_QUERY_LIMIT : limit || void 0;
  return { strategy: "query", filters, options: { limit: effectiveLimit, orderBy } };
}
|
|
1252
|
+
/**
 * Plan a node query. Nodes are stored as self-edges, so the plan always
 * filters on aType plus the NODE_RELATION marker, with optional extra where
 * clauses mapped into the data.* namespace.
 */
function buildNodeQueryPlan(params) {
  const { aType, limit, orderBy, where } = params;
  const filters = [
    { field: "aType", op: "==", value: aType },
    { field: "axbType", op: "==", value: NODE_RELATION }
  ];
  for (const clause of where ?? []) {
    // Non-builtin fields live under the record's data map.
    const isBuiltin = BUILTIN_FIELDS.has(clause.field);
    const field = isBuiltin || clause.field.startsWith("data.") ? clause.field : `data.${clause.field}`;
    filters.push({ field, op: clause.op, value: clause.value });
  }
  // Undefined limit gets the default cap; an explicit 0 means "no limit".
  const effectiveLimit = limit === void 0 ? DEFAULT_QUERY_LIMIT : limit || void 0;
  return { strategy: "query", filters, options: { limit: effectiveLimit, orderBy } };
}
|
|
1267
|
+
|
|
1268
|
+
// src/query-safety.ts
|
|
1269
|
+
// src/query-safety.ts
// Builtin-field combinations treated as index-backed: a query whose builtin
// filters include every field of at least one pattern is considered safe
// (see analyzeQuerySafety).
var SAFE_INDEX_PATTERNS = [
  /* @__PURE__ */ new Set(["aUid", "axbType"]),
  /* @__PURE__ */ new Set(["axbType", "bUid"]),
  /* @__PURE__ */ new Set(["aType", "axbType"]),
  /* @__PURE__ */ new Set(["axbType", "bType"])
];
|
|
1275
|
+
/**
 * Classify a filter set as index-backed ("safe") or likely to require a full
 * collection scan. A query is safe when its builtin filters cover every
 * field of at least one SAFE_INDEX_PATTERNS entry; otherwise a reason string
 * describing the failure mode is returned.
 */
function analyzeQuerySafety(filters) {
  const builtinFieldsPresent = new Set();
  let hasDataFilters = false;
  for (const { field } of filters) {
    if (BUILTIN_FIELDS.has(field)) {
      builtinFieldsPresent.add(field);
    } else {
      hasDataFilters = true;
    }
  }
  const covers = (pattern) => {
    for (const required of pattern) {
      if (!builtinFieldsPresent.has(required)) return false;
    }
    return true;
  };
  if (SAFE_INDEX_PATTERNS.some(covers)) {
    return { safe: true };
  }
  const presentFields = [...builtinFieldsPresent];
  // Worst case: nothing but data.* filters — an unavoidable full scan.
  if (presentFields.length === 0 && hasDataFilters) {
    return {
      safe: false,
      reason: "Query filters only use data.* fields with no builtin field constraints. This requires a full collection scan. Add aType, aUid, axbType, bType, or bUid filters, or set allowCollectionScan: true."
    };
  }
  if (hasDataFilters) {
    return {
      safe: false,
      reason: `Query filters on [${presentFields.join(", ")}] do not match any indexed pattern. data.* filters without an indexed base require a full collection scan. Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). Set allowCollectionScan: true to override.`
    };
  }
  return {
    safe: false,
    reason: `Query filters on [${presentFields.join(", ")}] do not match any indexed pattern. This may cause a full collection scan on Firestore Enterprise. Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). Set allowCollectionScan: true to override.`
  };
}
|
|
1315
|
+
|
|
1316
|
+
// src/transaction.ts
|
|
1317
|
+
// src/transaction.ts
/** Shape a node for writing: nodes are stored as self-referencing edges. */
function buildWritableNodeRecord2(aType, uid, data) {
  return {
    aType,
    aUid: uid,
    axbType: NODE_RELATION,
    bType: aType,
    bUid: uid,
    data
  };
}
|
|
1320
|
+
/** Shape an edge record for writing (field order mirrors the stored doc). */
function buildWritableEdgeRecord2(aType, aUid, axbType, bType, bUid, data) {
  const record = { aType, aUid, axbType, bType, bUid, data };
  return record;
}
|
|
1323
|
+
var GraphTransactionImpl = class {
  /**
   * Transaction-scoped graph reader/writer.
   *
   * Reads go through the schema-migration pipeline when a registry is
   * present; because this runs inside a transaction, migrated records are
   * written back synchronously (awaited) instead of fire-and-forget.
   *
   * @param backend         transaction-bound storage backend
   *                        (getDoc / setDoc / updateDoc / deleteDoc / query)
   * @param registry        optional type registry used for validation,
   *                        schema-version stamping and migration
   * @param scanProtection  "error" | "warn" | "off" — how unsafe queries are handled
   * @param scopePath       scope path forwarded to registry.validate
   * @param globalWriteBack default migration write-back policy ("off" disables)
   */
  constructor(backend, registry, scanProtection = "error", scopePath = "", globalWriteBack = "off") {
    this.backend = backend;
    this.registry = registry;
    this.scanProtection = scanProtection;
    this.scopePath = scopePath;
    this.globalWriteBack = globalWriteBack;
  }
  /**
   * Fetch a single doc and run it through the migration pipeline,
   * synchronously persisting the migrated payload unless the effective
   * write-back policy is "off". Shared by getNode/getEdge (the two were
   * previously verbatim duplicates).
   *
   * @returns the (possibly migrated) record, or null when the doc is absent.
   */
  async getMigrated(docId) {
    const record = await this.backend.getDoc(docId);
    // No registry means no migration pipeline; pass the raw record through.
    if (!record || !this.registry) return record;
    const result = await migrateRecord(record, this.registry, this.globalWriteBack);
    if (result.migrated && result.writeBack !== "off") {
      // Awaited (not fire-and-forget): the write-back participates in the
      // enclosing transaction.
      await this.backend.updateDoc(docId, {
        replaceData: result.record.data,
        v: result.record.v
      });
    }
    return result.record;
  }
  async getNode(uid) {
    return this.getMigrated(computeNodeDocId(uid));
  }
  async getEdge(aUid, axbType, bUid) {
    return this.getMigrated(computeEdgeDocId(aUid, axbType, bUid));
  }
  /** Existence probe — no migration, just a presence check on the edge doc. */
  async edgeExists(aUid, axbType, bUid) {
    const record = await this.backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
    return record !== null;
  }
  /**
   * Enforce the scan-protection policy on a filter list. No-op when the
   * caller opted in via allowCollectionScan or protection is "off";
   * otherwise throws ("error") or warns (any other mode) on unsafe queries.
   */
  checkQuerySafety(filters, allowCollectionScan) {
    if (allowCollectionScan || this.scanProtection === "off") return;
    const result = analyzeQuerySafety(filters);
    if (result.safe) return;
    if (this.scanProtection === "error") {
      throw new QuerySafetyError(result.reason);
    }
    console.warn(`[firegraph] Query safety warning: ${result.reason}`);
  }
  /**
   * Execute a query plan produced by buildEdgeQueryPlan/buildNodeQueryPlan
   * and run the results through the migration pipeline. Shared by
   * findEdges/findNodes (previously verbatim duplicates).
   */
  async runPlan(plan, allowCollectionScan) {
    let records;
    if (plan.strategy === "get") {
      // Fully-keyed plan collapses to a direct doc lookup.
      const record = await this.backend.getDoc(plan.docId);
      records = record ? [record] : [];
    } else {
      this.checkQuerySafety(plan.filters, allowCollectionScan);
      records = await this.backend.query(plan.filters, plan.options);
    }
    return this.applyMigrations(records);
  }
  async findEdges(params) {
    return this.runPlan(buildEdgeQueryPlan(params), params.allowCollectionScan);
  }
  async findNodes(params) {
    return this.runPlan(buildNodeQueryPlan(params), params.allowCollectionScan);
  }
  /**
   * Migrate a batch of records, synchronously writing back each migrated
   * record whose policy is not "off". Returns the migrated records in order.
   */
  async applyMigrations(records) {
    if (!this.registry || records.length === 0) return records;
    const results = await migrateRecords(records, this.registry, this.globalWriteBack);
    for (const result of results) {
      if (result.migrated && result.writeBack !== "off") {
        // Nodes are stored as self-edges under NODE_RELATION, so the doc id
        // is recomputed from the record's own identifying fields.
        const docId = result.record.axbType === NODE_RELATION ? computeNodeDocId(result.record.aUid) : computeEdgeDocId(result.record.aUid, result.record.axbType, result.record.bUid);
        await this.backend.updateDoc(docId, {
          replaceData: result.record.data,
          v: result.record.v
        });
      }
    }
    return results.map((r) => r.record);
  }
  async putNode(aType, uid, data) {
    await this.writeNode(aType, uid, data, "merge");
  }
  async putEdge(aType, aUid, axbType, bType, bUid, data) {
    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
  }
  async replaceNode(aType, uid, data) {
    await this.writeNode(aType, uid, data, "replace");
  }
  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
  }
  /**
   * Stamp the registry's current schemaVersion onto a writable record so
   * future reads know which migrations (if any) to apply. No-op without a
   * registry, a matching entry, or a positive schemaVersion. Shared by
   * writeNode/writeEdge (previously duplicated inline).
   */
  stampSchemaVersion(record, aType, axbType, bType) {
    if (!this.registry) return;
    const entry = this.registry.lookup(aType, axbType, bType);
    if (entry?.schemaVersion && entry.schemaVersion > 0) {
      record.v = entry.schemaVersion;
    }
  }
  /** Shared implementation for putNode ("merge") and replaceNode ("replace"). */
  async writeNode(aType, uid, data, mode) {
    // Delete sentinels are only meaningful in patch (update*) paths.
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
    if (this.registry) {
      // Nodes validate as self-relations: aType --NODE_RELATION--> aType.
      this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
    }
    const docId = computeNodeDocId(uid);
    const record = buildWritableNodeRecord2(aType, uid, data);
    this.stampSchemaVersion(record, aType, NODE_RELATION, aType);
    await this.backend.setDoc(docId, record, mode);
  }
  /** Shared implementation for putEdge ("merge") and replaceEdge ("replace"). */
  async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
    if (this.registry) {
      this.registry.validate(aType, axbType, bType, data, this.scopePath);
    }
    const docId = computeEdgeDocId(aUid, axbType, bUid);
    const record = buildWritableEdgeRecord2(aType, aUid, axbType, bType, bUid, data);
    this.stampSchemaVersion(record, aType, axbType, bType);
    await this.backend.setDoc(docId, record, mode);
  }
  /** Deep-merge patch into the node's data via the flattenPatch pipeline. */
  async updateNode(uid, data) {
    const docId = computeNodeDocId(uid);
    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
  }
  /** Deep-merge patch into the edge's data via the flattenPatch pipeline. */
  async updateEdge(aUid, axbType, bUid, data) {
    const docId = computeEdgeDocId(aUid, axbType, bUid);
    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
  }
  async removeNode(uid) {
    const docId = computeNodeDocId(uid);
    await this.backend.deleteDoc(docId);
  }
  async removeEdge(aUid, axbType, bUid) {
    const docId = computeEdgeDocId(aUid, axbType, bUid);
    await this.backend.deleteDoc(docId);
  }
};
|
|
1468
|
+
|
|
1469
|
+
// src/client.ts
|
|
1470
|
+
// Type names reserved for firegraph's internal meta graph; user-supplied
// aType/bType values must not collide with these.
var RESERVED_TYPE_NAMES = /* @__PURE__ */ new Set([META_NODE_TYPE, META_EDGE_TYPE]);
|
|
1471
|
+
function buildWritableNodeRecord3(aType, uid, data) {
  // Nodes are persisted as self-edges: both endpoints reference the node
  // itself and the relation slot carries the reserved NODE_RELATION marker,
  // so the envelope shape is exactly a writable edge record.
  return buildWritableEdgeRecord3(aType, uid, NODE_RELATION, aType, uid, data);
}
|
|
1474
|
+
function buildWritableEdgeRecord3(aType, aUid, axbType, bType, bUid, data) {
  // Assemble the writable edge envelope field-by-field: the five
  // identifying columns first, then the user payload.
  const envelope = {};
  envelope.aType = aType;
  envelope.aUid = aUid;
  envelope.axbType = axbType;
  envelope.bType = bType;
  envelope.bUid = bUid;
  envelope.data = data;
  return envelope;
}
|
|
1477
|
+
var GraphClientImpl = class _GraphClientImpl {
|
|
1478
|
+
constructor(backend, options, metaBackend) {
|
|
1479
|
+
this.backend = backend;
|
|
1480
|
+
this.globalWriteBack = options?.migrationWriteBack ?? "off";
|
|
1481
|
+
this.migrationSandbox = options?.migrationSandbox;
|
|
1482
|
+
if (options?.registryMode) {
|
|
1483
|
+
this.dynamicConfig = options.registryMode;
|
|
1484
|
+
this.bootstrapRegistry = createBootstrapRegistry();
|
|
1485
|
+
if (options.registry) {
|
|
1486
|
+
this.staticRegistry = options.registry;
|
|
1487
|
+
}
|
|
1488
|
+
this.metaBackend = metaBackend;
|
|
1489
|
+
} else {
|
|
1490
|
+
this.staticRegistry = options?.registry;
|
|
1491
|
+
}
|
|
1492
|
+
this.scanProtection = options?.scanProtection ?? "error";
|
|
1493
|
+
}
|
|
1494
|
+
scanProtection;
|
|
1495
|
+
/**
|
|
1496
|
+
* Capability set of the underlying backend. Mirrors `backend.capabilities`
|
|
1497
|
+
* verbatim so callers can portability-check (`client.capabilities.has(
|
|
1498
|
+
* 'query.join')`) without reaching for the backend handle. Static for the
|
|
1499
|
+
* lifetime of the client.
|
|
1500
|
+
*/
|
|
1501
|
+
get capabilities() {
|
|
1502
|
+
return this.backend.capabilities;
|
|
1503
|
+
}
|
|
1504
|
+
// Static mode
|
|
1505
|
+
staticRegistry;
|
|
1506
|
+
// Dynamic mode
|
|
1507
|
+
dynamicConfig;
|
|
1508
|
+
bootstrapRegistry;
|
|
1509
|
+
dynamicRegistry;
|
|
1510
|
+
metaBackend;
|
|
1511
|
+
// Migration settings
|
|
1512
|
+
globalWriteBack;
|
|
1513
|
+
migrationSandbox;
|
|
1514
|
+
// ---------------------------------------------------------------------------
|
|
1515
|
+
// Backend access (exposed for traversal helpers and subgraph cloning)
|
|
1516
|
+
// ---------------------------------------------------------------------------
|
|
1517
|
+
/** @internal */
|
|
1518
|
+
getBackend() {
|
|
1519
|
+
return this.backend;
|
|
1520
|
+
}
|
|
1521
|
+
/**
|
|
1522
|
+
* Snapshot of the currently-effective registry. Returns the merged view
|
|
1523
|
+
* used for domain-type validation and migration — in dynamic mode this is
|
|
1524
|
+
* `dynamicRegistry ?? staticRegistry ?? bootstrapRegistry`, so callers see
|
|
1525
|
+
* updates after `reloadRegistry()` without having to re-resolve anything.
|
|
1526
|
+
*
|
|
1527
|
+
* Exposed for backends that need topology access during bulk operations
|
|
1528
|
+
* (e.g. the Cloudflare DO backend's cross-DO cascade). Not part of the
|
|
1529
|
+
* public `GraphClient` surface.
|
|
1530
|
+
*
|
|
1531
|
+
* @internal
|
|
1532
|
+
*/
|
|
1533
|
+
getRegistrySnapshot() {
|
|
1534
|
+
return this.getCombinedRegistry();
|
|
1535
|
+
}
|
|
1536
|
+
// ---------------------------------------------------------------------------
|
|
1537
|
+
// Registry routing
|
|
1538
|
+
// ---------------------------------------------------------------------------
|
|
1539
|
+
getRegistryForType(aType) {
|
|
1540
|
+
if (!this.dynamicConfig) return this.staticRegistry;
|
|
1541
|
+
if (aType === META_NODE_TYPE || aType === META_EDGE_TYPE) {
|
|
1542
|
+
return this.bootstrapRegistry;
|
|
1543
|
+
}
|
|
1544
|
+
return this.dynamicRegistry ?? this.staticRegistry ?? this.bootstrapRegistry;
|
|
1545
|
+
}
|
|
1546
|
+
getBackendForType(aType) {
|
|
1547
|
+
if (this.metaBackend && (aType === META_NODE_TYPE || aType === META_EDGE_TYPE)) {
|
|
1548
|
+
return this.metaBackend;
|
|
1549
|
+
}
|
|
1550
|
+
return this.backend;
|
|
1551
|
+
}
|
|
1552
|
+
getCombinedRegistry() {
|
|
1553
|
+
if (!this.dynamicConfig) return this.staticRegistry;
|
|
1554
|
+
return this.dynamicRegistry ?? this.staticRegistry ?? this.bootstrapRegistry;
|
|
1555
|
+
}
|
|
1556
|
+
// ---------------------------------------------------------------------------
|
|
1557
|
+
// Query safety
|
|
1558
|
+
// ---------------------------------------------------------------------------
|
|
1559
|
+
checkQuerySafety(filters, allowCollectionScan) {
|
|
1560
|
+
if (allowCollectionScan || this.scanProtection === "off") return;
|
|
1561
|
+
const result = analyzeQuerySafety(filters);
|
|
1562
|
+
if (result.safe) return;
|
|
1563
|
+
if (this.scanProtection === "error") {
|
|
1564
|
+
throw new QuerySafetyError(result.reason);
|
|
1565
|
+
}
|
|
1566
|
+
console.warn(`[firegraph] Query safety warning: ${result.reason}`);
|
|
1567
|
+
}
|
|
1568
|
+
// ---------------------------------------------------------------------------
|
|
1569
|
+
// Migration helpers
|
|
1570
|
+
// ---------------------------------------------------------------------------
|
|
1571
|
+
async applyMigration(record, docId) {
|
|
1572
|
+
const registry = this.getCombinedRegistry();
|
|
1573
|
+
if (!registry) return record;
|
|
1574
|
+
const result = await migrateRecord(record, registry, this.globalWriteBack);
|
|
1575
|
+
if (result.migrated) {
|
|
1576
|
+
this.handleWriteBack(result, docId);
|
|
1577
|
+
}
|
|
1578
|
+
return result.record;
|
|
1579
|
+
}
|
|
1580
|
+
async applyMigrations(records) {
|
|
1581
|
+
const registry = this.getCombinedRegistry();
|
|
1582
|
+
if (!registry || records.length === 0) return records;
|
|
1583
|
+
const results = await migrateRecords(records, registry, this.globalWriteBack);
|
|
1584
|
+
for (const result of results) {
|
|
1585
|
+
if (result.migrated) {
|
|
1586
|
+
const docId = result.record.axbType === NODE_RELATION ? computeNodeDocId(result.record.aUid) : computeEdgeDocId(result.record.aUid, result.record.axbType, result.record.bUid);
|
|
1587
|
+
this.handleWriteBack(result, docId);
|
|
1588
|
+
}
|
|
1589
|
+
}
|
|
1590
|
+
return results.map((r) => r.record);
|
|
1591
|
+
}
|
|
1592
|
+
/**
|
|
1593
|
+
* Fire-and-forget write-back for a migrated record. Both `'eager'` and
|
|
1594
|
+
* `'background'` are non-blocking; the difference is the log level on
|
|
1595
|
+
* failure. For synchronous write-back, use a transaction — see
|
|
1596
|
+
* `GraphTransactionImpl`.
|
|
1597
|
+
*/
|
|
1598
|
+
handleWriteBack(result, docId) {
|
|
1599
|
+
if (result.writeBack === "off") return;
|
|
1600
|
+
const doWriteBack = async () => {
|
|
1601
|
+
try {
|
|
1602
|
+
await this.backend.updateDoc(docId, {
|
|
1603
|
+
replaceData: result.record.data,
|
|
1604
|
+
v: result.record.v
|
|
1605
|
+
});
|
|
1606
|
+
} catch (err) {
|
|
1607
|
+
const msg = `[firegraph] Migration write-back failed for ${docId}: ${err.message}`;
|
|
1608
|
+
if (result.writeBack === "eager") {
|
|
1609
|
+
console.error(msg);
|
|
1610
|
+
} else {
|
|
1611
|
+
console.warn(msg);
|
|
1612
|
+
}
|
|
1613
|
+
}
|
|
1614
|
+
};
|
|
1615
|
+
void doWriteBack();
|
|
1616
|
+
}
|
|
1617
|
+
// ---------------------------------------------------------------------------
|
|
1618
|
+
// GraphReader
|
|
1619
|
+
// ---------------------------------------------------------------------------
|
|
1620
|
+
async getNode(uid) {
|
|
1621
|
+
const docId = computeNodeDocId(uid);
|
|
1622
|
+
const record = await this.backend.getDoc(docId);
|
|
1623
|
+
if (!record) return null;
|
|
1624
|
+
return this.applyMigration(record, docId);
|
|
1625
|
+
}
|
|
1626
|
+
async getEdge(aUid, axbType, bUid) {
|
|
1627
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1628
|
+
const record = await this.backend.getDoc(docId);
|
|
1629
|
+
if (!record) return null;
|
|
1630
|
+
return this.applyMigration(record, docId);
|
|
1631
|
+
}
|
|
1632
|
+
async edgeExists(aUid, axbType, bUid) {
|
|
1633
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1634
|
+
const record = await this.backend.getDoc(docId);
|
|
1635
|
+
return record !== null;
|
|
1636
|
+
}
|
|
1637
|
+
async findEdges(params) {
|
|
1638
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1639
|
+
let records;
|
|
1640
|
+
if (plan.strategy === "get") {
|
|
1641
|
+
const record = await this.backend.getDoc(plan.docId);
|
|
1642
|
+
records = record ? [record] : [];
|
|
1643
|
+
} else {
|
|
1644
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1645
|
+
records = await this.backend.query(plan.filters, plan.options);
|
|
1646
|
+
}
|
|
1647
|
+
return this.applyMigrations(records);
|
|
1648
|
+
}
|
|
1649
|
+
async findNodes(params) {
|
|
1650
|
+
const plan = buildNodeQueryPlan(params);
|
|
1651
|
+
let records;
|
|
1652
|
+
if (plan.strategy === "get") {
|
|
1653
|
+
const record = await this.backend.getDoc(plan.docId);
|
|
1654
|
+
records = record ? [record] : [];
|
|
1655
|
+
} else {
|
|
1656
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1657
|
+
records = await this.backend.query(plan.filters, plan.options);
|
|
1658
|
+
}
|
|
1659
|
+
return this.applyMigrations(records);
|
|
1660
|
+
}
|
|
1661
|
+
// ---------------------------------------------------------------------------
|
|
1662
|
+
// GraphWriter
|
|
1663
|
+
// ---------------------------------------------------------------------------
|
|
1664
|
+
async putNode(aType, uid, data) {
|
|
1665
|
+
await this.writeNode(aType, uid, data, "merge");
|
|
1666
|
+
}
|
|
1667
|
+
async putEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
1668
|
+
await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
|
|
1669
|
+
}
|
|
1670
|
+
async replaceNode(aType, uid, data) {
|
|
1671
|
+
await this.writeNode(aType, uid, data, "replace");
|
|
1672
|
+
}
|
|
1673
|
+
async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
1674
|
+
await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
|
|
1675
|
+
}
|
|
1676
|
+
async writeNode(aType, uid, data, mode) {
|
|
1677
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
|
|
1678
|
+
const registry = this.getRegistryForType(aType);
|
|
1679
|
+
if (registry) {
|
|
1680
|
+
registry.validate(aType, NODE_RELATION, aType, data, this.backend.scopePath);
|
|
1681
|
+
}
|
|
1682
|
+
const backend = this.getBackendForType(aType);
|
|
1683
|
+
const docId = computeNodeDocId(uid);
|
|
1684
|
+
const record = buildWritableNodeRecord3(aType, uid, data);
|
|
1685
|
+
if (registry) {
|
|
1686
|
+
const entry = registry.lookup(aType, NODE_RELATION, aType);
|
|
1687
|
+
if (entry?.schemaVersion && entry.schemaVersion > 0) {
|
|
1688
|
+
record.v = entry.schemaVersion;
|
|
1689
|
+
}
|
|
1690
|
+
}
|
|
1691
|
+
await backend.setDoc(docId, record, mode);
|
|
1692
|
+
}
|
|
1693
|
+
async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
|
|
1694
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
|
|
1695
|
+
const registry = this.getRegistryForType(aType);
|
|
1696
|
+
if (registry) {
|
|
1697
|
+
registry.validate(aType, axbType, bType, data, this.backend.scopePath);
|
|
1698
|
+
}
|
|
1699
|
+
const backend = this.getBackendForType(aType);
|
|
1700
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1701
|
+
const record = buildWritableEdgeRecord3(aType, aUid, axbType, bType, bUid, data);
|
|
1702
|
+
if (registry) {
|
|
1703
|
+
const entry = registry.lookup(aType, axbType, bType);
|
|
1704
|
+
if (entry?.schemaVersion && entry.schemaVersion > 0) {
|
|
1705
|
+
record.v = entry.schemaVersion;
|
|
1706
|
+
}
|
|
1707
|
+
}
|
|
1708
|
+
await backend.setDoc(docId, record, mode);
|
|
1709
|
+
}
|
|
1710
|
+
async updateNode(uid, data) {
|
|
1711
|
+
const docId = computeNodeDocId(uid);
|
|
1712
|
+
await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
1713
|
+
}
|
|
1714
|
+
async updateEdge(aUid, axbType, bUid, data) {
|
|
1715
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1716
|
+
await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
1717
|
+
}
|
|
1718
|
+
async removeNode(uid) {
|
|
1719
|
+
const docId = computeNodeDocId(uid);
|
|
1720
|
+
await this.backend.deleteDoc(docId);
|
|
1721
|
+
}
|
|
1722
|
+
async removeEdge(aUid, axbType, bUid) {
|
|
1723
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1724
|
+
await this.backend.deleteDoc(docId);
|
|
1725
|
+
}
|
|
1726
|
+
// ---------------------------------------------------------------------------
|
|
1727
|
+
// Transactions & Batches
|
|
1728
|
+
// ---------------------------------------------------------------------------
|
|
1729
|
+
async runTransaction(fn) {
|
|
1730
|
+
return this.backend.runTransaction(async (txBackend) => {
|
|
1731
|
+
const graphTx = new GraphTransactionImpl(
|
|
1732
|
+
txBackend,
|
|
1733
|
+
this.getCombinedRegistry(),
|
|
1734
|
+
this.scanProtection,
|
|
1735
|
+
this.backend.scopePath,
|
|
1736
|
+
this.globalWriteBack
|
|
1737
|
+
);
|
|
1738
|
+
return fn(graphTx);
|
|
1739
|
+
});
|
|
1740
|
+
}
|
|
1741
|
+
batch() {
|
|
1742
|
+
return new GraphBatchImpl(
|
|
1743
|
+
this.backend.createBatch(),
|
|
1744
|
+
this.getCombinedRegistry(),
|
|
1745
|
+
this.backend.scopePath
|
|
1746
|
+
);
|
|
1747
|
+
}
|
|
1748
|
+
// ---------------------------------------------------------------------------
|
|
1749
|
+
// Subgraph
|
|
1750
|
+
// ---------------------------------------------------------------------------
|
|
1751
|
+
subgraph(parentNodeUid, name = "graph") {
|
|
1752
|
+
if (!parentNodeUid || parentNodeUid.includes("/")) {
|
|
1753
|
+
throw new FiregraphError(
|
|
1754
|
+
`Invalid parentNodeUid for subgraph: "${parentNodeUid}". Must be a non-empty string without "/".`,
|
|
1755
|
+
"INVALID_SUBGRAPH"
|
|
1756
|
+
);
|
|
1757
|
+
}
|
|
1758
|
+
if (name.includes("/")) {
|
|
1759
|
+
throw new FiregraphError(
|
|
1760
|
+
`Subgraph name must not contain "/": got "${name}". Use chained .subgraph() calls for nested subgraphs.`,
|
|
1761
|
+
"INVALID_SUBGRAPH"
|
|
1762
|
+
);
|
|
1763
|
+
}
|
|
1764
|
+
const childBackend = this.backend.subgraph(parentNodeUid, name);
|
|
1765
|
+
return new _GraphClientImpl(
|
|
1766
|
+
childBackend,
|
|
1767
|
+
{
|
|
1768
|
+
registry: this.getCombinedRegistry(),
|
|
1769
|
+
scanProtection: this.scanProtection,
|
|
1770
|
+
migrationWriteBack: this.globalWriteBack,
|
|
1771
|
+
migrationSandbox: this.migrationSandbox
|
|
1772
|
+
}
|
|
1773
|
+
// Subgraphs do not have meta-backends; meta lives only at the root.
|
|
1774
|
+
);
|
|
1775
|
+
}
|
|
1776
|
+
// ---------------------------------------------------------------------------
|
|
1777
|
+
// Collection group query
|
|
1778
|
+
// ---------------------------------------------------------------------------
|
|
1779
|
+
async findEdgesGlobal(params, collectionName) {
|
|
1780
|
+
if (!this.backend.findEdgesGlobal) {
|
|
1781
|
+
throw new FiregraphError(
|
|
1782
|
+
"findEdgesGlobal() is not supported by the current storage backend.",
|
|
1783
|
+
"UNSUPPORTED_OPERATION"
|
|
1784
|
+
);
|
|
1785
|
+
}
|
|
1786
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1787
|
+
if (plan.strategy === "get") {
|
|
1788
|
+
throw new FiregraphError(
|
|
1789
|
+
"findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
|
|
1790
|
+
"INVALID_QUERY"
|
|
1791
|
+
);
|
|
1792
|
+
}
|
|
1793
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1794
|
+
const records = await this.backend.findEdgesGlobal(params, collectionName);
|
|
1795
|
+
return this.applyMigrations(records);
|
|
1796
|
+
}
|
|
1797
|
+
// ---------------------------------------------------------------------------
|
|
1798
|
+
// Aggregate query (capability: query.aggregate)
|
|
1799
|
+
// ---------------------------------------------------------------------------
|
|
1800
|
+
async aggregate(params) {
|
|
1801
|
+
if (!this.backend.aggregate) {
|
|
1802
|
+
throw new FiregraphError(
|
|
1803
|
+
"aggregate() is not supported by the current storage backend.",
|
|
1804
|
+
"UNSUPPORTED_OPERATION"
|
|
1805
|
+
);
|
|
1806
|
+
}
|
|
1807
|
+
const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
|
|
1808
|
+
if (!hasAnyFilter) {
|
|
1809
|
+
this.checkQuerySafety([], params.allowCollectionScan);
|
|
1810
|
+
const result2 = await this.backend.aggregate(params.aggregates, []);
|
|
1811
|
+
return result2;
|
|
1812
|
+
}
|
|
1813
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1814
|
+
if (plan.strategy === "get") {
|
|
1815
|
+
throw new FiregraphError(
|
|
1816
|
+
"aggregate() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
|
|
1817
|
+
"INVALID_QUERY"
|
|
1818
|
+
);
|
|
1819
|
+
}
|
|
1820
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1821
|
+
const result = await this.backend.aggregate(params.aggregates, plan.filters);
|
|
1822
|
+
return result;
|
|
1823
|
+
}
|
|
1824
|
+
// ---------------------------------------------------------------------------
|
|
1825
|
+
// Bulk operations
|
|
1826
|
+
// ---------------------------------------------------------------------------
|
|
1827
|
+
async removeNodeCascade(uid, options) {
|
|
1828
|
+
return this.backend.removeNodeCascade(uid, this, options);
|
|
1829
|
+
}
|
|
1830
|
+
async bulkRemoveEdges(params, options) {
|
|
1831
|
+
return this.backend.bulkRemoveEdges(params, this, options);
|
|
1832
|
+
}
|
|
1833
|
+
// ---------------------------------------------------------------------------
|
|
1834
|
+
// Server-side DML (capability: query.dml)
|
|
1835
|
+
// ---------------------------------------------------------------------------
|
|
1836
|
+
/**
|
|
1837
|
+
* Single-statement bulk DELETE. Translates `params` to a filter list via
|
|
1838
|
+
* `buildEdgeQueryPlan` (the same plan `findEdges` uses) and dispatches to
|
|
1839
|
+
* `backend.bulkDelete`. The fetch-then-delete loop in `bulkRemoveEdges`
|
|
1840
|
+
* is the cap-less fallback; this method is the fast path on backends
|
|
1841
|
+
* declaring `query.dml`.
|
|
1842
|
+
*
|
|
1843
|
+
* Scan-protection rules match `findEdges`: a query with no identifying
|
|
1844
|
+
* fields requires `allowCollectionScan: true` to pass. A bare-empty
|
|
1845
|
+
* filter set (no `aType`, `aUid`, etc., no `where`) is allowed at this
|
|
1846
|
+
* layer — shared SQLite bounds the blast radius via its leading `scope`
|
|
1847
|
+
* predicate — but the DO RPC backend rejects empty filters at the wire
|
|
1848
|
+
* boundary as defense-in-depth. To wipe a routed subgraph DO, use
|
|
1849
|
+
* `removeNodeCascade` on the parent node instead.
|
|
1850
|
+
*/
|
|
1851
|
+
async bulkDelete(params, options) {
|
|
1852
|
+
if (!this.backend.bulkDelete) {
|
|
1853
|
+
throw new FiregraphError(
|
|
1854
|
+
"bulkDelete() is not supported by the current storage backend. Fall back to bulkRemoveEdges() for backends without query.dml (e.g. Firestore Standard).",
|
|
1855
|
+
"UNSUPPORTED_OPERATION"
|
|
1856
|
+
);
|
|
1857
|
+
}
|
|
1858
|
+
const filters = this.buildDmlFilters(params);
|
|
1859
|
+
return this.backend.bulkDelete(filters, options);
|
|
1860
|
+
}
|
|
1861
|
+
/**
|
|
1862
|
+
* Single-statement bulk UPDATE. Same translation path as `bulkDelete`,
|
|
1863
|
+
* but the patch is deep-merged into each matching row's `data` via the
|
|
1864
|
+
* shared `flattenPatch` pipeline. Identifying columns are immutable
|
|
1865
|
+
* through this path (see `BulkUpdatePatch` JSDoc).
|
|
1866
|
+
*
|
|
1867
|
+
* Empty-patch rejection happens inside the backend (`compileBulkUpdate`)
|
|
1868
|
+
* — a `data: {}` payload would only rewrite `updated_at`, which is
|
|
1869
|
+
* almost certainly a bug.
|
|
1870
|
+
*/
|
|
1871
|
+
async bulkUpdate(params, patch, options) {
|
|
1872
|
+
if (!this.backend.bulkUpdate) {
|
|
1873
|
+
throw new FiregraphError(
|
|
1874
|
+
"bulkUpdate() is not supported by the current storage backend.",
|
|
1875
|
+
"UNSUPPORTED_OPERATION"
|
|
1876
|
+
);
|
|
1877
|
+
}
|
|
1878
|
+
const filters = this.buildDmlFilters(params);
|
|
1879
|
+
return this.backend.bulkUpdate(filters, patch, options);
|
|
1880
|
+
}
|
|
1881
|
+
// ---------------------------------------------------------------------------
|
|
1882
|
+
// Multi-source fan-out (capability: query.join)
|
|
1883
|
+
// ---------------------------------------------------------------------------
|
|
1884
|
+
/**
|
|
1885
|
+
* Fan out from `params.sources` over a single edge type in one round trip.
|
|
1886
|
+
* On backends without `query.join`, throws `UNSUPPORTED_OPERATION` — the
|
|
1887
|
+
* cap-less fallback is the per-source `findEdges` loop, which lives in
|
|
1888
|
+
* `traverse.ts` (the higher-level traversal walker) rather than here.
|
|
1889
|
+
*
|
|
1890
|
+
* `expand()` is intentionally edge-type-only — the source set is a flat
|
|
1891
|
+
* UID list and the hop matches one `axbType`. Multi-axbType expansions
|
|
1892
|
+
* become multiple `expand()` calls, one per relation.
|
|
1893
|
+
*
|
|
1894
|
+
* `params.sources.length === 0` short-circuits to an empty result. The
|
|
1895
|
+
* backend never sees the call. (`compileExpand` itself rejects empty
|
|
1896
|
+
* because `IN ()` is not valid SQL.)
|
|
1897
|
+
*/
|
|
1898
|
+
async expand(params) {
|
|
1899
|
+
if (!this.backend.expand) {
|
|
1900
|
+
throw new FiregraphError(
|
|
1901
|
+
"expand() is not supported by the current storage backend. Backends without `query.join` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent (just slower).",
|
|
1902
|
+
"UNSUPPORTED_OPERATION"
|
|
1903
|
+
);
|
|
1904
|
+
}
|
|
1905
|
+
if (params.sources.length === 0) {
|
|
1906
|
+
return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
|
|
1907
|
+
}
|
|
1908
|
+
return this.backend.expand(params);
|
|
1909
|
+
}
|
|
1910
|
+
// ---------------------------------------------------------------------------
|
|
1911
|
+
// Engine-level multi-hop traversal (capability: traversal.serverSide)
|
|
1912
|
+
// ---------------------------------------------------------------------------
|
|
1913
|
+
/**
|
|
1914
|
+
* Compile a multi-hop traversal spec into one server-side nested
|
|
1915
|
+
* Pipeline and dispatch a single round trip.
|
|
1916
|
+
*
|
|
1917
|
+
* Backends declaring `traversal.serverSide` (Firestore Enterprise
|
|
1918
|
+
* today) install this method; everywhere else, it throws
|
|
1919
|
+
* `UNSUPPORTED_OPERATION`. The capability gate matches the type-level
|
|
1920
|
+
* surface — `GraphClient<C>` only exposes `runEngineTraversal` when
|
|
1921
|
+
* `'traversal.serverSide' extends C`.
|
|
1922
|
+
*
|
|
1923
|
+
* Most callers should not invoke this method directly; the
|
|
1924
|
+
* `createTraversal(...).run()` builder routes through it
|
|
1925
|
+
* automatically when `engineTraversal: 'auto'` (the default) and
|
|
1926
|
+
* the spec is eligible per `firestore-traverse-compiler.ts`. Calling
|
|
1927
|
+
* directly is appropriate for benchmarking or for callers that have
|
|
1928
|
+
* already shaped their hop chain into the strict
|
|
1929
|
+
* `EngineTraversalParams` shape.
|
|
1930
|
+
*
|
|
1931
|
+
* `params.sources.length === 0` short-circuits to empty per-hop
|
|
1932
|
+
* arrays. The backend never sees the call.
|
|
1933
|
+
*/
|
|
1934
|
+
async runEngineTraversal(params) {
|
|
1935
|
+
if (!this.backend.runEngineTraversal) {
|
|
1936
|
+
throw new FiregraphError(
|
|
1937
|
+
"runEngineTraversal() is not supported by the current storage backend. Backends without `traversal.serverSide` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent for in-graph specs (different round-trip profile).",
|
|
1938
|
+
"UNSUPPORTED_OPERATION"
|
|
1939
|
+
);
|
|
1940
|
+
}
|
|
1941
|
+
if (params.sources.length === 0) {
|
|
1942
|
+
return {
|
|
1943
|
+
hops: params.hops.map(() => ({ edges: [], sourceCount: 0 })),
|
|
1944
|
+
totalReads: 0
|
|
1945
|
+
};
|
|
1946
|
+
}
|
|
1947
|
+
return this.backend.runEngineTraversal(params);
|
|
1948
|
+
}
|
|
1949
|
+
// ---------------------------------------------------------------------------
|
|
1950
|
+
// Server-side projection (capability: query.select)
|
|
1951
|
+
// ---------------------------------------------------------------------------
|
|
1952
|
+
/**
|
|
1953
|
+
* Server-side projection — fetch only the requested fields from each
|
|
1954
|
+
* matching edge. The backend translates the call into a projecting query
|
|
1955
|
+
* (`SELECT json_extract(...)` on SQLite/DO, `Query.select(...)` on
|
|
1956
|
+
* Firestore Standard, classic projection on Enterprise) so the wire
|
|
1957
|
+
* payload is reduced to just the requested fields.
|
|
1958
|
+
*
|
|
1959
|
+
* Resolution rules for `select` (mirrored across all backends):
|
|
1960
|
+
*
|
|
1961
|
+
* - Built-in envelope fields (`aType`, `aUid`, `axbType`, `bType`,
|
|
1962
|
+
* `bUid`, `createdAt`, `updatedAt`, `v`) → resolve to the typed
|
|
1963
|
+
* column / Firestore field directly.
|
|
1964
|
+
* - `'data'` literal → returns the whole user payload.
|
|
1965
|
+
* - `'data.<x>'` → explicit nested path, returned at the same shape.
|
|
1966
|
+
* - bare name → rewritten to `data.<name>` (the canonical "give me a
|
|
1967
|
+
* few keys out of the JSON payload" shape).
|
|
1968
|
+
*
|
|
1969
|
+
* Empty `select: []` is rejected with `INVALID_QUERY`. Duplicate entries
|
|
1970
|
+
* are de-duped (first-occurrence order preserved); the result row carries
|
|
1971
|
+
* one slot per unique field.
|
|
1972
|
+
*
|
|
1973
|
+
* Migrations are *not* applied to the result. The caller asked for a
|
|
1974
|
+
* partial shape, and rehydrating it through the migration pipeline would
|
|
1975
|
+
* require synthesising every absent field — see
|
|
1976
|
+
* `StorageBackend.findEdgesProjected` for the rationale.
|
|
1977
|
+
*
|
|
1978
|
+
* Scan protection follows the `findEdges` rules: a query with no
|
|
1979
|
+
* identifying fields requires `allowCollectionScan: true` to pass. The
|
|
1980
|
+
* cap-less fallback would be `findEdges` + JS-side projection, but that
|
|
1981
|
+
* defeats the wire-payload reduction; backends without `query.select`
|
|
1982
|
+
* throw `UNSUPPORTED_OPERATION` rather than silently materialising full
|
|
1983
|
+
* rows.
|
|
1984
|
+
*/
|
|
1985
|
+
async findEdgesProjected(params) {
|
|
1986
|
+
if (!this.backend.findEdgesProjected) {
|
|
1987
|
+
throw new FiregraphError(
|
|
1988
|
+
"findEdgesProjected() is not supported by the current storage backend. There is no client-side fallback because the wire-payload reduction is the entire point of the API \u2014 use findEdges() and project in JS if the backend does not declare `query.select`.",
|
|
1989
|
+
"UNSUPPORTED_OPERATION"
|
|
1990
|
+
);
|
|
1991
|
+
}
|
|
1992
|
+
if (params.select.length === 0) {
|
|
1993
|
+
throw new FiregraphError(
|
|
1994
|
+
"findEdgesProjected() requires a non-empty `select` list.",
|
|
1995
|
+
"INVALID_QUERY"
|
|
1996
|
+
);
|
|
1997
|
+
}
|
|
1998
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1999
|
+
let filters;
|
|
2000
|
+
let options;
|
|
2001
|
+
if (plan.strategy === "get") {
|
|
2002
|
+
filters = [
|
|
2003
|
+
{ field: "aUid", op: "==", value: params.aUid },
|
|
2004
|
+
{ field: "axbType", op: "==", value: params.axbType },
|
|
2005
|
+
{ field: "bUid", op: "==", value: params.bUid }
|
|
2006
|
+
];
|
|
2007
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2008
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2009
|
+
options = void 0;
|
|
2010
|
+
} else {
|
|
2011
|
+
filters = plan.filters;
|
|
2012
|
+
options = plan.options;
|
|
2013
|
+
}
|
|
2014
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2015
|
+
const rows = await this.backend.findEdgesProjected(params.select, filters, options);
|
|
2016
|
+
return rows;
|
|
2017
|
+
}
|
|
2018
|
+
/**
|
|
2019
|
+
* Native vector / nearest-neighbour search (capability `search.vector`).
|
|
2020
|
+
*
|
|
2021
|
+
* Resolves to the top-K records by similarity, sorted nearest-first
|
|
2022
|
+
* (`EUCLIDEAN` / `COSINE`) or highest-first (`DOT_PRODUCT`). The wrapper
|
|
2023
|
+
* is intentionally thin: capability check, scan-protection, then forward
|
|
2024
|
+
* `params` verbatim to the backend. All field-path normalisation and
|
|
2025
|
+
* SDK-shape validation lives in the shared
|
|
2026
|
+
* `runFirestoreFindNearest` helper that both Firestore editions call —
|
|
2027
|
+
* keeping it there means the validation surface stays in lockstep with
|
|
2028
|
+
* the SDK call site, regardless of which backend is plugged in.
|
|
2029
|
+
*
|
|
2030
|
+
* Migrations are NOT applied. The vector index walked the raw stored
|
|
2031
|
+
* shape; rehydrating each row through the migration pipeline would
|
|
2032
|
+
* change the candidate set the index already chose. If you need
|
|
2033
|
+
* migrated shape, follow up with `getNode` / `findEdges` on the
|
|
2034
|
+
* returned UIDs — those paths apply migrations normally.
|
|
2035
|
+
*
|
|
2036
|
+
* Scan-protection mirrors `findEdges`: if no identifying filters
|
|
2037
|
+
* (`aType` / `axbType` / `bType`) and no `where` clauses are supplied,
|
|
2038
|
+
* the request must opt in via `allowCollectionScan: true`. The ANN
|
|
2039
|
+
* query still walks the candidate set the WHERE clause produces, so
|
|
2040
|
+
* an unfiltered nearest-neighbour search over a million-row collection
|
|
2041
|
+
* is the same scan trap as an unfiltered `findEdges`.
|
|
2042
|
+
*
|
|
2043
|
+
* Backends without `search.vector` throw `UNSUPPORTED_OPERATION` —
|
|
2044
|
+
* there is no client-side fallback because emulating ANN over the
|
|
2045
|
+
* generic backend surface (`findEdges` + JS-side cosine) doesn't scale
|
|
2046
|
+
* past trivial datasets and would give callers the wrong mental model
|
|
2047
|
+
* about cost.
|
|
2048
|
+
*/
|
|
2049
|
+
async findNearest(params) {
|
|
2050
|
+
if (!this.backend.findNearest) {
|
|
2051
|
+
throw new FiregraphError(
|
|
2052
|
+
"findNearest() is not supported by the current storage backend. Vector search requires a backend that declares `search.vector` (currently Firestore Standard and Enterprise). There is no client-side fallback because emulating ANN on top of the generic backend surface does not scale beyond toy datasets.",
|
|
2053
|
+
"UNSUPPORTED_OPERATION"
|
|
2054
|
+
);
|
|
2055
|
+
}
|
|
2056
|
+
const filters = [];
|
|
2057
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2058
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
2059
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2060
|
+
if (params.where) filters.push(...params.where);
|
|
2061
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2062
|
+
return this.backend.findNearest(params);
|
|
2063
|
+
}
|
|
2064
|
+
/**
|
|
2065
|
+
* Native full-text search (capability `search.fullText`).
|
|
2066
|
+
*
|
|
2067
|
+
* Returns the top-N records by relevance, ordered by the search
|
|
2068
|
+
* index's score. Only Firestore Enterprise declares this capability
|
|
2069
|
+
* today — the underlying Pipelines `search({ query: documentMatches(...) })`
|
|
2070
|
+
* stage requires Enterprise's FTS index. Standard does not declare
|
|
2071
|
+
* the cap (FTS is an Enterprise-only product feature, not a
|
|
2072
|
+
* typed-API gap), and the SQLite-shaped backends have no native
|
|
2073
|
+
* FTS index. Backends without `search.fullText` throw
|
|
2074
|
+
* `UNSUPPORTED_OPERATION` from this wrapper.
|
|
2075
|
+
*
|
|
2076
|
+
* Scan-protection mirrors `findNearest`: a search with no
|
|
2077
|
+
* identifying filters (`aType` / `axbType` / `bType`) walks every
|
|
2078
|
+
* row the index scored, so the request must opt in via
|
|
2079
|
+
* `allowCollectionScan: true`.
|
|
2080
|
+
*
|
|
2081
|
+
* Migrations are NOT applied. The FTS index walked the raw stored
|
|
2082
|
+
* shape; rehydrating each row through the migration pipeline would
|
|
2083
|
+
* change the candidate set the index already scored. If you need
|
|
2084
|
+
* migrated shape, follow up with `getNode` / `findEdges` on the
|
|
2085
|
+
* returned UIDs.
|
|
2086
|
+
*/
|
|
2087
|
+
async fullTextSearch(params) {
|
|
2088
|
+
if (!this.backend.fullTextSearch) {
|
|
2089
|
+
throw new FiregraphError(
|
|
2090
|
+
"fullTextSearch() is not supported by the current storage backend. Full-text search requires a backend that declares `search.fullText` (currently Firestore Enterprise only \u2014 FTS is an Enterprise product feature). There is no client-side fallback because emulating FTS over the generic backend surface would not scale beyond toy datasets.",
|
|
2091
|
+
"UNSUPPORTED_OPERATION"
|
|
2092
|
+
);
|
|
2093
|
+
}
|
|
2094
|
+
const filters = [];
|
|
2095
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2096
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
2097
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2098
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2099
|
+
return this.backend.fullTextSearch(params);
|
|
2100
|
+
}
|
|
2101
|
+
/**
|
|
2102
|
+
* Native geospatial distance search (capability `search.geo`).
|
|
2103
|
+
*
|
|
2104
|
+
* Returns rows whose `geoField` lies within `radiusMeters` of
|
|
2105
|
+
* `point`, ordered nearest-first by default. Only Firestore
|
|
2106
|
+
* Enterprise declares this capability — same Enterprise-only
|
|
2107
|
+
* gating as `fullTextSearch`. Backends without `search.geo` throw
|
|
2108
|
+
* `UNSUPPORTED_OPERATION` from this wrapper.
|
|
2109
|
+
*
|
|
2110
|
+
* Scan-protection mirrors `findNearest` and `fullTextSearch`.
|
|
2111
|
+
*
|
|
2112
|
+
* Migrations are NOT applied — same rationale as the other search
|
|
2113
|
+
* extensions.
|
|
2114
|
+
*/
|
|
2115
|
+
async geoSearch(params) {
|
|
2116
|
+
if (!this.backend.geoSearch) {
|
|
2117
|
+
throw new FiregraphError(
|
|
2118
|
+
"geoSearch() is not supported by the current storage backend. Geospatial search requires a backend that declares `search.geo` (currently Firestore Enterprise only \u2014 geo queries are an Enterprise product feature). There is no client-side fallback because emulating geo over the generic backend surface (haversine over `findEdges`) would not scale beyond trivial datasets.",
|
|
2119
|
+
"UNSUPPORTED_OPERATION"
|
|
2120
|
+
);
|
|
2121
|
+
}
|
|
2122
|
+
const filters = [];
|
|
2123
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2124
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
2125
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2126
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2127
|
+
return this.backend.geoSearch(params);
|
|
2128
|
+
}
|
|
2129
|
+
/**
|
|
2130
|
+
* Translate a `FindEdgesParams` into the `QueryFilter[]` shape the
|
|
2131
|
+
* backend `bulkDelete` / `bulkUpdate` methods expect. Mirrors the
|
|
2132
|
+
* `aggregate()` plan: a bare-empty params object becomes an empty
|
|
2133
|
+
* filter list (after a scan-protection check); a GET-shape (all three
|
|
2134
|
+
* identifiers) is rejected so we never silently turn a single-row
|
|
2135
|
+
* lookup into a server-side DML; otherwise we run `buildEdgeQueryPlan`
|
|
2136
|
+
* and surface its filters.
|
|
2137
|
+
*/
|
|
2138
|
+
buildDmlFilters(params) {
|
|
2139
|
+
const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
|
|
2140
|
+
if (!hasAnyFilter) {
|
|
2141
|
+
this.checkQuerySafety([], params.allowCollectionScan);
|
|
2142
|
+
return [];
|
|
2143
|
+
}
|
|
2144
|
+
const plan = buildEdgeQueryPlan(params);
|
|
2145
|
+
if (plan.strategy === "get") {
|
|
2146
|
+
throw new FiregraphError(
|
|
2147
|
+
"bulkDelete() / bulkUpdate() require a query, not a direct document lookup. Use removeEdge() / updateEdge() for single-row operations, or omit one of aUid/axbType/bUid to force a query strategy.",
|
|
2148
|
+
"INVALID_QUERY"
|
|
2149
|
+
);
|
|
2150
|
+
}
|
|
2151
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
2152
|
+
return plan.filters;
|
|
2153
|
+
}
|
|
2154
|
+
// ---------------------------------------------------------------------------
|
|
2155
|
+
// Dynamic registry methods
|
|
2156
|
+
// ---------------------------------------------------------------------------
|
|
2157
|
+
// Persist a dynamic node-type definition into the meta-registry.
// Only legal in dynamic registry mode; the name must not collide with the
// reserved meta-type names or with a node type the static registry already owns.
// The definition is stored as a regular node of type META_NODE_TYPE under a
// deterministic UID derived from `name`, so redefining the same name overwrites
// the previous definition.
async defineNodeType(name, jsonSchema, description, options) {
  if (!this.dynamicConfig) {
    throw new DynamicRegistryError(
      'defineNodeType() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
    );
  }
  if (RESERVED_TYPE_NAMES.has(name)) {
    throw new DynamicRegistryError(
      `Cannot define type "${name}": this name is reserved for the meta-registry.`
    );
  }
  // Static definitions win: a dynamic re-definition of a static node type is rejected.
  if (this.staticRegistry?.lookup(name, NODE_RELATION, name)) {
    throw new DynamicRegistryError(
      `Cannot define node type "${name}": already defined in the static registry.`
    );
  }
  const uid = generateDeterministicUid(META_NODE_TYPE, name);
  const data = { name, jsonSchema };
  // Optional fields are copied only when explicitly provided so the stored
  // document carries no `undefined` placeholders.
  if (description !== void 0) data.description = description;
  if (options?.titleField !== void 0) data.titleField = options.titleField;
  if (options?.subtitleField !== void 0) data.subtitleField = options.subtitleField;
  if (options?.viewTemplate !== void 0) data.viewTemplate = options.viewTemplate;
  if (options?.viewCss !== void 0) data.viewCss = options.viewCss;
  if (options?.allowedIn !== void 0) data.allowedIn = options.allowedIn;
  if (options?.migrationWriteBack !== void 0)
    data.migrationWriteBack = options.migrationWriteBack;
  if (options?.migrations !== void 0) {
    // serializeMigrations also pre-compiles each migration, so a broken
    // migration fails here rather than at first read.
    data.migrations = await this.serializeMigrations(options.migrations);
  }
  await this.putNode(META_NODE_TYPE, uid, data);
}
|
|
2188
|
+
// Persist a dynamic edge-type definition into the meta-registry.
// Only legal in dynamic registry mode; rejects reserved names and any
// (from, name, to) combination the static registry already defines. The
// definition is stored as a node of type META_EDGE_TYPE under a deterministic
// UID derived from `name`.
async defineEdgeType(name, topology, jsonSchema, description, options) {
  if (!this.dynamicConfig) {
    throw new DynamicRegistryError(
      'defineEdgeType() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
    );
  }
  if (RESERVED_TYPE_NAMES.has(name)) {
    throw new DynamicRegistryError(
      `Cannot define type "${name}": this name is reserved for the meta-registry.`
    );
  }
  if (this.staticRegistry) {
    // topology.from / topology.to accept a single type or a list; normalise
    // to arrays and check every (aType, name, bType) pair against the static
    // registry before writing anything.
    const fromTypes = Array.isArray(topology.from) ? topology.from : [topology.from];
    const toTypes = Array.isArray(topology.to) ? topology.to : [topology.to];
    for (const aType of fromTypes) {
      for (const bType of toTypes) {
        if (this.staticRegistry.lookup(aType, name, bType)) {
          throw new DynamicRegistryError(
            `Cannot define edge type "${name}" for (${aType}) -> (${bType}): already defined in the static registry.`
          );
        }
      }
    }
  }
  const uid = generateDeterministicUid(META_EDGE_TYPE, name);
  const data = {
    name,
    from: topology.from,
    to: topology.to
  };
  // Optional fields are copied only when explicitly provided so the stored
  // document carries no `undefined` placeholders.
  if (jsonSchema !== void 0) data.jsonSchema = jsonSchema;
  if (topology.inverseLabel !== void 0) data.inverseLabel = topology.inverseLabel;
  if (topology.targetGraph !== void 0) data.targetGraph = topology.targetGraph;
  if (description !== void 0) data.description = description;
  if (options?.titleField !== void 0) data.titleField = options.titleField;
  if (options?.subtitleField !== void 0) data.subtitleField = options.subtitleField;
  if (options?.viewTemplate !== void 0) data.viewTemplate = options.viewTemplate;
  if (options?.viewCss !== void 0) data.viewCss = options.viewCss;
  if (options?.allowedIn !== void 0) data.allowedIn = options.allowedIn;
  if (options?.migrationWriteBack !== void 0)
    data.migrationWriteBack = options.migrationWriteBack;
  if (options?.migrations !== void 0) {
    // serializeMigrations pre-compiles each migration, surfacing syntax
    // errors at definition time instead of at first read.
    data.migrations = await this.serializeMigrations(options.migrations);
  }
  await this.putNode(META_EDGE_TYPE, uid, data);
}
|
|
2234
|
+
async reloadRegistry() {
|
|
2235
|
+
if (!this.dynamicConfig) {
|
|
2236
|
+
throw new DynamicRegistryError(
|
|
2237
|
+
'reloadRegistry() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
|
|
2238
|
+
);
|
|
2239
|
+
}
|
|
2240
|
+
const reader = this.createMetaReader();
|
|
2241
|
+
const dynamicOnly = await createRegistryFromGraph(reader, this.migrationSandbox);
|
|
2242
|
+
if (this.staticRegistry) {
|
|
2243
|
+
this.dynamicRegistry = createMergedRegistry(this.staticRegistry, dynamicOnly);
|
|
2244
|
+
} else {
|
|
2245
|
+
this.dynamicRegistry = dynamicOnly;
|
|
2246
|
+
}
|
|
2247
|
+
}
|
|
2248
|
+
async serializeMigrations(migrations) {
|
|
2249
|
+
const result = migrations.map((m) => {
|
|
2250
|
+
const source = typeof m.up === "function" ? m.up.toString() : m.up;
|
|
2251
|
+
return { fromVersion: m.fromVersion, toVersion: m.toVersion, up: source };
|
|
2252
|
+
});
|
|
2253
|
+
await Promise.all(result.map((m) => precompileSource(m.up, this.migrationSandbox)));
|
|
2254
|
+
return result;
|
|
2255
|
+
}
|
|
2256
|
+
/**
|
|
2257
|
+
* Build a `GraphReader` over the meta-backend. If meta lives in the same
|
|
2258
|
+
* collection as the main backend, `this` is returned directly.
|
|
2259
|
+
*/
|
|
2260
|
+
// Build a `GraphReader` over the meta-backend. When no dedicated meta-backend
// is configured the client itself (which already implements the reader
// surface) is returned; otherwise a thin reader is assembled directly over the
// meta-backend's document API. Note this raw reader applies no migrations and
// no scan protection — it exists solely so registry loading can read meta
// documents.
createMetaReader() {
  if (!this.metaBackend) return this;
  const backend = this.metaBackend;
  const executeMetaQuery = (filters, options) => backend.query(filters, options);
  return {
    async getNode(uid) {
      return backend.getDoc(computeNodeDocId(uid));
    },
    async getEdge(aUid, axbType, bUid) {
      return backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
    },
    async edgeExists(aUid, axbType, bUid) {
      const record = await backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
      return record !== null;
    },
    async findEdges(params) {
      // A fully-identified query degenerates to a single-document GET.
      const plan = buildEdgeQueryPlan(params);
      if (plan.strategy === "get") {
        const record = await backend.getDoc(plan.docId);
        return record ? [record] : [];
      }
      return executeMetaQuery(plan.filters, plan.options);
    },
    async findNodes(params) {
      // Same GET-vs-query split as findEdges, using the node plan.
      const plan = buildNodeQueryPlan(params);
      if (plan.strategy === "get") {
        const record = await backend.getDoc(plan.docId);
        return record ? [record] : [];
      }
      return executeMetaQuery(plan.filters, plan.options);
    }
  };
}
|
|
2293
|
+
};
|
|
2294
|
+
// Public factory: wraps a storage backend (and optional meta-backend) in a
// GraphClientImpl instance.
function createGraphClient(backend, options, metaBackend) {
  const client = new GraphClientImpl(backend, options, metaBackend);
  return client;
}
|
|
2297
|
+
|
|
2298
|
+
// src/id.ts
|
|
2299
|
+
var import_nanoid = require("nanoid");
|
|
2300
|
+
// Generate a random identifier via nanoid. The destructured alias keeps the
// call receiver-free, matching the original `(0, nanoid)()` indirection.
function generateId() {
  const { nanoid: makeId } = import_nanoid;
  return makeId();
}
|
|
2303
|
+
|
|
2304
|
+
// src/firestore-standard/backend.ts
|
|
2305
|
+
var import_firestore3 = require("@google-cloud/firestore");
|
|
2306
|
+
|
|
2307
|
+
// src/bulk.ts
|
|
2308
|
+
// Upper bound on writes per committed batch; bulkDeleteDocIds clamps any
// caller-supplied batchSize to this value (500 matches the documented
// Firestore WriteBatch limit — confirm against the SDK version in use).
var MAX_BATCH_SIZE = 500;
// Default number of retries per failed batch commit in bulkDeleteDocIds.
var DEFAULT_MAX_RETRIES = 3;
// Base delay for the exponential backoff between retries (200ms, 400ms, ...).
var BASE_DELAY_MS = 200;
|
|
2311
|
+
// Promise-based delay: resolves (with undefined) after `ms` milliseconds.
function sleep(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
|
|
2314
|
+
// Split `arr` into consecutive slices of at most `size` elements.
// The final slice may be shorter; an empty input yields an empty list.
function chunk(arr, size) {
  const out = [];
  let start = 0;
  while (start < arr.length) {
    out.push(arr.slice(start, start + size));
    start += size;
  }
  return out;
}
|
|
2321
|
+
// Delete a list of document IDs from `collectionPath` in batched commits.
// Each batch is retried with exponential backoff (base 200ms, doubling per
// attempt); a batch that exhausts its retries is recorded in `errors` and the
// remaining batches still run. Returns { deleted, batches, errors } where
// `batches` counts only successfully committed batches.
async function bulkDeleteDocIds(db, collectionPath, docIds, options) {
  if (docIds.length === 0) {
    return { deleted: 0, batches: 0, errors: [] };
  }
  // Clamp the caller's batch size to the backend's per-batch write cap.
  const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
  const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
  const onProgress = options?.onProgress;
  const chunks = chunk(docIds, batchSize);
  const errors = [];
  let deleted = 0;
  let completedBatches = 0;
  for (let i = 0; i < chunks.length; i++) {
    const ids = chunks[i];
    let committed = false;
    // Retry loop: a fresh batch object is built per attempt because a failed
    // batch cannot be re-committed.
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        const batch = db.batch();
        const collectionRef = db.collection(collectionPath);
        for (const id of ids) {
          batch.delete(collectionRef.doc(id));
        }
        await batch.commit();
        committed = true;
        deleted += ids.length;
        break;
      } catch (err) {
        if (attempt < maxRetries) {
          // Exponential backoff before the next attempt.
          const delay = BASE_DELAY_MS * Math.pow(2, attempt);
          await sleep(delay);
        } else {
          // Final attempt failed: record the error and move on to the next batch.
          errors.push({
            batchIndex: i,
            error: err instanceof Error ? err : new Error(String(err)),
            operationCount: ids.length
          });
        }
      }
    }
    if (committed) {
      completedBatches++;
    }
    // Progress is reported after every batch, committed or failed.
    if (onProgress) {
      onProgress({
        completedBatches,
        totalBatches: chunks.length,
        deletedSoFar: deleted
      });
    }
  }
  return { deleted, batches: completedBatches, errors };
}
|
|
2372
|
+
// Resolve the edges matching `params`, then bulk-delete their documents.
// Collection scans are allowed by default here (the caller already opted into
// a bulk operation); when no limit is supplied the query is made unbounded
// (limit: 0) so every matching edge is removed.
async function bulkRemoveEdges(db, collectionPath, reader, params, options) {
  const effectiveParams = { ...params, allowCollectionScan: params.allowCollectionScan ?? true };
  if (params.limit === void 0) effectiveParams.limit = 0;
  const edges = await reader.findEdges(effectiveParams);
  const docIds = edges.map((edge) => computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid));
  return bulkDeleteDocIds(db, collectionPath, docIds, options);
}
|
|
2378
|
+
// Recursively delete every document in every subcollection beneath
// collectionPath/docId (depth-first: grandchildren are removed before their
// parent documents). Uses select() so only document IDs travel over the wire.
// Returns { deleted, errors } aggregated across all levels. onProgress is not
// propagated to nested deletes — only batchSize/maxRetries are forwarded.
async function deleteSubcollectionsRecursive(db, collectionPath, docId, options) {
  const docRef = db.collection(collectionPath).doc(docId);
  const subcollections = await docRef.listCollections();
  if (subcollections.length === 0) return { deleted: 0, errors: [] };
  let totalDeleted = 0;
  const allErrors = [];
  // Strip onProgress for recursive calls; keep the retry/batch knobs.
  const subOptions = options ? { batchSize: options.batchSize, maxRetries: options.maxRetries } : void 0;
  for (const subCollRef of subcollections) {
    const subCollPath = subCollRef.path;
    // select() with no fields fetches IDs only.
    const snapshot = await subCollRef.select().get();
    const subDocIds = snapshot.docs.map((d) => d.id);
    // Recurse first so nested subcollections are emptied before their
    // parent documents are deleted.
    for (const subDocId of subDocIds) {
      const subResult = await deleteSubcollectionsRecursive(db, subCollPath, subDocId, subOptions);
      totalDeleted += subResult.deleted;
      allErrors.push(...subResult.errors);
    }
    if (subDocIds.length > 0) {
      const result = await bulkDeleteDocIds(db, subCollPath, subDocIds, subOptions);
      totalDeleted += result.deleted;
      allErrors.push(...result.errors);
    }
  }
  return { deleted: totalDeleted, errors: allErrors };
}
|
|
2402
|
+
// Cascade-delete a node: remove all edges touching `uid` (both directions,
// de-duplicated, excluding the NODE_RELATION self-edge that represents the
// node itself), optionally its subcollections, and finally the node document.
// Returns counters distinguishing edge deletions from the node deletion.
async function removeNodeCascade(db, collectionPath, reader, uid, options) {
  // Fetch outgoing and incoming edges in parallel, unbounded.
  const [outgoingRaw, incomingRaw] = await Promise.all([
    reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
    reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
  ]);
  // NODE_RELATION rows represent the node record itself — handled separately.
  const outgoing = outgoingRaw.filter((e) => e.axbType !== NODE_RELATION);
  const incoming = incomingRaw.filter((e) => e.axbType !== NODE_RELATION);
  // De-dupe by doc ID (a self-edge appears in both directions).
  const edgeDocIdSet = /* @__PURE__ */ new Set();
  const allEdges = [];
  for (const edge of [...outgoing, ...incoming]) {
    const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
    if (!edgeDocIdSet.has(docId)) {
      edgeDocIdSet.add(docId);
      allEdges.push(edge);
    }
  }
  // Subcollection cleanup defaults to on; opt out via deleteSubcollections: false.
  const shouldDeleteSubcollections = options?.deleteSubcollections !== false;
  const nodeDocId = computeNodeDocId(uid);
  let subcollectionResult = { deleted: 0, errors: [] };
  if (shouldDeleteSubcollections) {
    subcollectionResult = await deleteSubcollectionsRecursive(
      db,
      collectionPath,
      nodeDocId,
      options
    );
  }
  // The node doc is appended LAST so it lands in the final chunk — that
  // position is what the nodeDeleted accounting below relies on.
  const edgeDocIds = allEdges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
  const allDocIds = [...edgeDocIds, nodeDocId];
  const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
  const result = await bulkDeleteDocIds(db, collectionPath, allDocIds, {
    ...options,
    batchSize
  });
  // The node was deleted iff the last chunk (which contains it) committed.
  const totalChunks = Math.ceil(allDocIds.length / batchSize);
  const nodeChunkIndex = totalChunks - 1;
  const nodeDeleted = !result.errors.some((e) => e.batchIndex === nodeChunkIndex);
  // Subtract the node doc from the top-level count to get edge deletions only.
  const topLevelEdgesDeleted = nodeDeleted ? result.deleted - 1 : result.deleted;
  return {
    deleted: result.deleted + subcollectionResult.deleted,
    batches: result.batches,
    errors: [...result.errors, ...subcollectionResult.errors],
    edgesDeleted: topLevelEdgesDeleted,
    nodeDeleted
  };
}
|
|
2448
|
+
|
|
2449
|
+
// src/internal/backend.ts
|
|
2450
|
+
// Wrap a Set of capability strings in a read-only view exposing
// membership (`has`) and iteration (`values`).
function createCapabilities(caps) {
  const has = (capability) => caps.has(capability);
  const values = () => caps.values();
  return { has, values };
}
|
|
2456
|
+
|
|
2457
|
+
// src/internal/firestore-aggregate.ts
|
|
2458
|
+
var import_firestore2 = require("@google-cloud/firestore");
|
|
2459
|
+
// Fold a list of { field, op, value } filters onto a Firestore query via
// chained where() calls. An empty list returns `base` unchanged.
function applyFiltersToQuery(base, filters) {
  return filters.reduce((query, filter) => query.where(filter.field, filter.op, filter.value), base);
}
|
|
2466
|
+
// Run a count/sum/avg aggregation spec against a Firestore query using the
// classic AggregateField API, shared by both Firestore editions. Validates the
// spec shape (count takes no field; sum/avg require one; anything else is
// UNSUPPORTED_AGGREGATE), executes one aggregate() round-trip, and normalises
// missing results (null/undefined → 0, or NaN for avg over zero rows).
async function runFirestoreAggregate(base, spec, filters, { edition }) {
  if (Object.keys(spec).length === 0) {
    throw new FiregraphError(
      "aggregate() requires at least one aggregation in the `aggregates` map.",
      "INVALID_QUERY"
    );
  }
  const filtered = applyFiltersToQuery(base, filters);
  const aggregations = {};
  for (const [alias, { op, field }] of Object.entries(spec)) {
    if (op === "count") {
      // count is row-based: a field here is a caller mistake, not a no-op.
      if (field !== void 0) {
        throw new FiregraphError(
          `Aggregate '${alias}' op 'count' must not specify a field \u2014 count operates on rows, not a column expression.`,
          "INVALID_QUERY"
        );
      }
      aggregations[alias] = import_firestore2.AggregateField.count();
      continue;
    }
    if (!field) {
      throw new FiregraphError(
        `Aggregate '${alias}' op '${op}' requires a field.`,
        "INVALID_QUERY"
      );
    }
    if (op === "sum") {
      aggregations[alias] = import_firestore2.AggregateField.sum(field);
    } else if (op === "avg") {
      aggregations[alias] = import_firestore2.AggregateField.average(field);
    } else {
      // min/max and friends have no classic-API equivalent on Firestore.
      const editionLabel = edition === "enterprise" ? "Firestore Enterprise" : "Firestore Standard";
      throw new FiregraphError(
        `Aggregate op '${op}' is not supported on ${editionLabel}. Both Firestore editions support count/sum/avg via the classic Query API; min/max requires a backend with SQL aggregation (SQLite or DO).`,
        "UNSUPPORTED_AGGREGATE"
      );
    }
  }
  const snap = await filtered.aggregate(aggregations).get();
  const data = snap.data();
  const out = {};
  for (const alias of Object.keys(spec)) {
    const v = data[alias];
    if (v === null || v === void 0) {
      // Zero-row result: avg is undefined (NaN); count/sum default to 0.
      const op = spec[alias].op;
      out[alias] = op === "avg" ? Number.NaN : 0;
    } else {
      out[alias] = v;
    }
  }
  return out;
}
|
|
2518
|
+
|
|
2519
|
+
// src/internal/firestore-classic-adapter.ts
|
|
2520
|
+
// Build the generic document-adapter surface (getDoc/setDoc/updateDoc/
// deleteDoc/query) over a single Firestore collection using the classic SDK.
// getDoc returns null for missing documents; query supports equality-style
// filters plus optional orderBy (direction defaults to "asc") and limit.
function createFirestoreAdapter(db, collectionPath) {
  const collectionRef = db.collection(collectionPath);
  return {
    collectionPath,
    async getDoc(docId) {
      const snap = await collectionRef.doc(docId).get();
      if (!snap.exists) return null;
      return snap.data();
    },
    async setDoc(docId, data, options) {
      // Merge writes preserve fields absent from `data`; plain set replaces.
      if (options?.merge) {
        await collectionRef.doc(docId).set(data, { merge: true });
      } else {
        await collectionRef.doc(docId).set(data);
      }
    },
    async updateDoc(docId, data) {
      await collectionRef.doc(docId).update(data);
    },
    async deleteDoc(docId) {
      await collectionRef.doc(docId).delete();
    },
    async query(filters, options) {
      let q = collectionRef;
      for (const f of filters) {
        q = q.where(f.field, f.op, f.value);
      }
      if (options?.orderBy) {
        q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
      }
      if (options?.limit !== void 0) {
        q = q.limit(options.limit);
      }
      const snap = await q.get();
      return snap.docs.map((doc) => doc.data());
    }
  };
}
|
|
2558
|
+
// Transaction-scoped variant of the document adapter: reads go through
// tx.get(), and the write methods are synchronous because Firestore
// transactions only stage writes (they commit when the transaction resolves).
// Mirrors createFirestoreAdapter's filter/orderBy/limit query shape.
function createTransactionAdapter(db, collectionPath, tx) {
  const collectionRef = db.collection(collectionPath);
  return {
    async getDoc(docId) {
      const snap = await tx.get(collectionRef.doc(docId));
      if (!snap.exists) return null;
      return snap.data();
    },
    // Writes are staged on the transaction, hence no async/await here.
    setDoc(docId, data, options) {
      if (options?.merge) {
        tx.set(collectionRef.doc(docId), data, { merge: true });
      } else {
        tx.set(collectionRef.doc(docId), data);
      }
    },
    updateDoc(docId, data) {
      tx.update(collectionRef.doc(docId), data);
    },
    deleteDoc(docId) {
      tx.delete(collectionRef.doc(docId));
    },
    async query(filters, options) {
      let q = collectionRef;
      for (const f of filters) {
        q = q.where(f.field, f.op, f.value);
      }
      if (options?.orderBy) {
        q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
      }
      if (options?.limit !== void 0) {
        q = q.limit(options.limit);
      }
      // Queries inside a transaction must also go through tx.get().
      const snap = await tx.get(q);
      return snap.docs.map((doc) => doc.data());
    }
  };
}
|
|
2595
|
+
// Write-only adapter over a Firestore WriteBatch: set/update/delete stage
// operations on one batch created up front; commit() flushes them atomically.
function createBatchAdapter(db, collectionPath) {
  const collectionRef = db.collection(collectionPath);
  const batch = db.batch();
  const refFor = (docId) => collectionRef.doc(docId);
  const setDoc = (docId, data, options) => {
    // A merge write is a distinct 3-arg call; a plain set replaces the doc.
    if (options?.merge) {
      batch.set(refFor(docId), data, { merge: true });
    } else {
      batch.set(refFor(docId), data);
    }
  };
  const updateDoc = (docId, data) => {
    batch.update(refFor(docId), data);
  };
  const deleteDoc = (docId) => {
    batch.delete(refFor(docId));
  };
  const commit = async () => {
    await batch.commit();
  };
  return { setDoc, updateDoc, deleteDoc, commit };
}
|
|
2617
|
+
|
|
2618
|
+
// src/internal/firestore-classic-expand.ts
|
|
2619
|
+
// Max operands per classic-Firestore `in` filter (30 matches the documented
// SDK query limit — confirm against the SDK version in use); source UIDs are
// chunked at this size when expanding.
var FIRESTORE_CLASSIC_IN_CHUNK_SIZE = 30;
|
|
2620
|
+
// Read a (possibly dot-separated) field path out of a record. A single-segment
// path is a direct property read; nested paths walk segment by segment and
// yield undefined as soon as a non-object is hit along the way.
function readField(record, path) {
  const segments = path.split(".");
  if (segments.length === 1) {
    return record[path];
  }
  let current = record;
  for (const segment of segments) {
    if (current === null || typeof current !== "object") return void 0;
    current = current[segment];
  }
  return current;
}
|
|
2631
|
+
/**
 * Loose comparator used for client-side re-sorting of merged chunk
 * results. Nullish sorts first; same-typeof values use the native `<`
 * operator; mixed types fall back to comparing their string forms.
 */
function compareFieldValues(a, b) {
  // Identical values (covers equal primitives) compare equal.
  if (a === b) return 0;
  // Nullish (null or undefined) sorts before everything else.
  if (a == null) return -1;
  if (b == null) return 1;
  if (typeof a === typeof b) {
    // Same runtime type: rely on the relational operator.
    return a < b ? -1 : 1;
  }
  // Mixed types: lexicographic comparison of stringified values.
  const left = String(a);
  const right = String(b);
  if (left < right) return -1;
  if (left > right) return 1;
  return 0;
}
|
|
2644
|
+
/**
 * Fan out from `params.sources` over one edge type using the classic
 * Firestore API, optionally hydrating the target node records.
 *
 * Sources are split into 30-element chunks (the classic `'in'` cap) and
 * the chunk queries run in parallel; merged results are filtered,
 * re-sorted client-side and sliced to a total limit so the observable
 * contract matches a single `IN (...) ORDER BY ... LIMIT N` query.
 *
 * Returns `{ edges }` or, when `params.hydrate` is set,
 * `{ edges, targets }` where `targets[i]` is the node record for
 * `edges[i]`'s target uid (or null when no node record was found).
 */
async function runFirestoreClassicExpand(adapter, params) {
  if (params.sources.length === 0) {
    return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
  }
  if (params.axbType.length === 0) {
    throw new FiregraphError("expand(): axbType must be a non-empty string.", "INVALID_QUERY");
  }
  // "forward" follows aUid -> bUid; "reverse" follows bUid -> aUid.
  const direction = params.direction ?? "forward";
  const sourceField = direction === "forward" ? "aUid" : "bUid";
  const chunks = chunkUids(params.sources, FIRESTORE_CLASSIC_IN_CHUNK_SIZE);
  // Overall cap across all sources (per-source limits are approximated
  // per chunk below, then enforced globally after the merge).
  const totalLimit = params.limitPerSource !== void 0 ? params.sources.length * params.limitPerSource : void 0;
  const buildFilters = (chunk2) => {
    const filters = [
      { field: "axbType", op: "==", value: params.axbType },
      { field: sourceField, op: "in", value: chunk2 }
    ];
    if (params.aType !== void 0) {
      filters.push({ field: "aType", op: "==", value: params.aType });
    }
    if (params.bType !== void 0) {
      filters.push({ field: "bType", op: "==", value: params.bType });
    }
    return filters;
  };
  const buildOptions = (chunk2) => {
    const opts = {};
    if (params.orderBy) opts.orderBy = params.orderBy;
    if (params.limitPerSource !== void 0) {
      // Upper bound for this chunk: every source could hit its limit.
      opts.limit = chunk2.length * params.limitPerSource;
    }
    return opts;
  };
  const chunkResults = await Promise.all(
    chunks.map((chunk2) => adapter.query(buildFilters(chunk2), buildOptions(chunk2)))
  );
  let edges = chunkResults.flat();
  if (params.axbType === NODE_RELATION) {
    // Node records are stored as self-edges; exclude them from expansion.
    edges = edges.filter((e) => e.aUid !== e.bUid);
  }
  if (params.orderBy) {
    // Per-chunk ordering is lost by the merge; re-sort across chunks.
    const sortField = params.orderBy.field;
    const dir = params.orderBy.direction ?? "asc";
    edges.sort((a, b) => {
      const cmp = compareFieldValues(readField(a, sortField), readField(b, sortField));
      return dir === "asc" ? cmp : -cmp;
    });
  }
  if (totalLimit !== void 0 && edges.length > totalLimit) {
    edges = edges.slice(0, totalLimit);
  }
  if (!params.hydrate) return { edges };
  // Hydration: fetch the node record (self-edge) for each distinct target.
  const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
  const uniqueTargets = [...new Set(targetUids)];
  if (uniqueTargets.length === 0) {
    return { edges, targets: [] };
  }
  const hydrateChunks = chunkUids(uniqueTargets, FIRESTORE_CLASSIC_IN_CHUNK_SIZE);
  const hydrateResults = await Promise.all(
    hydrateChunks.map(
      (chunk2) => adapter.query([
        { field: "axbType", op: "==", value: NODE_RELATION },
        { field: "aUid", op: "in", value: chunk2 }
      ])
    )
  );
  const byUid = /* @__PURE__ */ new Map();
  for (const row of hydrateResults.flat()) {
    // NOTE(review): keyed by bUid — for NODE_RELATION self-edges aUid and
    // bUid presumably coincide; confirm against the node-write path.
    byUid.set(row.bUid, row);
  }
  // Preserve edge order; missing node records become null.
  const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
  return { edges, targets };
}
|
|
2716
|
+
/**
 * Split `uids` into consecutive slices of at most `chunkSize` elements.
 * Throws FiregraphError("INVALID_QUERY") for a non-positive chunk size.
 */
function chunkUids(uids, chunkSize) {
  if (chunkSize <= 0) {
    throw new FiregraphError(
      `chunkUids: chunkSize must be positive (got ${chunkSize}).`,
      "INVALID_QUERY"
    );
  }
  const chunks = [];
  let start = 0;
  while (start < uids.length) {
    chunks.push(uids.slice(start, start + chunkSize));
    start += chunkSize;
  }
  return chunks;
}
|
|
2729
|
+
|
|
2730
|
+
// src/internal/projection.ts
// Envelope fields that live at the document root rather than under `data`.
var PROJECTION_BUILTIN_FIELDS = /* @__PURE__ */ new Set([
  "aType",
  "aUid",
  "axbType",
  "bType",
  "bUid",
  "createdAt",
  "updatedAt",
  "v"
]);
/**
 * Map a caller-supplied projection field to its canonical Firestore path:
 * builtin envelope fields and `data`/`data.*` paths pass through, any
 * other bare name is scoped under `data.`.
 */
function normalizeFirestoreProjectionField(field) {
  const isBuiltin = PROJECTION_BUILTIN_FIELDS.has(field);
  const isDataScoped = field === "data" || field.startsWith("data.");
  return isBuiltin || isDataScoped ? field : `data.${field}`;
}
|
|
2746
|
+
/**
 * Resolve a canonical projection path against a document object,
 * normalizing "missing" (`undefined` anywhere along the path) to `null`.
 * A non-dotted path is read directly off the object without any
 * intermediate type check, matching plain property access.
 */
function readProjectionPath(obj, path) {
  if (obj === void 0 || obj === null) return null;
  let value;
  if (!path.includes(".")) {
    value = obj[path];
  } else {
    let node = obj;
    for (const segment of path.split(".")) {
      if (node == null || typeof node !== "object") {
        node = void 0;
        break;
      }
      node = node[segment];
    }
    value = node;
  }
  return value === void 0 ? null : value;
}
|
|
2760
|
+
|
|
2761
|
+
// src/internal/firestore-projection.ts
/**
 * Shared classic-API projection query used by both Firestore editions.
 * Deduplicates the select list (first occurrence wins), applies filters /
 * order / limit, issues a server-side `select(...)`, and re-keys each row
 * by the caller's original field spellings with missing values as null.
 */
async function runFirestoreFindEdgesProjected(base, select, filters, options) {
  if (select.length === 0) {
    throw new FiregraphError(
      "findEdgesProjected requires a non-empty select list \u2014 an empty projection has no representation distinct from `findEdges`.",
      "INVALID_QUERY"
    );
  }
  // Dedupe while preserving first-seen order; remember both the caller's
  // spelling and the canonical Firestore path.
  const fields = [];
  const seenNames = new Set();
  for (const name of select) {
    if (seenNames.has(name)) continue;
    seenNames.add(name);
    fields.push({ original: name, canonical: normalizeFirestoreProjectionField(name) });
  }
  let query = filters.reduce((acc, f) => acc.where(f.field, f.op, f.value), base);
  if (options?.orderBy) {
    query = query.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
  }
  if (options?.limit !== void 0) {
    query = query.limit(options.limit);
  }
  query = query.select(...fields.map((p) => p.canonical));
  const snapshot = await query.get();
  return snapshot.docs.map((doc) => {
    const data = doc.data();
    const row = {};
    for (const { original, canonical } of fields) {
      row[original] = readProjectionPath(data, canonical);
    }
    return row;
  });
}
|
|
2798
|
+
|
|
2799
|
+
// src/internal/firestore-vector.ts
// Root-level envelope fields that can never hold a vector.
var ENVELOPE_FIELDS = /* @__PURE__ */ new Set([
  "aType",
  "aUid",
  "axbType",
  "bType",
  "bUid",
  "createdAt",
  "updatedAt",
  "v"
]);
/**
 * Canonicalize a vector field path: reject envelope fields outright,
 * pass `data`/`data.*` through, and scope any other bare name under
 * `data.`. `label` names the offending parameter in error messages.
 */
function normalizeVectorFieldPath(label, field) {
  if (ENVELOPE_FIELDS.has(field)) {
    throw new FiregraphError(
      `findNearest(): ${label} '${field}' is a built-in envelope field \u2014 vectors must live under \`data.*\`. Use a path like 'data.${field}' if you really meant a nested data field.`,
      "INVALID_QUERY"
    );
  }
  const alreadyScoped = field === "data" || field.startsWith("data.");
  return alreadyScoped ? field : `data.${field}`;
}
|
|
2820
|
+
/**
 * Build the equality filter list for a vector query from the optional
 * aType / axbType / bType params (in that order), followed by any extra
 * caller-supplied `where` filters.
 */
function buildVectorFilters(params) {
  const filters = [];
  const typeConstraints = [
    ["aType", params.aType],
    ["axbType", params.axbType],
    ["bType", params.bType]
  ];
  for (const [field, value] of typeConstraints) {
    if (value) filters.push({ field, op: "==", value });
  }
  if (params.where) filters.push(...params.where);
  return filters;
}
|
|
2828
|
+
/**
 * Coerce a query vector into a plain number[]: arrays pass through,
 * objects exposing `toArray()` (Firestore VectorValue shape) are
 * converted, anything else is rejected.
 */
function toNumberArray(qv) {
  if (Array.isArray(qv)) {
    return qv;
  }
  if (typeof qv.toArray === "function") {
    return qv.toArray();
  }
  throw new FiregraphError(
    "findNearest(): queryVector must be a number[] or a Firestore VectorValue.",
    "INVALID_QUERY"
  );
}
|
|
2838
|
+
/**
 * Shared classic-API nearest-neighbour query used by both Firestore
 * editions. Validates the query vector and limit, canonicalizes the
 * vector field paths, applies the type/where filters, then runs
 * Firestore's findNearest and returns the matching documents' data.
 */
async function runFirestoreFindNearest(base, params) {
  const queryVector = toNumberArray(params.queryVector);
  if (queryVector.length === 0) {
    throw new FiregraphError(
      "findNearest(): queryVector is empty \u2014 at least one dimension is required.",
      "INVALID_QUERY"
    );
  }
  const limitIsValid = Number.isInteger(params.limit) && params.limit > 0 && params.limit <= 1e3;
  if (!limitIsValid) {
    throw new FiregraphError(
      `findNearest(): limit must be a positive integer \u2264 1000 (got ${params.limit}).`,
      "INVALID_QUERY"
    );
  }
  // Canonicalize field paths before building the query so invalid fields
  // fail fast.
  const vectorField = normalizeVectorFieldPath("vectorField", params.vectorField);
  const distanceResultField =
    params.distanceResultField !== void 0
      ? normalizeVectorFieldPath("distanceResultField", params.distanceResultField)
      : void 0;
  const filtered = applyFiltersToQuery(base, buildVectorFilters(params));
  const nearestOptions = {
    vectorField,
    queryVector: queryVector,
    limit: params.limit,
    distanceMeasure: params.distanceMeasure
  };
  // Optional knobs are only set when provided so Firestore defaults apply.
  if (params.distanceThreshold !== void 0) {
    nearestOptions.distanceThreshold = params.distanceThreshold;
  }
  if (distanceResultField !== void 0) {
    nearestOptions.distanceResultField = distanceResultField;
  }
  const snapshot = await filtered.findNearest(nearestOptions).get();
  return snapshot.docs.map((doc) => doc.data());
}
|
|
2866
|
+
|
|
2867
|
+
// src/firestore-standard/backend.ts
// Bundler-generated init shim for the serialization chunk — presumably
// idempotent; runs once before any backend code uses (de)serialization.
init_serialization();
// Capability identifiers advertised by the Standard-edition backend
// (consumed via createCapabilities(STANDARD_CAPS) on the backend class).
var STANDARD_CAPS = /* @__PURE__ */ new Set([
  "core.read",
  "core.write",
  "core.transactions",
  "core.batch",
  "core.subgraph",
  "query.aggregate",
  "query.select",
  "query.join",
  "search.vector",
  "raw.firestore"
]);
|
|
2881
|
+
/**
 * Turn a data-op's path segments into a dotted Firestore field path under
 * `data.`, after validating the segments via assertSafePath.
 */
function dottedDataPath(op) {
  assertSafePath(op.path);
  const joined = op.path.join(".");
  return `data.${joined}`;
}
|
|
2885
|
+
/**
 * Translate a backend-neutral update payload into a Firestore update map.
 * Always bumps `updatedAt` to a server timestamp; then either replaces
 * `data` wholesale (after deserializing Firestore-native types) or applies
 * individual dotted-path data ops, using FieldValue.delete() for removals.
 * An explicit schema version `v` is carried through when present.
 */
function buildFirestoreUpdate(update, db) {
  assertUpdatePayloadExclusive(update);
  const payload = {
    updatedAt: import_firestore3.FieldValue.serverTimestamp()
  };
  if (update.replaceData) {
    payload.data = deserializeFirestoreTypes(update.replaceData, db);
  } else if (update.dataOps) {
    for (const dataOp of update.dataOps) {
      payload[dottedDataPath(dataOp)] = dataOp.delete
        ? import_firestore3.FieldValue.delete()
        : dataOp.value;
    }
  }
  if (update.v !== void 0) {
    payload.v = update.v;
  }
  return payload;
}
|
|
2903
|
+
/**
 * Copy the writable envelope fields of a record and stamp createdAt /
 * updatedAt with the same server timestamp. `v` is only included when the
 * caller provided one.
 */
function stampWritableRecord(record) {
  const timestamp = import_firestore3.FieldValue.serverTimestamp();
  const { aType, aUid, axbType, bType, bUid, data, v } = record;
  const stamped = {
    aType,
    aUid,
    axbType,
    bType,
    bUid,
    data,
    createdAt: timestamp,
    updatedAt: timestamp
  };
  if (v !== void 0) stamped.v = v;
  return stamped;
}
|
|
2918
|
+
/**
 * Transaction-scoped backend: each call forwards to a transaction-bound
 * adapter. Write methods stamp timestamps / build Firestore update
 * payloads before delegating; they are async for interface symmetry and
 * resolve once the write has been queued on the transaction.
 */
var FirestoreStandardTransactionBackend = class {
  constructor(adapter, db) {
    this.adapter = adapter;
    this.db = db;
  }
  // Reads pass straight through to the adapter.
  getDoc(docId) {
    return this.adapter.getDoc(docId);
  }
  query(filters, options) {
    return this.adapter.query(filters, options);
  }
  async setDoc(docId, record, mode) {
    const writeOptions = mode === "merge" ? { merge: true } : void 0;
    this.adapter.setDoc(docId, stampWritableRecord(record), writeOptions);
  }
  async updateDoc(docId, update) {
    const payload = buildFirestoreUpdate(update, this.db);
    this.adapter.updateDoc(docId, payload);
  }
  async deleteDoc(docId) {
    this.adapter.deleteDoc(docId);
  }
};
|
|
2943
|
+
/**
 * Batch-scoped backend: mutations are queued on a batch adapter and only
 * persisted when `commit()` resolves. Writes stamp timestamps / build
 * Firestore update payloads before delegating.
 */
var FirestoreStandardBatchBackend = class {
  constructor(adapter, db) {
    this.adapter = adapter;
    this.db = db;
  }
  setDoc(docId, record, mode) {
    const writeOptions = mode === "merge" ? { merge: true } : void 0;
    this.adapter.setDoc(docId, stampWritableRecord(record), writeOptions);
  }
  updateDoc(docId, update) {
    const payload = buildFirestoreUpdate(update, this.db);
    this.adapter.updateDoc(docId, payload);
  }
  deleteDoc(docId) {
    this.adapter.deleteDoc(docId);
  }
  // Resolves when every queued write has been committed.
  commit() {
    return this.adapter.commit();
  }
};
|
|
2965
|
+
/**
 * Firestore Standard-edition backend. A thin orchestration layer: plain
 * reads and writes delegate to the classic-API adapter, while richer
 * operations (aggregate, projection, vector search, expand) delegate to
 * the shared `runFirestore*` helpers so both Firestore editions expose
 * identical behavior.
 */
var FirestoreStandardBackendImpl = class _FirestoreStandardBackendImpl {
  constructor(db, collectionPath, scopePath) {
    this.db = db;
    this.collectionPath = collectionPath;
    this.scopePath = scopePath;
    this.adapter = createFirestoreAdapter(db, collectionPath);
  }
  // Advertised capability set for this edition (see STANDARD_CAPS).
  capabilities = createCapabilities(STANDARD_CAPS);
  collectionPath;
  scopePath;
  adapter;
  // --- Reads ---
  getDoc(docId) {
    return this.adapter.getDoc(docId);
  }
  query(filters, options) {
    return this.adapter.query(filters, options);
  }
  // --- Writes ---
  setDoc(docId, record, mode) {
    // Server timestamps are stamped onto the record before the write.
    return this.adapter.setDoc(
      docId,
      stampWritableRecord(record),
      mode === "merge" ? { merge: true } : void 0
    );
  }
  updateDoc(docId, update) {
    return this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
  }
  deleteDoc(docId) {
    return this.adapter.deleteDoc(docId);
  }
  // --- Transactions / Batches ---
  runTransaction(fn) {
    return this.db.runTransaction(async (firestoreTx) => {
      const txAdapter = createTransactionAdapter(this.db, this.collectionPath, firestoreTx);
      return fn(new FirestoreStandardTransactionBackend(txAdapter, this.db));
    });
  }
  createBatch() {
    const batchAdapter = createBatchAdapter(this.db, this.collectionPath);
    return new FirestoreStandardBatchBackend(batchAdapter, this.db);
  }
  // --- Subgraphs ---
  subgraph(parentNodeUid, name) {
    // A subgraph is a subcollection under the parent node's document.
    const subPath = `${this.collectionPath}/${parentNodeUid}/${name}`;
    const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
    return new _FirestoreStandardBackendImpl(this.db, subPath, newScope);
  }
  // --- Cascade & bulk ---
  // Both delegate to the module-level helpers of the same names.
  removeNodeCascade(uid, reader, options) {
    return removeNodeCascade(this.db, this.collectionPath, reader, uid, options);
  }
  bulkRemoveEdges(params, reader, options) {
    return bulkRemoveEdges(this.db, this.collectionPath, reader, params, options);
  }
  // --- Cross-collection ---
  async findEdgesGlobal(params, collectionName) {
    // Collection-group queries match every collection with this trailing
    // name, so default to the last path segment of our collection.
    const name = collectionName ?? this.collectionPath.split("/").pop();
    const plan = buildEdgeQueryPlan(params);
    if (plan.strategy === "get") {
      throw new FiregraphError(
        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
        "INVALID_QUERY"
      );
    }
    const collectionGroupRef = this.db.collectionGroup(name);
    let q = collectionGroupRef;
    for (const f of plan.filters) {
      q = q.where(f.field, f.op, f.value);
    }
    if (plan.options?.orderBy) {
      q = q.orderBy(plan.options.orderBy.field, plan.options.orderBy.direction ?? "asc");
    }
    if (plan.options?.limit !== void 0) {
      q = q.limit(plan.options.limit);
    }
    const snap = await q.get();
    return snap.docs.map((doc) => doc.data());
  }
  // --- Aggregate ---
  aggregate(spec, filters) {
    return runFirestoreAggregate(this.db.collection(this.collectionPath), spec, filters, {
      edition: "standard"
    });
  }
  // --- Server-side projection (capability: query.select) ---
  /**
   * Run a projecting query via the shared classic-API helper. Both Firestore
   * editions delegate to one implementation so the projection contract
   * (bare-name normalization, builtin / `data.*` resolution, dedup,
   * original-key preservation) stays consistent across editions. See
   * `runFirestoreFindEdgesProjected` for the resolution rules.
   */
  findEdgesProjected(select, filters, options) {
    return runFirestoreFindEdgesProjected(
      this.db.collection(this.collectionPath),
      select,
      filters,
      options
    );
  }
  // --- Native vector / nearest-neighbour search (capability: search.vector) ---
  /**
   * Run a vector / nearest-neighbour query via the shared classic-API
   * helper. Both Firestore editions delegate to one implementation so the
   * field-path normalisation and result shape stay consistent across
   * editions. See `runFirestoreFindNearest` for the resolution rules and
   * the validation surface.
   *
   * Standard-edition note: vector search requires a single-field vector
   * index on the indexed `vectorField`, plus a composite index whenever
   * additional `where` filters narrow the candidate set before the ANN
   * walk. Both indexes are configured per project in the Firestore
   * console — firegraph does not auto-provision them.
   */
  findNearest(params) {
    return runFirestoreFindNearest(this.db.collection(this.collectionPath), params);
  }
  // --- Server-side multi-source fan-out (capability: query.join) ---
  /**
   * Fan out from `params.sources` over a single edge type via the chunked
   * classic-API helper. The classic `'in'` operator caps at 30 elements
   * per call, so the helper splits sources into 30-element chunks and
   * dispatches them in parallel via `Promise.all`. With 100 sources this
   * is `ceil(100/30) = 4` round trips; the per-source `findEdges` loop in
   * `traverse.ts` would have been 100. Enterprise can collapse this
   * further to a single Pipelines `equalAny(...)` call — Standard cannot,
   * so chunked classic is the best Standard can do.
   *
   * The helper applies a cross-chunk re-sort + total-limit slice so the
   * observable contract matches the SQL backends'
   * `WHERE … IN (?,?,…) ORDER BY … LIMIT N` semantics.
   */
  expand(params) {
    return runFirestoreClassicExpand(this.adapter, params);
  }
};
|
|
3103
|
+
/**
 * Public factory for the Standard-edition backend. `options.scopePath`
 * defaults to the empty (root) scope.
 */
function createFirestoreStandardBackend(db, collectionPath, options = {}) {
  return new FirestoreStandardBackendImpl(db, collectionPath, options.scopePath ?? "");
}
|
|
3107
|
+
// Annotate the CommonJS export names for ESM import in node:
// (dead `0 && ...` expression — never executed; it only lets Node's
// CJS named-export detection see the export list statically)
0 && (module.exports = {
  META_EDGE_TYPE,
  META_NODE_TYPE,
  createFirestoreStandardBackend,
  createGraphClient,
  createMergedRegistry,
  createRegistry,
  generateId
});
//# sourceMappingURL=index.cjs.map
|