@typicalday/firegraph 0.12.0 → 0.13.0
This diff compares the publicly released contents of two package versions as published to a supported registry. It is provided for informational purposes only and reflects only the changes between the two published versions.
- package/README.md +317 -73
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +222 -3
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +197 -4
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +930 -3
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +213 -12
- package/dist/cloudflare/index.d.ts +213 -12
- package/dist/cloudflare/index.js +562 -281
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +590 -550
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +9 -37
- package/dist/index.d.ts +9 -37
- package/dist/index.js +178 -555
- package/dist/index.js.map +1 -1
- package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
- package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-BsR0lnFL.d.ts +0 -200
- package/dist/backend-Ct-fLlkG.d.cts +0 -200
- package/dist/chunk-AWW4MUJ5.js.map +0 -1
- package/dist/chunk-HONQY4HF.js.map +0 -1
- package/dist/types-DxYLy8Ol.d.cts +0 -770
- package/dist/types-DxYLy8Ol.d.ts +0 -770
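
The new `dist/firestore-enterprise` bundle shown below exports `META_EDGE_TYPE`, `META_NODE_TYPE`, `createFirestoreEnterpriseBackend`, `createGraphClient`, `createMergedRegistry`, `createRegistry`, and `generateId`. As orientation only — a minimal sketch, assuming the new dist directory is wired up as a `package.json` subpath export (the import path is an assumption; the symbol names and the registry entry shape are taken from the bundle below):

    // Hypothetical usage; the subpath export is assumed, not confirmed by this diff.
    import { createRegistry } from "@typicalday/firegraph/firestore-enterprise";

    // Registry entries are (aType) -[axbType]-> (bType) triples; a node type is
    // registered as (name) -[is]-> (name), matching NODE_RELATION in the bundle.
    const registry = createRegistry([
      { aType: "user", axbType: "is", bType: "user" },
      { aType: "user", axbType: "follows", bType: "user" },
    ]);
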
|
@@ -0,0 +1,3877 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __create = Object.create;
|
|
3
|
+
var __defProp = Object.defineProperty;
|
|
4
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
5
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
|
+
var __getProtoOf = Object.getPrototypeOf;
|
|
7
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
+
var __esm = (fn, res) => function __init() {
|
|
9
|
+
return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
|
|
10
|
+
};
|
|
11
|
+
var __export = (target, all) => {
|
|
12
|
+
for (var name in all)
|
|
13
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
14
|
+
};
|
|
15
|
+
var __copyProps = (to, from, except, desc) => {
|
|
16
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
17
|
+
for (let key of __getOwnPropNames(from))
|
|
18
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
19
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
20
|
+
}
|
|
21
|
+
return to;
|
|
22
|
+
};
|
|
23
|
+
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
24
|
+
// If the importer is in node compatibility mode or this is not an ESM
|
|
25
|
+
// file that has been converted to a CommonJS file using a Babel-
|
|
26
|
+
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
27
|
+
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
28
|
+
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
29
|
+
mod
|
|
30
|
+
));
|
|
31
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
32
|
+
|
|
33
|
+
// src/internal/serialization-tag.ts
|
|
34
|
+
function isTaggedValue(value) {
|
|
35
|
+
if (value === null || typeof value !== "object") return false;
|
|
36
|
+
const tag = value[SERIALIZATION_TAG];
|
|
37
|
+
return typeof tag === "string" && KNOWN_TYPES.has(tag);
|
|
38
|
+
}
|
|
39
|
+
var SERIALIZATION_TAG, KNOWN_TYPES;
|
|
40
|
+
var init_serialization_tag = __esm({
|
|
41
|
+
"src/internal/serialization-tag.ts"() {
|
|
42
|
+
"use strict";
|
|
43
|
+
SERIALIZATION_TAG = "__firegraph_ser__";
|
|
44
|
+
KNOWN_TYPES = /* @__PURE__ */ new Set(["Timestamp", "GeoPoint", "VectorValue", "DocumentReference"]);
|
|
45
|
+
}
|
|
46
|
+
});
|
|
47
|
+
|
|
48
|
+
// src/serialization.ts
|
|
49
|
+
var serialization_exports = {};
|
|
50
|
+
__export(serialization_exports, {
|
|
51
|
+
SERIALIZATION_TAG: () => SERIALIZATION_TAG,
|
|
52
|
+
deserializeFirestoreTypes: () => deserializeFirestoreTypes,
|
|
53
|
+
isTaggedValue: () => isTaggedValue,
|
|
54
|
+
serializeFirestoreTypes: () => serializeFirestoreTypes
|
|
55
|
+
});
|
|
56
|
+
function isTimestamp(value) {
|
|
57
|
+
return value instanceof import_firestore.Timestamp;
|
|
58
|
+
}
|
|
59
|
+
function isGeoPoint(value) {
|
|
60
|
+
return value instanceof import_firestore.GeoPoint;
|
|
61
|
+
}
|
|
62
|
+
function isDocumentReference(value) {
|
|
63
|
+
if (value === null || typeof value !== "object") return false;
|
|
64
|
+
const v = value;
|
|
65
|
+
return typeof v.path === "string" && v.firestore !== void 0 && typeof v.id === "string" && v.constructor?.name === "DocumentReference";
|
|
66
|
+
}
|
|
67
|
+
function isVectorValue(value) {
|
|
68
|
+
if (value === null || typeof value !== "object") return false;
|
|
69
|
+
const v = value;
|
|
70
|
+
return v.constructor?.name === "VectorValue" && Array.isArray(v._values);
|
|
71
|
+
}
|
|
72
|
+
function serializeFirestoreTypes(data) {
|
|
73
|
+
return serializeValue(data);
|
|
74
|
+
}
|
|
75
|
+
function serializeValue(value) {
|
|
76
|
+
if (value === null || value === void 0) return value;
|
|
77
|
+
if (typeof value !== "object") return value;
|
|
78
|
+
if (isTimestamp(value)) {
|
|
79
|
+
return {
|
|
80
|
+
[SERIALIZATION_TAG]: "Timestamp",
|
|
81
|
+
seconds: value.seconds,
|
|
82
|
+
nanoseconds: value.nanoseconds
|
|
83
|
+
};
|
|
84
|
+
}
|
|
85
|
+
if (isGeoPoint(value)) {
|
|
86
|
+
return {
|
|
87
|
+
[SERIALIZATION_TAG]: "GeoPoint",
|
|
88
|
+
latitude: value.latitude,
|
|
89
|
+
longitude: value.longitude
|
|
90
|
+
};
|
|
91
|
+
}
|
|
92
|
+
if (isDocumentReference(value)) {
|
|
93
|
+
return { [SERIALIZATION_TAG]: "DocumentReference", path: value.path };
|
|
94
|
+
}
|
|
95
|
+
if (isVectorValue(value)) {
|
|
96
|
+
const v = value;
|
|
97
|
+
const values = typeof v.toArray === "function" ? v.toArray() : v._values;
|
|
98
|
+
return { [SERIALIZATION_TAG]: "VectorValue", values: [...values] };
|
|
99
|
+
}
|
|
100
|
+
if (Array.isArray(value)) {
|
|
101
|
+
return value.map(serializeValue);
|
|
102
|
+
}
|
|
103
|
+
const result = {};
|
|
104
|
+
for (const key of Object.keys(value)) {
|
|
105
|
+
result[key] = serializeValue(value[key]);
|
|
106
|
+
}
|
|
107
|
+
return result;
|
|
108
|
+
}
|
|
109
|
+
function deserializeFirestoreTypes(data, db) {
|
|
110
|
+
return deserializeValue(data, db);
|
|
111
|
+
}
|
|
112
|
+
function deserializeValue(value, db) {
|
|
113
|
+
if (value === null || value === void 0) return value;
|
|
114
|
+
if (typeof value !== "object") return value;
|
|
115
|
+
if (isTimestamp(value) || isGeoPoint(value) || isDocumentReference(value) || isVectorValue(value)) {
|
|
116
|
+
return value;
|
|
117
|
+
}
|
|
118
|
+
if (Array.isArray(value)) {
|
|
119
|
+
return value.map((v) => deserializeValue(v, db));
|
|
120
|
+
}
|
|
121
|
+
const obj = value;
|
|
122
|
+
if (isTaggedValue(obj)) {
|
|
123
|
+
const tag = obj[SERIALIZATION_TAG];
|
|
124
|
+
switch (tag) {
|
|
125
|
+
case "Timestamp":
|
|
126
|
+
if (typeof obj.seconds !== "number" || typeof obj.nanoseconds !== "number") return obj;
|
|
127
|
+
return new import_firestore.Timestamp(obj.seconds, obj.nanoseconds);
|
|
128
|
+
case "GeoPoint":
|
|
129
|
+
if (typeof obj.latitude !== "number" || typeof obj.longitude !== "number") return obj;
|
|
130
|
+
return new import_firestore.GeoPoint(obj.latitude, obj.longitude);
|
|
131
|
+
case "VectorValue":
|
|
132
|
+
if (!Array.isArray(obj.values)) return obj;
|
|
133
|
+
return import_firestore.FieldValue.vector(obj.values);
|
|
134
|
+
case "DocumentReference":
|
|
135
|
+
if (typeof obj.path !== "string") return obj;
|
|
136
|
+
if (db) {
|
|
137
|
+
return db.doc(obj.path);
|
|
138
|
+
}
|
|
139
|
+
if (!_docRefWarned) {
|
|
140
|
+
_docRefWarned = true;
|
|
141
|
+
console.warn(
|
|
142
|
+
"[firegraph] DocumentReference encountered during migration deserialization but no Firestore instance available. The reference will remain as a tagged object with its path. Enable write-back for full reconstruction."
|
|
143
|
+
);
|
|
144
|
+
}
|
|
145
|
+
return obj;
|
|
146
|
+
default:
|
|
147
|
+
return obj;
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
const result = {};
|
|
151
|
+
for (const key of Object.keys(obj)) {
|
|
152
|
+
result[key] = deserializeValue(obj[key], db);
|
|
153
|
+
}
|
|
154
|
+
return result;
|
|
155
|
+
}
|
|
156
|
+
var import_firestore, _docRefWarned;
|
|
157
|
+
var init_serialization = __esm({
|
|
158
|
+
"src/serialization.ts"() {
|
|
159
|
+
"use strict";
|
|
160
|
+
import_firestore = require("@google-cloud/firestore");
|
|
161
|
+
init_serialization_tag();
|
|
162
|
+
init_serialization_tag();
|
|
163
|
+
_docRefWarned = false;
|
|
164
|
+
}
|
|
165
|
+
});
|
|
166
|
+
|
|
167
|
+
// src/firestore-enterprise/index.ts
|
|
168
|
+
var firestore_enterprise_exports = {};
|
|
169
|
+
__export(firestore_enterprise_exports, {
|
|
170
|
+
META_EDGE_TYPE: () => META_EDGE_TYPE,
|
|
171
|
+
META_NODE_TYPE: () => META_NODE_TYPE,
|
|
172
|
+
createFirestoreEnterpriseBackend: () => createFirestoreEnterpriseBackend,
|
|
173
|
+
createGraphClient: () => createGraphClient,
|
|
174
|
+
createMergedRegistry: () => createMergedRegistry,
|
|
175
|
+
createRegistry: () => createRegistry,
|
|
176
|
+
generateId: () => generateId
|
|
177
|
+
});
|
|
178
|
+
module.exports = __toCommonJS(firestore_enterprise_exports);
|
|
179
|
+
|
|
180
|
+
// src/docid.ts
|
|
181
|
+
var import_node_crypto = require("crypto");
|
|
182
|
+
|
|
183
|
+
// src/internal/constants.ts
|
|
184
|
+
var NODE_RELATION = "is";
|
|
185
|
+
var DEFAULT_QUERY_LIMIT = 500;
|
|
186
|
+
var BUILTIN_FIELDS = /* @__PURE__ */ new Set([
|
|
187
|
+
"aType",
|
|
188
|
+
"aUid",
|
|
189
|
+
"axbType",
|
|
190
|
+
"bType",
|
|
191
|
+
"bUid",
|
|
192
|
+
"createdAt",
|
|
193
|
+
"updatedAt"
|
|
194
|
+
]);
|
|
195
|
+
var SHARD_SEPARATOR = ":";
|
|
196
|
+
|
|
197
|
+
// src/docid.ts
|
|
198
|
+
function computeNodeDocId(uid) {
|
|
199
|
+
return uid;
|
|
200
|
+
}
|
|
201
|
+
function computeEdgeDocId(aUid, axbType, bUid) {
|
|
202
|
+
const composite = `${aUid}${SHARD_SEPARATOR}${axbType}${SHARD_SEPARATOR}${bUid}`;
|
|
203
|
+
const hash = (0, import_node_crypto.createHash)("sha256").update(composite).digest("hex");
|
|
204
|
+
const shard = hash[0];
|
|
205
|
+
return `${shard}${SHARD_SEPARATOR}${aUid}${SHARD_SEPARATOR}${axbType}${SHARD_SEPARATOR}${bUid}`;
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
// src/internal/write-plan.ts
|
|
209
|
+
init_serialization_tag();
|
|
210
|
+
var DELETE_FIELD = /* @__PURE__ */ Symbol.for("firegraph.deleteField");
|
|
211
|
+
function isDeleteSentinel(value) {
|
|
212
|
+
return value === DELETE_FIELD;
|
|
213
|
+
}
|
|
214
|
+
var FIRESTORE_TERMINAL_CTOR = /* @__PURE__ */ new Set([
|
|
215
|
+
"Timestamp",
|
|
216
|
+
"GeoPoint",
|
|
217
|
+
"VectorValue",
|
|
218
|
+
"DocumentReference",
|
|
219
|
+
"FieldValue",
|
|
220
|
+
"NumericIncrementTransform",
|
|
221
|
+
"ArrayUnionTransform",
|
|
222
|
+
"ArrayRemoveTransform",
|
|
223
|
+
"ServerTimestampTransform",
|
|
224
|
+
"DeleteTransform"
|
|
225
|
+
]);
|
|
226
|
+
function isTerminalValue(value) {
|
|
227
|
+
if (value === null) return true;
|
|
228
|
+
const t = typeof value;
|
|
229
|
+
if (t !== "object") return true;
|
|
230
|
+
if (Array.isArray(value)) return true;
|
|
231
|
+
if (isTaggedValue(value)) return true;
|
|
232
|
+
const proto = Object.getPrototypeOf(value);
|
|
233
|
+
if (proto === null || proto === Object.prototype) return false;
|
|
234
|
+
const ctor = value.constructor;
|
|
235
|
+
if (ctor && typeof ctor.name === "string" && FIRESTORE_TERMINAL_CTOR.has(ctor.name)) return true;
|
|
236
|
+
return true;
|
|
237
|
+
}
|
|
238
|
+
var SAFE_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
|
|
239
|
+
function assertUpdatePayloadExclusive(update) {
|
|
240
|
+
if (update.replaceData !== void 0 && update.dataOps !== void 0) {
|
|
241
|
+
throw new Error(
|
|
242
|
+
"firegraph: UpdatePayload cannot specify both `replaceData` and `dataOps`. Use one or the other \u2014 `replaceData` is the migration-write-back form, `dataOps` is the standard partial-update form."
|
|
243
|
+
);
|
|
244
|
+
}
|
|
245
|
+
}
|
|
246
|
+
function assertNoDeleteSentinels(data, callerLabel) {
|
|
247
|
+
walkForDeleteSentinels(data, [], { kind: "root" }, ({ path }) => {
|
|
248
|
+
const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
|
|
249
|
+
throw new Error(
|
|
250
|
+
`firegraph: ${callerLabel} payload contains a deleteField() sentinel at ${where}. deleteField() is only valid inside updateNode/updateEdge \u2014 full-data writes (put*, replace*) cannot delete individual fields. Use updateNode with a deleteField() value, or omit the field from the replace payload.`
|
|
251
|
+
);
|
|
252
|
+
});
|
|
253
|
+
}
|
|
254
|
+
function walkForDeleteSentinels(node, path, parent, visit) {
|
|
255
|
+
if (node === null || node === void 0) return;
|
|
256
|
+
if (isDeleteSentinel(node)) {
|
|
257
|
+
visit({ path, parent });
|
|
258
|
+
return;
|
|
259
|
+
}
|
|
260
|
+
if (typeof node !== "object") return;
|
|
261
|
+
if (isTaggedValue(node)) return;
|
|
262
|
+
if (Array.isArray(node)) {
|
|
263
|
+
for (let i = 0; i < node.length; i++) {
|
|
264
|
+
walkForDeleteSentinels(node[i], [...path, String(i)], { kind: "array", index: i }, visit);
|
|
265
|
+
}
|
|
266
|
+
return;
|
|
267
|
+
}
|
|
268
|
+
const proto = Object.getPrototypeOf(node);
|
|
269
|
+
if (proto !== null && proto !== Object.prototype) return;
|
|
270
|
+
const obj = node;
|
|
271
|
+
for (const key of Object.keys(obj)) {
|
|
272
|
+
walkForDeleteSentinels(obj[key], [...path, key], { kind: "object" }, visit);
|
|
273
|
+
}
|
|
274
|
+
}
|
|
275
|
+
function assertSafePath(path) {
|
|
276
|
+
for (const seg of path) {
|
|
277
|
+
if (!SAFE_KEY_RE.test(seg)) {
|
|
278
|
+
throw new Error(
|
|
279
|
+
`firegraph: unsafe object key ${JSON.stringify(seg)} at path ${path.map((p) => JSON.stringify(p)).join(" > ")}. Keys used inside update payloads must match /^[A-Za-z_][A-Za-z0-9_-]*$/ so they can be embedded safely in SQLite JSON paths.`
|
|
280
|
+
);
|
|
281
|
+
}
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
function flattenPatch(data) {
|
|
285
|
+
const ops = [];
|
|
286
|
+
walk(data, [], ops);
|
|
287
|
+
return ops;
|
|
288
|
+
}
|
|
289
|
+
function assertNoDeleteSentinelsInArrayValue(arr, arrayPath) {
|
|
290
|
+
walkForDeleteSentinels(arr, arrayPath, { kind: "root" }, ({ parent }) => {
|
|
291
|
+
const arrayPathStr = arrayPath.length === 0 ? "<root>" : arrayPath.map((p) => JSON.stringify(p)).join(" > ");
|
|
292
|
+
if (parent.kind === "array") {
|
|
293
|
+
throw new Error(
|
|
294
|
+
`firegraph: deleteField() sentinel at index ${parent.index} inside an array at path ${arrayPathStr}. Arrays are terminal in update payloads (replaced as a unit), so the sentinel would be silently dropped by JSON serialization. To remove the field entirely, pass deleteField() in place of the whole array.`
|
|
295
|
+
);
|
|
296
|
+
}
|
|
297
|
+
throw new Error(
|
|
298
|
+
`firegraph: deleteField() sentinel inside an array element at path ${arrayPathStr}. Arrays are terminal in update payloads \u2014 the sentinel would be silently dropped by JSON serialization.`
|
|
299
|
+
);
|
|
300
|
+
});
|
|
301
|
+
}
|
|
302
|
+
function walk(node, path, out) {
|
|
303
|
+
if (node === void 0) return;
|
|
304
|
+
if (isDeleteSentinel(node)) {
|
|
305
|
+
if (path.length === 0) {
|
|
306
|
+
throw new Error("firegraph: deleteField() cannot be the entire update payload.");
|
|
307
|
+
}
|
|
308
|
+
assertSafePath(path);
|
|
309
|
+
out.push({ path: [...path], value: void 0, delete: true });
|
|
310
|
+
return;
|
|
311
|
+
}
|
|
312
|
+
if (isTerminalValue(node)) {
|
|
313
|
+
if (path.length === 0) {
|
|
314
|
+
throw new Error(
|
|
315
|
+
"firegraph: update payload must be a plain object. Got " + (node === null ? "null" : Array.isArray(node) ? "array" : typeof node) + "."
|
|
316
|
+
);
|
|
317
|
+
}
|
|
318
|
+
if (Array.isArray(node)) {
|
|
319
|
+
assertNoDeleteSentinelsInArrayValue(node, path);
|
|
320
|
+
}
|
|
321
|
+
assertSafePath(path);
|
|
322
|
+
out.push({ path: [...path], value: node, delete: false });
|
|
323
|
+
return;
|
|
324
|
+
}
|
|
325
|
+
const obj = node;
|
|
326
|
+
const keys = Object.keys(obj);
|
|
327
|
+
if (keys.length === 0) {
|
|
328
|
+
if (path.length > 0) {
|
|
329
|
+
assertSafePath(path);
|
|
330
|
+
out.push({ path: [...path], value: {}, delete: false });
|
|
331
|
+
}
|
|
332
|
+
return;
|
|
333
|
+
}
|
|
334
|
+
for (const key of keys) {
|
|
335
|
+
if (key === SERIALIZATION_TAG) {
|
|
336
|
+
const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
|
|
337
|
+
throw new Error(
|
|
338
|
+
`firegraph: update payload contains a literal \`${SERIALIZATION_TAG}\` key at ${where}. That key is reserved for firegraph's serialization envelope and cannot appear on a plain object in user data. Use a different field name, or pass a recognized tagged value through replaceNode/replaceEdge instead.`
|
|
339
|
+
);
|
|
340
|
+
}
|
|
341
|
+
walk(obj[key], [...path, key], out);
|
|
342
|
+
}
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
// src/batch.ts
|
|
346
|
+
function buildWritableNodeRecord(aType, uid, data) {
|
|
347
|
+
return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };
|
|
348
|
+
}
|
|
349
|
+
function buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data) {
|
|
350
|
+
return { aType, aUid, axbType, bType, bUid, data };
|
|
351
|
+
}
|
|
352
|
+
var GraphBatchImpl = class {
|
|
353
|
+
constructor(backend, registry, scopePath = "") {
|
|
354
|
+
this.backend = backend;
|
|
355
|
+
this.registry = registry;
|
|
356
|
+
this.scopePath = scopePath;
|
|
357
|
+
}
|
|
358
|
+
async putNode(aType, uid, data) {
|
|
359
|
+
this.writeNode(aType, uid, data, "merge");
|
|
360
|
+
}
|
|
361
|
+
async putEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
362
|
+
this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
|
|
363
|
+
}
|
|
364
|
+
async replaceNode(aType, uid, data) {
|
|
365
|
+
this.writeNode(aType, uid, data, "replace");
|
|
366
|
+
}
|
|
367
|
+
async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
|
|
368
|
+
this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
|
|
369
|
+
}
|
|
370
|
+
writeNode(aType, uid, data, mode) {
|
|
371
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
|
|
372
|
+
if (this.registry) {
|
|
373
|
+
this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
|
|
374
|
+
}
|
|
375
|
+
const docId = computeNodeDocId(uid);
|
|
376
|
+
const record = buildWritableNodeRecord(aType, uid, data);
|
|
377
|
+
if (this.registry) {
|
|
378
|
+
const entry = this.registry.lookup(aType, NODE_RELATION, aType);
|
|
379
|
+
if (entry?.schemaVersion && entry.schemaVersion > 0) {
|
|
380
|
+
record.v = entry.schemaVersion;
|
|
381
|
+
}
|
|
382
|
+
}
|
|
383
|
+
this.backend.setDoc(docId, record, mode);
|
|
384
|
+
}
|
|
385
|
+
writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
|
|
386
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
|
|
387
|
+
if (this.registry) {
|
|
388
|
+
this.registry.validate(aType, axbType, bType, data, this.scopePath);
|
|
389
|
+
}
|
|
390
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
391
|
+
const record = buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data);
|
|
392
|
+
if (this.registry) {
|
|
393
|
+
const entry = this.registry.lookup(aType, axbType, bType);
|
|
394
|
+
if (entry?.schemaVersion && entry.schemaVersion > 0) {
|
|
395
|
+
record.v = entry.schemaVersion;
|
|
396
|
+
}
|
|
397
|
+
}
|
|
398
|
+
this.backend.setDoc(docId, record, mode);
|
|
399
|
+
}
|
|
400
|
+
async updateNode(uid, data) {
|
|
401
|
+
const docId = computeNodeDocId(uid);
|
|
402
|
+
this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
403
|
+
}
|
|
404
|
+
async updateEdge(aUid, axbType, bUid, data) {
|
|
405
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
406
|
+
this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
|
|
407
|
+
}
|
|
408
|
+
async removeNode(uid) {
|
|
409
|
+
const docId = computeNodeDocId(uid);
|
|
410
|
+
this.backend.deleteDoc(docId);
|
|
411
|
+
}
|
|
412
|
+
async removeEdge(aUid, axbType, bUid) {
|
|
413
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
414
|
+
this.backend.deleteDoc(docId);
|
|
415
|
+
}
|
|
416
|
+
async commit() {
|
|
417
|
+
await this.backend.commit();
|
|
418
|
+
}
|
|
419
|
+
};
|
|
420
|
+
|
|
421
|
+
// src/dynamic-registry.ts
|
|
422
|
+
var import_node_crypto3 = require("crypto");
|
|
423
|
+
|
|
424
|
+
// src/errors.ts
|
|
425
|
+
var FiregraphError = class extends Error {
|
|
426
|
+
constructor(message, code) {
|
|
427
|
+
super(message);
|
|
428
|
+
this.code = code;
|
|
429
|
+
this.name = "FiregraphError";
|
|
430
|
+
}
|
|
431
|
+
};
|
|
432
|
+
var ValidationError = class extends FiregraphError {
|
|
433
|
+
constructor(message, details) {
|
|
434
|
+
super(message, "VALIDATION_ERROR");
|
|
435
|
+
this.details = details;
|
|
436
|
+
this.name = "ValidationError";
|
|
437
|
+
}
|
|
438
|
+
};
|
|
439
|
+
var RegistryViolationError = class extends FiregraphError {
|
|
440
|
+
constructor(aType, axbType, bType) {
|
|
441
|
+
super(`Unregistered triple: (${aType}) -[${axbType}]-> (${bType})`, "REGISTRY_VIOLATION");
|
|
442
|
+
this.name = "RegistryViolationError";
|
|
443
|
+
}
|
|
444
|
+
};
|
|
445
|
+
var InvalidQueryError = class extends FiregraphError {
|
|
446
|
+
constructor(message) {
|
|
447
|
+
super(message, "INVALID_QUERY");
|
|
448
|
+
this.name = "InvalidQueryError";
|
|
449
|
+
}
|
|
450
|
+
};
|
|
451
|
+
var DynamicRegistryError = class extends FiregraphError {
|
|
452
|
+
constructor(message) {
|
|
453
|
+
super(message, "DYNAMIC_REGISTRY_ERROR");
|
|
454
|
+
this.name = "DynamicRegistryError";
|
|
455
|
+
}
|
|
456
|
+
};
|
|
457
|
+
var QuerySafetyError = class extends FiregraphError {
|
|
458
|
+
constructor(message) {
|
|
459
|
+
super(message, "QUERY_SAFETY");
|
|
460
|
+
this.name = "QuerySafetyError";
|
|
461
|
+
}
|
|
462
|
+
};
|
|
463
|
+
var RegistryScopeError = class extends FiregraphError {
|
|
464
|
+
constructor(aType, axbType, bType, scopePath, allowedIn) {
|
|
465
|
+
super(
|
|
466
|
+
`Type (${aType}) -[${axbType}]-> (${bType}) is not allowed at scope "${scopePath || "root"}". Allowed in: [${allowedIn.join(", ")}]`,
|
|
467
|
+
"REGISTRY_SCOPE"
|
|
468
|
+
);
|
|
469
|
+
this.name = "RegistryScopeError";
|
|
470
|
+
}
|
|
471
|
+
};
|
|
472
|
+
var MigrationError = class extends FiregraphError {
|
|
473
|
+
constructor(message) {
|
|
474
|
+
super(message, "MIGRATION_ERROR");
|
|
475
|
+
this.name = "MigrationError";
|
|
476
|
+
}
|
|
477
|
+
};
|
|
478
|
+
|
|
479
|
+
// src/json-schema.ts
|
|
480
|
+
var import_json_schema = require("@cfworker/json-schema");
|
|
481
|
+
var MAX_RENDERED_ERRORS = 20;
|
|
482
|
+
function compileSchema(schema, label) {
|
|
483
|
+
const validator = new import_json_schema.Validator(schema, "2020-12", false);
|
|
484
|
+
return (data) => {
|
|
485
|
+
const result = validator.validate(data);
|
|
486
|
+
if (!result.valid) {
|
|
487
|
+
const total = result.errors.length;
|
|
488
|
+
const head = result.errors.slice(0, MAX_RENDERED_ERRORS).map(formatError).join("; ");
|
|
489
|
+
const overflow = total > MAX_RENDERED_ERRORS ? ` (+${total - MAX_RENDERED_ERRORS} more)` : "";
|
|
490
|
+
throw new ValidationError(
|
|
491
|
+
`Data validation failed${label ? " for " + label : ""}: ${head}${overflow}`,
|
|
492
|
+
result.errors
|
|
493
|
+
);
|
|
494
|
+
}
|
|
495
|
+
};
|
|
496
|
+
}
|
|
497
|
+
function formatError(err) {
|
|
498
|
+
const path = err.instanceLocation.replace(/^#/, "") || "/";
|
|
499
|
+
const keyword = err.keyword ? `[${err.keyword}] ` : "";
|
|
500
|
+
const detail = err.error ? `: ${keyword}${err.error}` : "";
|
|
501
|
+
return `${path}${detail}`;
|
|
502
|
+
}
|
|
503
|
+
|
|
504
|
+
// src/migration.ts
|
|
505
|
+
async function applyMigrationChain(data, currentVersion, targetVersion, migrations) {
|
|
506
|
+
const sorted = [...migrations].sort((a, b) => a.fromVersion - b.fromVersion);
|
|
507
|
+
let result = { ...data };
|
|
508
|
+
let version = currentVersion;
|
|
509
|
+
for (const step of sorted) {
|
|
510
|
+
if (step.fromVersion === version) {
|
|
511
|
+
try {
|
|
512
|
+
result = await step.up(result);
|
|
513
|
+
} catch (err) {
|
|
514
|
+
if (err instanceof MigrationError) throw err;
|
|
515
|
+
throw new MigrationError(
|
|
516
|
+
`Migration from v${step.fromVersion} to v${step.toVersion} failed: ${err.message}`
|
|
517
|
+
);
|
|
518
|
+
}
|
|
519
|
+
if (!result || typeof result !== "object") {
|
|
520
|
+
throw new MigrationError(
|
|
521
|
+
`Migration from v${step.fromVersion} to v${step.toVersion} returned invalid data (expected object)`
|
|
522
|
+
);
|
|
523
|
+
}
|
|
524
|
+
version = step.toVersion;
|
|
525
|
+
}
|
|
526
|
+
}
|
|
527
|
+
if (version !== targetVersion) {
|
|
528
|
+
throw new MigrationError(
|
|
529
|
+
`Incomplete migration chain: reached v${version} but target is v${targetVersion}`
|
|
530
|
+
);
|
|
531
|
+
}
|
|
532
|
+
return result;
|
|
533
|
+
}
|
|
534
|
+
function validateMigrationChain(migrations, label) {
|
|
535
|
+
if (migrations.length === 0) return;
|
|
536
|
+
const seen = /* @__PURE__ */ new Set();
|
|
537
|
+
for (const step of migrations) {
|
|
538
|
+
if (step.toVersion <= step.fromVersion) {
|
|
539
|
+
throw new MigrationError(
|
|
540
|
+
`${label}: migration step has toVersion (${step.toVersion}) <= fromVersion (${step.fromVersion})`
|
|
541
|
+
);
|
|
542
|
+
}
|
|
543
|
+
if (seen.has(step.fromVersion)) {
|
|
544
|
+
throw new MigrationError(
|
|
545
|
+
`${label}: duplicate migration step for fromVersion ${step.fromVersion}`
|
|
546
|
+
);
|
|
547
|
+
}
|
|
548
|
+
seen.add(step.fromVersion);
|
|
549
|
+
}
|
|
550
|
+
const sorted = [...migrations].sort((a, b) => a.fromVersion - b.fromVersion);
|
|
551
|
+
const targetVersion = Math.max(...migrations.map((m) => m.toVersion));
|
|
552
|
+
let version = 0;
|
|
553
|
+
for (const step of sorted) {
|
|
554
|
+
if (step.fromVersion === version) {
|
|
555
|
+
version = step.toVersion;
|
|
556
|
+
} else if (step.fromVersion > version) {
|
|
557
|
+
throw new MigrationError(
|
|
558
|
+
`${label}: migration chain has a gap \u2014 no step covers v${version} \u2192 v${step.fromVersion}`
|
|
559
|
+
);
|
|
560
|
+
}
|
|
561
|
+
}
|
|
562
|
+
if (version !== targetVersion) {
|
|
563
|
+
throw new MigrationError(
|
|
564
|
+
`${label}: migration chain does not reach v${targetVersion} (stuck at v${version})`
|
|
565
|
+
);
|
|
566
|
+
}
|
|
567
|
+
}
|
|
568
|
+
async function migrateRecord(record, registry, globalWriteBack = "off") {
|
|
569
|
+
const entry = registry.lookup(record.aType, record.axbType, record.bType);
|
|
570
|
+
if (!entry?.migrations?.length || !entry.schemaVersion) {
|
|
571
|
+
return { record, migrated: false, writeBack: "off" };
|
|
572
|
+
}
|
|
573
|
+
const currentVersion = record.v ?? 0;
|
|
574
|
+
if (currentVersion >= entry.schemaVersion) {
|
|
575
|
+
return { record, migrated: false, writeBack: "off" };
|
|
576
|
+
}
|
|
577
|
+
const migratedData = await applyMigrationChain(
|
|
578
|
+
record.data,
|
|
579
|
+
currentVersion,
|
|
580
|
+
entry.schemaVersion,
|
|
581
|
+
entry.migrations
|
|
582
|
+
);
|
|
583
|
+
const writeBack = entry.migrationWriteBack ?? globalWriteBack ?? "off";
|
|
584
|
+
return {
|
|
585
|
+
record: { ...record, data: migratedData, v: entry.schemaVersion },
|
|
586
|
+
migrated: true,
|
|
587
|
+
writeBack
|
|
588
|
+
};
|
|
589
|
+
}
|
|
590
|
+
async function migrateRecords(records, registry, globalWriteBack = "off") {
|
|
591
|
+
return Promise.all(records.map((r) => migrateRecord(r, registry, globalWriteBack)));
|
|
592
|
+
}
|
|
593
|
+
|
|
594
|
+
// src/scope.ts
|
|
595
|
+
function matchScope(scopePath, pattern) {
|
|
596
|
+
if (pattern === "root") return scopePath === "";
|
|
597
|
+
if (pattern === "**") return true;
|
|
598
|
+
const pathSegments = scopePath === "" ? [] : scopePath.split("/");
|
|
599
|
+
const patternSegments = pattern.split("/");
|
|
600
|
+
return matchSegments(pathSegments, 0, patternSegments, 0);
|
|
601
|
+
}
|
|
602
|
+
function matchScopeAny(scopePath, patterns) {
|
|
603
|
+
if (!patterns || patterns.length === 0) return true;
|
|
604
|
+
return patterns.some((p) => matchScope(scopePath, p));
|
|
605
|
+
}
|
|
606
|
+
function matchSegments(path, pi, pattern, qi) {
|
|
607
|
+
if (pi === path.length && qi === pattern.length) return true;
|
|
608
|
+
if (qi === pattern.length) return false;
|
|
609
|
+
const seg = pattern[qi];
|
|
610
|
+
if (seg === "**") {
|
|
611
|
+
if (qi === pattern.length - 1) return true;
|
|
612
|
+
for (let skip = 0; skip <= path.length - pi; skip++) {
|
|
613
|
+
if (matchSegments(path, pi + skip, pattern, qi + 1)) return true;
|
|
614
|
+
}
|
|
615
|
+
return false;
|
|
616
|
+
}
|
|
617
|
+
if (pi === path.length) return false;
|
|
618
|
+
if (seg === "*") {
|
|
619
|
+
return matchSegments(path, pi + 1, pattern, qi + 1);
|
|
620
|
+
}
|
|
621
|
+
if (path[pi] === seg) {
|
|
622
|
+
return matchSegments(path, pi + 1, pattern, qi + 1);
|
|
623
|
+
}
|
|
624
|
+
return false;
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
// src/registry.ts
|
|
628
|
+
function tripleKey(aType, axbType, bType) {
|
|
629
|
+
return `${aType}:${axbType}:${bType}`;
|
|
630
|
+
}
|
|
631
|
+
function tripleKeyFor(e) {
|
|
632
|
+
return tripleKey(e.aType, e.axbType, e.bType);
|
|
633
|
+
}
|
|
634
|
+
function createRegistry(input) {
|
|
635
|
+
const map = /* @__PURE__ */ new Map();
|
|
636
|
+
let entries;
|
|
637
|
+
if (Array.isArray(input)) {
|
|
638
|
+
entries = input;
|
|
639
|
+
} else {
|
|
640
|
+
entries = discoveryToEntries(input);
|
|
641
|
+
}
|
|
642
|
+
const entryList = Object.freeze([...entries]);
|
|
643
|
+
for (const entry of entries) {
|
|
644
|
+
if (entry.targetGraph && entry.targetGraph.includes("/")) {
|
|
645
|
+
throw new ValidationError(
|
|
646
|
+
`Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType}) has invalid targetGraph "${entry.targetGraph}" \u2014 must be a single segment (no "/")`
|
|
647
|
+
);
|
|
648
|
+
}
|
|
649
|
+
if (entry.migrations?.length) {
|
|
650
|
+
const label = `Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`;
|
|
651
|
+
validateMigrationChain(entry.migrations, label);
|
|
652
|
+
entry.schemaVersion = Math.max(...entry.migrations.map((m) => m.toVersion));
|
|
653
|
+
} else {
|
|
654
|
+
entry.schemaVersion = void 0;
|
|
655
|
+
}
|
|
656
|
+
const key = tripleKey(entry.aType, entry.axbType, entry.bType);
|
|
657
|
+
const validator = entry.jsonSchema ? compileSchema(entry.jsonSchema, `(${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`) : void 0;
|
|
658
|
+
map.set(key, { entry, validate: validator });
|
|
659
|
+
}
|
|
660
|
+
const axbIndex = /* @__PURE__ */ new Map();
|
|
661
|
+
const axbBuild = /* @__PURE__ */ new Map();
|
|
662
|
+
for (const entry of entries) {
|
|
663
|
+
const existing = axbBuild.get(entry.axbType);
|
|
664
|
+
if (existing) {
|
|
665
|
+
existing.push(entry);
|
|
666
|
+
} else {
|
|
667
|
+
axbBuild.set(entry.axbType, [entry]);
|
|
668
|
+
}
|
|
669
|
+
}
|
|
670
|
+
for (const [key, arr] of axbBuild) {
|
|
671
|
+
axbIndex.set(key, Object.freeze(arr));
|
|
672
|
+
}
|
|
673
|
+
const topologyIndex = /* @__PURE__ */ new Map();
|
|
674
|
+
const topologyBuild = /* @__PURE__ */ new Map();
|
|
675
|
+
const topologySeen = /* @__PURE__ */ new Map();
|
|
676
|
+
for (const entry of entries) {
|
|
677
|
+
if (!entry.targetGraph) continue;
|
|
678
|
+
let seen = topologySeen.get(entry.aType);
|
|
679
|
+
if (!seen) {
|
|
680
|
+
seen = /* @__PURE__ */ new Set();
|
|
681
|
+
topologySeen.set(entry.aType, seen);
|
|
682
|
+
}
|
|
683
|
+
if (seen.has(entry.targetGraph)) continue;
|
|
684
|
+
seen.add(entry.targetGraph);
|
|
685
|
+
const existing = topologyBuild.get(entry.aType);
|
|
686
|
+
if (existing) {
|
|
687
|
+
existing.push(entry);
|
|
688
|
+
} else {
|
|
689
|
+
topologyBuild.set(entry.aType, [entry]);
|
|
690
|
+
}
|
|
691
|
+
}
|
|
692
|
+
for (const [key, arr] of topologyBuild) {
|
|
693
|
+
topologyIndex.set(key, Object.freeze(arr));
|
|
694
|
+
}
|
|
695
|
+
return {
|
|
696
|
+
lookup(aType, axbType, bType) {
|
|
697
|
+
return map.get(tripleKey(aType, axbType, bType))?.entry;
|
|
698
|
+
},
|
|
699
|
+
lookupByAxbType(axbType) {
|
|
700
|
+
return axbIndex.get(axbType) ?? [];
|
|
701
|
+
},
|
|
702
|
+
getSubgraphTopology(aType) {
|
|
703
|
+
return topologyIndex.get(aType) ?? [];
|
|
704
|
+
},
|
|
705
|
+
validate(aType, axbType, bType, data, scopePath) {
|
|
706
|
+
const rec = map.get(tripleKey(aType, axbType, bType));
|
|
707
|
+
if (!rec) {
|
|
708
|
+
throw new RegistryViolationError(aType, axbType, bType);
|
|
709
|
+
}
|
|
710
|
+
if (scopePath !== void 0 && rec.entry.allowedIn && rec.entry.allowedIn.length > 0) {
|
|
711
|
+
if (!matchScopeAny(scopePath, rec.entry.allowedIn)) {
|
|
712
|
+
throw new RegistryScopeError(aType, axbType, bType, scopePath, rec.entry.allowedIn);
|
|
713
|
+
}
|
|
714
|
+
}
|
|
715
|
+
if (rec.validate) {
|
|
716
|
+
try {
|
|
717
|
+
rec.validate(data);
|
|
718
|
+
} catch (err) {
|
|
719
|
+
if (err instanceof ValidationError) throw err;
|
|
720
|
+
throw new ValidationError(
|
|
721
|
+
`Data validation failed for (${aType}) -[${axbType}]-> (${bType})`,
|
|
722
|
+
err
|
|
723
|
+
);
|
|
724
|
+
}
|
|
725
|
+
}
|
|
726
|
+
},
|
|
727
|
+
entries() {
|
|
728
|
+
return entryList;
|
|
729
|
+
}
|
|
730
|
+
};
|
|
731
|
+
}
|
|
732
|
+
function createMergedRegistry(base, extension) {
|
|
733
|
+
const baseKeys = new Set(base.entries().map(tripleKeyFor));
|
|
734
|
+
return {
|
|
735
|
+
lookup(aType, axbType, bType) {
|
|
736
|
+
return base.lookup(aType, axbType, bType) ?? extension.lookup(aType, axbType, bType);
|
|
737
|
+
},
|
|
738
|
+
lookupByAxbType(axbType) {
|
|
739
|
+
const baseResults = base.lookupByAxbType(axbType);
|
|
740
|
+
const extResults = extension.lookupByAxbType(axbType);
|
|
741
|
+
if (extResults.length === 0) return baseResults;
|
|
742
|
+
if (baseResults.length === 0) return extResults;
|
|
743
|
+
const seen = new Set(baseResults.map(tripleKeyFor));
|
|
744
|
+
const merged = [...baseResults];
|
|
745
|
+
for (const entry of extResults) {
|
|
746
|
+
if (!seen.has(tripleKeyFor(entry))) {
|
|
747
|
+
merged.push(entry);
|
|
748
|
+
}
|
|
749
|
+
}
|
|
750
|
+
return Object.freeze(merged);
|
|
751
|
+
},
|
|
752
|
+
getSubgraphTopology(aType) {
|
|
753
|
+
const baseResults = base.getSubgraphTopology(aType);
|
|
754
|
+
const extResults = extension.getSubgraphTopology(aType);
|
|
755
|
+
if (extResults.length === 0) return baseResults;
|
|
756
|
+
if (baseResults.length === 0) return extResults;
|
|
757
|
+
const seen = new Set(baseResults.map((e) => e.targetGraph));
|
|
758
|
+
const merged = [...baseResults];
|
|
759
|
+
for (const entry of extResults) {
|
|
760
|
+
if (!seen.has(entry.targetGraph)) {
|
|
761
|
+
seen.add(entry.targetGraph);
|
|
762
|
+
merged.push(entry);
|
|
763
|
+
}
|
|
764
|
+
}
|
|
765
|
+
return Object.freeze(merged);
|
|
766
|
+
},
|
|
767
|
+
validate(aType, axbType, bType, data, scopePath) {
|
|
768
|
+
if (baseKeys.has(tripleKey(aType, axbType, bType))) {
|
|
769
|
+
return base.validate(aType, axbType, bType, data, scopePath);
|
|
770
|
+
}
|
|
771
|
+
return extension.validate(aType, axbType, bType, data, scopePath);
|
|
772
|
+
},
|
|
773
|
+
entries() {
|
|
774
|
+
const extEntries = extension.entries();
|
|
775
|
+
if (extEntries.length === 0) return base.entries();
|
|
776
|
+
const merged = [...base.entries()];
|
|
777
|
+
for (const entry of extEntries) {
|
|
778
|
+
if (!baseKeys.has(tripleKeyFor(entry))) {
|
|
779
|
+
merged.push(entry);
|
|
780
|
+
}
|
|
781
|
+
}
|
|
782
|
+
return Object.freeze(merged);
|
|
783
|
+
}
|
|
784
|
+
};
|
|
785
|
+
}
|
|
786
|
+
function discoveryToEntries(discovery) {
|
|
787
|
+
const entries = [];
|
|
788
|
+
for (const [name, entity] of discovery.nodes) {
|
|
789
|
+
entries.push({
|
|
790
|
+
aType: name,
|
|
791
|
+
axbType: NODE_RELATION,
|
|
792
|
+
bType: name,
|
|
793
|
+
jsonSchema: entity.schema,
|
|
794
|
+
description: entity.description,
|
|
795
|
+
titleField: entity.titleField,
|
|
796
|
+
subtitleField: entity.subtitleField,
|
|
797
|
+
allowedIn: entity.allowedIn,
|
|
798
|
+
migrations: entity.migrations,
|
|
799
|
+
migrationWriteBack: entity.migrationWriteBack,
|
|
800
|
+
indexes: entity.indexes
|
|
801
|
+
});
|
|
802
|
+
}
|
|
803
|
+
for (const [axbType, entity] of discovery.edges) {
|
|
804
|
+
const topology = entity.topology;
|
|
805
|
+
if (!topology) continue;
|
|
806
|
+
const fromTypes = Array.isArray(topology.from) ? topology.from : [topology.from];
|
|
807
|
+
const toTypes = Array.isArray(topology.to) ? topology.to : [topology.to];
|
|
808
|
+
const resolvedTargetGraph = entity.targetGraph ?? topology.targetGraph;
|
|
809
|
+
if (resolvedTargetGraph && resolvedTargetGraph.includes("/")) {
|
|
810
|
+
throw new ValidationError(
|
|
811
|
+
`Edge "${axbType}" has invalid targetGraph "${resolvedTargetGraph}" \u2014 must be a single segment (no "/")`
|
|
812
|
+
);
|
|
813
|
+
}
|
|
814
|
+
for (const aType of fromTypes) {
|
|
815
|
+
for (const bType of toTypes) {
|
|
816
|
+
entries.push({
|
|
817
|
+
aType,
|
|
818
|
+
axbType,
|
|
819
|
+
bType,
|
|
820
|
+
jsonSchema: entity.schema,
|
|
821
|
+
description: entity.description,
|
|
822
|
+
inverseLabel: topology.inverseLabel,
|
|
823
|
+
titleField: entity.titleField,
|
|
824
|
+
subtitleField: entity.subtitleField,
|
|
825
|
+
allowedIn: entity.allowedIn,
|
|
826
|
+
targetGraph: resolvedTargetGraph,
|
|
827
|
+
migrations: entity.migrations,
|
|
828
|
+
migrationWriteBack: entity.migrationWriteBack,
|
|
829
|
+
indexes: entity.indexes
|
|
830
|
+
});
|
|
831
|
+
}
|
|
832
|
+
}
|
|
833
|
+
}
|
|
834
|
+
return entries;
|
|
835
|
+
}
|
|
836
|
+
|
|
837
|
+
// src/sandbox.ts
|
|
838
|
+
var import_node_crypto2 = require("crypto");
|
|
839
|
+
var import_meta = {};
|
|
840
|
+
var _worker = null;
|
|
841
|
+
var _requestId = 0;
|
|
842
|
+
var _pending = /* @__PURE__ */ new Map();
|
|
843
|
+
var WORKER_SOURCE = [
|
|
844
|
+
`'use strict';`,
|
|
845
|
+
`var _wt = require('node:worker_threads');`,
|
|
846
|
+
`var _mod = require('node:module');`,
|
|
847
|
+
`var _crypto = require('node:crypto');`,
|
|
848
|
+
`var parentPort = _wt.parentPort;`,
|
|
849
|
+
`var workerData = _wt.workerData;`,
|
|
850
|
+
``,
|
|
851
|
+
`// Load SES using the parent module's resolution context`,
|
|
852
|
+
`var esmRequire = _mod.createRequire(workerData.parentUrl);`,
|
|
853
|
+
`esmRequire('ses');`,
|
|
854
|
+
``,
|
|
855
|
+
`lockdown({`,
|
|
856
|
+
` errorTaming: 'unsafe',`,
|
|
857
|
+
` consoleTaming: 'unsafe',`,
|
|
858
|
+
` evalTaming: 'safe-eval',`,
|
|
859
|
+
` overrideTaming: 'moderate',`,
|
|
860
|
+
` stackFiltering: 'verbose'`,
|
|
861
|
+
`});`,
|
|
862
|
+
``,
|
|
863
|
+
`// Defense-in-depth: verify lockdown() actually hardened JSON.`,
|
|
864
|
+
`if (!Object.isFrozen(JSON)) {`,
|
|
865
|
+
` throw new Error('SES lockdown failed: JSON is not frozen');`,
|
|
866
|
+
`}`,
|
|
867
|
+
``,
|
|
868
|
+
`var cache = new Map();`,
|
|
869
|
+
``,
|
|
870
|
+
`function hashSource(s) {`,
|
|
871
|
+
` return _crypto.createHash('sha256').update(s).digest('hex');`,
|
|
872
|
+
`}`,
|
|
873
|
+
``,
|
|
874
|
+
`function buildWrapper(source) {`,
|
|
875
|
+
` return '(function() {' +`,
|
|
876
|
+
` ' var fn = (' + source + ');\\n' +`,
|
|
877
|
+
` ' if (typeof fn !== "function") return null;\\n' +`,
|
|
878
|
+
` ' return function(jsonIn) {\\n' +`,
|
|
879
|
+
` ' var data = JSON.parse(jsonIn);\\n' +`,
|
|
880
|
+
` ' var result = fn(data);\\n' +`,
|
|
881
|
+
` ' if (result !== null && typeof result === "object" && typeof result.then === "function") {\\n' +`,
|
|
882
|
+
` ' return result.then(function(r) { return JSON.stringify(r); });\\n' +`,
|
|
883
|
+
` ' }\\n' +`,
|
|
884
|
+
` ' return JSON.stringify(result);\\n' +`,
|
|
885
|
+
` ' };\\n' +`,
|
|
886
|
+
` '})()';`,
|
|
887
|
+
`}`,
|
|
888
|
+
``,
|
|
889
|
+
`function compileSource(source) {`,
|
|
890
|
+
` var key = hashSource(source);`,
|
|
891
|
+
` var cached = cache.get(key);`,
|
|
892
|
+
` if (cached) return cached;`,
|
|
893
|
+
``,
|
|
894
|
+
` var compartmentFn;`,
|
|
895
|
+
` try {`,
|
|
896
|
+
` var c = new Compartment({ JSON: JSON });`,
|
|
897
|
+
` compartmentFn = c.evaluate(buildWrapper(source));`,
|
|
898
|
+
` } catch (err) {`,
|
|
899
|
+
` throw new Error('Failed to compile migration source: ' + (err.message || String(err)));`,
|
|
900
|
+
` }`,
|
|
901
|
+
``,
|
|
902
|
+
` if (typeof compartmentFn !== 'function') {`,
|
|
903
|
+
` throw new Error('Migration source did not produce a function: ' + source.slice(0, 80));`,
|
|
904
|
+
` }`,
|
|
905
|
+
``,
|
|
906
|
+
` cache.set(key, compartmentFn);`,
|
|
907
|
+
` return compartmentFn;`,
|
|
908
|
+
`}`,
|
|
909
|
+
``,
|
|
910
|
+
`parentPort.on('message', function(msg) {`,
|
|
911
|
+
` var id = msg.id;`,
|
|
912
|
+
` try {`,
|
|
913
|
+
` if (msg.type === 'compile') {`,
|
|
914
|
+
` compileSource(msg.source);`,
|
|
915
|
+
` parentPort.postMessage({ id: id, type: 'compiled' });`,
|
|
916
|
+
` return;`,
|
|
917
|
+
` }`,
|
|
918
|
+
` if (msg.type === 'execute') {`,
|
|
919
|
+
` var fn = compileSource(msg.source);`,
|
|
920
|
+
` var raw;`,
|
|
921
|
+
` try {`,
|
|
922
|
+
` raw = fn(msg.jsonData);`,
|
|
923
|
+
` } catch (err) {`,
|
|
924
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Migration function threw: ' + (err.message || String(err)) });`,
|
|
925
|
+
` return;`,
|
|
926
|
+
` }`,
|
|
927
|
+
` if (raw !== null && typeof raw === 'object' && typeof raw.then === 'function') {`,
|
|
928
|
+
` raw.then(`,
|
|
929
|
+
` function(jsonResult) {`,
|
|
930
|
+
` if (jsonResult === undefined || jsonResult === null) {`,
|
|
931
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,
|
|
932
|
+
` } else {`,
|
|
933
|
+
` parentPort.postMessage({ id: id, type: 'result', jsonResult: jsonResult });`,
|
|
934
|
+
` }`,
|
|
935
|
+
` },`,
|
|
936
|
+
` function(err) {`,
|
|
937
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Async migration function threw: ' + (err.message || String(err)) });`,
|
|
938
|
+
` }`,
|
|
939
|
+
` );`,
|
|
940
|
+
` return;`,
|
|
941
|
+
` }`,
|
|
942
|
+
` if (raw === undefined || raw === null) {`,
|
|
943
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,
|
|
944
|
+
` } else {`,
|
|
945
|
+
` parentPort.postMessage({ id: id, type: 'result', jsonResult: raw });`,
|
|
946
|
+
` }`,
|
|
947
|
+
` }`,
|
|
948
|
+
` } catch (err) {`,
|
|
949
|
+
` parentPort.postMessage({ id: id, type: 'error', message: err.message || String(err) });`,
|
|
950
|
+
` }`,
|
|
951
|
+
`});`
|
|
952
|
+
].join("\n");
|
|
953
|
+
var _WorkerCtor = null;
|
|
954
|
+
async function loadWorkerCtor() {
|
|
955
|
+
if (_WorkerCtor) return _WorkerCtor;
|
|
956
|
+
const wt = await import("worker_threads");
|
|
957
|
+
_WorkerCtor = wt.Worker;
|
|
958
|
+
return _WorkerCtor;
|
|
959
|
+
}
|
|
960
|
+
async function ensureWorker() {
|
|
961
|
+
if (_worker) return _worker;
|
|
962
|
+
const Ctor = await loadWorkerCtor();
|
|
963
|
+
_worker = new Ctor(WORKER_SOURCE, {
|
|
964
|
+
eval: true,
|
|
965
|
+
workerData: { parentUrl: import_meta.url }
|
|
966
|
+
});
|
|
967
|
+
_worker.unref();
|
|
968
|
+
_worker.on("message", (msg) => {
|
|
969
|
+
if (msg.id === void 0) return;
|
|
970
|
+
const pending = _pending.get(msg.id);
|
|
971
|
+
if (!pending) return;
|
|
972
|
+
_pending.delete(msg.id);
|
|
973
|
+
if (msg.type === "error") {
|
|
974
|
+
pending.reject(new MigrationError(msg.message ?? "Unknown sandbox error"));
|
|
975
|
+
} else {
|
|
976
|
+
pending.resolve(msg);
|
|
977
|
+
}
|
|
978
|
+
});
|
|
979
|
+
_worker.on("error", (err) => {
|
|
980
|
+
for (const [, p] of _pending) {
|
|
981
|
+
p.reject(new MigrationError(`Sandbox worker error: ${err.message}`));
|
|
982
|
+
}
|
|
983
|
+
_pending.clear();
|
|
984
|
+
_worker = null;
|
|
985
|
+
});
|
|
986
|
+
_worker.on("exit", (code) => {
|
|
987
|
+
if (_pending.size > 0) {
|
|
988
|
+
for (const [, p] of _pending) {
|
|
989
|
+
p.reject(new MigrationError(`Sandbox worker exited with code ${code}`));
|
|
990
|
+
}
|
|
991
|
+
_pending.clear();
|
|
992
|
+
}
|
|
993
|
+
_worker = null;
|
|
994
|
+
});
|
|
995
|
+
return _worker;
|
|
996
|
+
}
|
|
997
|
+
async function sendToWorker(msg) {
|
|
998
|
+
const worker = await ensureWorker();
|
|
999
|
+
if (_requestId >= Number.MAX_SAFE_INTEGER) _requestId = 0;
|
|
1000
|
+
const id = ++_requestId;
|
|
1001
|
+
return new Promise((resolve, reject) => {
|
|
1002
|
+
_pending.set(id, { resolve, reject });
|
|
1003
|
+
worker.postMessage({ ...msg, id });
|
|
1004
|
+
});
|
|
1005
|
+
}
|
|
1006
|
+
var compiledCache = /* @__PURE__ */ new WeakMap();
|
|
1007
|
+
function getExecutorCache(executor) {
|
|
1008
|
+
let cache = compiledCache.get(executor);
|
|
1009
|
+
if (!cache) {
|
|
1010
|
+
cache = /* @__PURE__ */ new Map();
|
|
1011
|
+
compiledCache.set(executor, cache);
|
|
1012
|
+
}
|
|
1013
|
+
return cache;
|
|
1014
|
+
}
|
|
1015
|
+
function hashSource(source) {
|
|
1016
|
+
return (0, import_node_crypto2.createHash)("sha256").update(source).digest("hex");
|
|
1017
|
+
}
|
|
1018
|
+
var _serializationModule = null;
|
|
1019
|
+
async function loadSerialization() {
|
|
1020
|
+
if (_serializationModule) return _serializationModule;
|
|
1021
|
+
_serializationModule = await Promise.resolve().then(() => (init_serialization(), serialization_exports));
|
|
1022
|
+
return _serializationModule;
|
|
1023
|
+
}
|
|
1024
|
+
function defaultExecutor(source) {
|
|
1025
|
+
return (async (data) => {
|
|
1026
|
+
const { serializeFirestoreTypes: serializeFirestoreTypes2, deserializeFirestoreTypes: deserializeFirestoreTypes2 } = await loadSerialization();
|
|
1027
|
+
const jsonData = JSON.stringify(serializeFirestoreTypes2(data));
|
|
1028
|
+
const response = await sendToWorker({ type: "execute", source, jsonData });
|
|
1029
|
+
if (response.jsonResult === void 0 || response.jsonResult === null) {
|
|
1030
|
+
throw new MigrationError("Migration returned a non-JSON-serializable value");
|
|
1031
|
+
}
|
|
1032
|
+
try {
|
|
1033
|
+
return deserializeFirestoreTypes2(JSON.parse(response.jsonResult));
|
|
1034
|
+
} catch {
|
|
1035
|
+
throw new MigrationError("Migration returned a non-JSON-serializable value");
|
|
1036
|
+
}
|
|
1037
|
+
});
|
|
1038
|
+
}
|
|
1039
|
+
async function precompileSource(source, executor) {
|
|
1040
|
+
if (executor && executor !== defaultExecutor) {
|
|
1041
|
+
try {
|
|
1042
|
+
executor(source);
|
|
1043
|
+
} catch (err) {
|
|
1044
|
+
if (err instanceof MigrationError) throw err;
|
|
1045
|
+
throw new MigrationError(`Failed to compile migration source: ${err.message}`);
|
|
1046
|
+
}
|
|
1047
|
+
return;
|
|
1048
|
+
}
|
|
1049
|
+
await sendToWorker({ type: "compile", source });
|
|
1050
|
+
}
|
|
1051
|
+
function compileMigrationFn(source, executor = defaultExecutor) {
|
|
1052
|
+
const cache = getExecutorCache(executor);
|
|
1053
|
+
const key = hashSource(source);
|
|
1054
|
+
const cached = cache.get(key);
|
|
1055
|
+
if (cached) return cached;
|
|
1056
|
+
try {
|
|
1057
|
+
const fn = executor(source);
|
|
1058
|
+
cache.set(key, fn);
|
|
1059
|
+
return fn;
|
|
1060
|
+
} catch (err) {
|
|
1061
|
+
if (err instanceof MigrationError) throw err;
|
|
1062
|
+
throw new MigrationError(`Failed to compile migration source: ${err.message}`);
|
|
1063
|
+
}
|
|
1064
|
+
}
|
|
1065
|
+
function compileMigrations(stored, executor) {
|
|
1066
|
+
return stored.map((step) => ({
|
|
1067
|
+
fromVersion: step.fromVersion,
|
|
1068
|
+
toVersion: step.toVersion,
|
|
1069
|
+
up: compileMigrationFn(step.up, executor)
|
|
1070
|
+
}));
|
|
1071
|
+
}
|
|
1072
|
+
|
|
1073
|
+
// src/dynamic-registry.ts
|
|
1074
|
+
var META_NODE_TYPE = "nodeType";
|
|
1075
|
+
var META_EDGE_TYPE = "edgeType";
|
|
1076
|
+
var STORED_MIGRATION_STEP_SCHEMA = {
|
|
1077
|
+
type: "object",
|
|
1078
|
+
required: ["fromVersion", "toVersion", "up"],
|
|
1079
|
+
properties: {
|
|
1080
|
+
fromVersion: { type: "integer", minimum: 0 },
|
|
1081
|
+
toVersion: { type: "integer", minimum: 1 },
|
|
1082
|
+
up: { type: "string", minLength: 1 }
|
|
1083
|
+
},
|
|
1084
|
+
additionalProperties: false
|
|
1085
|
+
};
|
|
1086
|
+
var NODE_TYPE_SCHEMA = {
|
|
1087
|
+
type: "object",
|
|
1088
|
+
required: ["name", "jsonSchema"],
|
|
1089
|
+
properties: {
|
|
1090
|
+
name: { type: "string", minLength: 1 },
|
|
1091
|
+
jsonSchema: { type: "object" },
|
|
1092
|
+
description: { type: "string" },
|
|
1093
|
+
titleField: { type: "string" },
|
|
1094
|
+
subtitleField: { type: "string" },
|
|
1095
|
+
viewTemplate: { type: "string" },
|
|
1096
|
+
viewCss: { type: "string" },
|
|
1097
|
+
allowedIn: { type: "array", items: { type: "string", minLength: 1 } },
|
|
1098
|
+
schemaVersion: { type: "integer", minimum: 0 },
|
|
1099
|
+
migrations: { type: "array", items: STORED_MIGRATION_STEP_SCHEMA },
|
|
1100
|
+
migrationWriteBack: { type: "string", enum: ["off", "eager", "background"] }
|
|
1101
|
+
},
|
|
1102
|
+
additionalProperties: false
|
|
1103
|
+
};
|
|
1104
|
+
var EDGE_TYPE_SCHEMA = {
|
|
1105
|
+
type: "object",
|
|
1106
|
+
required: ["name", "from", "to"],
|
|
1107
|
+
properties: {
|
|
1108
|
+
name: { type: "string", minLength: 1 },
|
|
1109
|
+
from: {
|
|
1110
|
+
oneOf: [
|
|
1111
|
+
{ type: "string", minLength: 1 },
|
|
1112
|
+
{ type: "array", items: { type: "string", minLength: 1 }, minItems: 1 }
|
|
1113
|
+
]
|
|
1114
|
+
},
|
|
1115
|
+
to: {
|
|
1116
|
+
oneOf: [
|
|
1117
|
+
{ type: "string", minLength: 1 },
|
|
1118
|
+
{ type: "array", items: { type: "string", minLength: 1 }, minItems: 1 }
|
|
1119
|
+
]
|
|
1120
|
+
},
|
|
1121
|
+
jsonSchema: { type: "object" },
|
|
1122
|
+
inverseLabel: { type: "string" },
|
|
1123
|
+
description: { type: "string" },
|
|
1124
|
+
titleField: { type: "string" },
|
|
1125
|
+
subtitleField: { type: "string" },
|
|
1126
|
+
viewTemplate: { type: "string" },
|
|
1127
|
+
viewCss: { type: "string" },
|
|
1128
|
+
allowedIn: { type: "array", items: { type: "string", minLength: 1 } },
|
|
1129
|
+
targetGraph: { type: "string", minLength: 1, pattern: "^[^/]+$" },
|
|
1130
|
+
schemaVersion: { type: "integer", minimum: 0 },
|
|
1131
|
+
migrations: { type: "array", items: STORED_MIGRATION_STEP_SCHEMA },
|
|
1132
|
+
migrationWriteBack: { type: "string", enum: ["off", "eager", "background"] }
|
|
1133
|
+
},
|
|
1134
|
+
+  additionalProperties: false
+};
+var BOOTSTRAP_ENTRIES = [
+  {
+    aType: META_NODE_TYPE,
+    axbType: NODE_RELATION,
+    bType: META_NODE_TYPE,
+    jsonSchema: NODE_TYPE_SCHEMA,
+    description: "Meta-type: defines a node type"
+  },
+  {
+    aType: META_EDGE_TYPE,
+    axbType: NODE_RELATION,
+    bType: META_EDGE_TYPE,
+    jsonSchema: EDGE_TYPE_SCHEMA,
+    description: "Meta-type: defines an edge type"
+  }
+];
+var _bootstrapRegistry = null;
+function createBootstrapRegistry() {
+  if (_bootstrapRegistry) return _bootstrapRegistry;
+  _bootstrapRegistry = createRegistry([...BOOTSTRAP_ENTRIES]);
+  return _bootstrapRegistry;
+}
+function generateDeterministicUid(metaType, name) {
+  const hash = (0, import_node_crypto3.createHash)("sha256").update(`${metaType}:${name}`).digest("base64url");
+  return hash.slice(0, 21);
+}
+async function createRegistryFromGraph(reader, executor) {
+  const [nodeTypes, edgeTypes] = await Promise.all([
+    reader.findNodes({ aType: META_NODE_TYPE }),
+    reader.findNodes({ aType: META_EDGE_TYPE })
+  ]);
+  const entries = [...BOOTSTRAP_ENTRIES];
+  const prevalidations = [];
+  for (const record of nodeTypes) {
+    const data = record.data;
+    if (data.migrations) {
+      for (const m of data.migrations) {
+        prevalidations.push(precompileSource(m.up, executor));
+      }
+    }
+  }
+  for (const record of edgeTypes) {
+    const data = record.data;
+    if (data.migrations) {
+      for (const m of data.migrations) {
+        prevalidations.push(precompileSource(m.up, executor));
+      }
+    }
+  }
+  await Promise.all(prevalidations);
+  for (const record of nodeTypes) {
+    const data = record.data;
+    entries.push({
+      aType: data.name,
+      axbType: NODE_RELATION,
+      bType: data.name,
+      jsonSchema: data.jsonSchema,
+      description: data.description,
+      titleField: data.titleField,
+      subtitleField: data.subtitleField,
+      allowedIn: data.allowedIn,
+      migrations: data.migrations ? compileMigrations(data.migrations, executor) : void 0,
+      migrationWriteBack: data.migrationWriteBack
+    });
+  }
+  for (const record of edgeTypes) {
+    const data = record.data;
+    const fromTypes = Array.isArray(data.from) ? data.from : [data.from];
+    const toTypes = Array.isArray(data.to) ? data.to : [data.to];
+    const compiledMigrations = data.migrations ? compileMigrations(data.migrations, executor) : void 0;
+    for (const aType of fromTypes) {
+      for (const bType of toTypes) {
+        entries.push({
+          aType,
+          axbType: data.name,
+          bType,
+          jsonSchema: data.jsonSchema,
+          description: data.description,
+          inverseLabel: data.inverseLabel,
+          titleField: data.titleField,
+          subtitleField: data.subtitleField,
+          allowedIn: data.allowedIn,
+          targetGraph: data.targetGraph,
+          migrations: compiledMigrations,
+          migrationWriteBack: data.migrationWriteBack
+        });
+      }
+    }
+  }
+  return createRegistry(entries);
+}
+
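The deterministic UID makes re-defining a type an idempotent upsert: the same (metaType, name) pair always lands on the same meta-node. A minimal, self-contained sketch of that property (the "_nodeType" string is an illustrative stand-in for the META_NODE_TYPE constant defined earlier in the bundle):

import { createHash } from "node:crypto";

// Same inputs produce the same 21-character UID on every call and every
// machine, so a repeated defineNodeType("task", ...) overwrites one
// meta-node instead of accumulating duplicates.
const uidFor = (metaType: string, name: string): string =>
  createHash("sha256").update(`${metaType}:${name}`).digest("base64url").slice(0, 21);

console.log(uidFor("_nodeType", "task") === uidFor("_nodeType", "task")); // true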
+// src/query.ts
+function buildEdgeQueryPlan(params) {
+  const { aType, aUid, axbType, bType, bUid, limit, orderBy } = params;
+  if (aUid && axbType && bUid && !params.where?.length) {
+    return { strategy: "get", docId: computeEdgeDocId(aUid, axbType, bUid) };
+  }
+  const filters = [];
+  if (aType) filters.push({ field: "aType", op: "==", value: aType });
+  if (aUid) filters.push({ field: "aUid", op: "==", value: aUid });
+  if (axbType) filters.push({ field: "axbType", op: "==", value: axbType });
+  if (bType) filters.push({ field: "bType", op: "==", value: bType });
+  if (bUid) filters.push({ field: "bUid", op: "==", value: bUid });
+  if (params.where) {
+    for (const clause of params.where) {
+      const field = BUILTIN_FIELDS.has(clause.field) ? clause.field : clause.field.startsWith("data.") ? clause.field : `data.${clause.field}`;
+      filters.push({ field, op: clause.op, value: clause.value });
+    }
+  }
+  if (filters.length === 0) {
+    throw new InvalidQueryError("findEdges requires at least one filter parameter");
+  }
+  const effectiveLimit = limit === void 0 ? DEFAULT_QUERY_LIMIT : limit || void 0;
+  return { strategy: "query", filters, options: { limit: effectiveLimit, orderBy } };
+}
+function buildNodeQueryPlan(params) {
+  const { aType, limit, orderBy } = params;
+  const filters = [
+    { field: "aType", op: "==", value: aType },
+    { field: "axbType", op: "==", value: NODE_RELATION }
+  ];
+  if (params.where) {
+    for (const clause of params.where) {
+      const field = BUILTIN_FIELDS.has(clause.field) ? clause.field : clause.field.startsWith("data.") ? clause.field : `data.${clause.field}`;
+      filters.push({ field, op: clause.op, value: clause.value });
+    }
+  }
+  const effectiveLimit = limit === void 0 ? DEFAULT_QUERY_LIMIT : limit || void 0;
+  return { strategy: "query", filters, options: { limit: effectiveLimit, orderBy } };
+}
+
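The two plan shapes, traced by hand (UIDs and edge types are illustrative, not package fixtures):

// Full (aUid, axbType, bUid) triple and no `where` clause: direct document get.
buildEdgeQueryPlan({ aUid: "u1", axbType: "follows", bUid: "u2" });
// → { strategy: "get", docId: computeEdgeDocId("u1", "follows", "u2") }

// Anything less specific: filter query. A bare `where` field name is
// rewritten under data.*, an undefined limit falls back to
// DEFAULT_QUERY_LIMIT, and an explicit limit of 0 removes the cap.
buildEdgeQueryPlan({ axbType: "follows", where: [{ field: "since", op: ">=", value: 2024 }] });
// → { strategy: "query",
//     filters: [{ field: "axbType", op: "==", value: "follows" },
//               { field: "data.since", op: ">=", value: 2024 }],
//     options: { limit: DEFAULT_QUERY_LIMIT, orderBy: undefined } }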
+// src/query-safety.ts
+var SAFE_INDEX_PATTERNS = [
+  /* @__PURE__ */ new Set(["aUid", "axbType"]),
+  /* @__PURE__ */ new Set(["axbType", "bUid"]),
+  /* @__PURE__ */ new Set(["aType", "axbType"]),
+  /* @__PURE__ */ new Set(["axbType", "bType"])
+];
+function analyzeQuerySafety(filters) {
+  const builtinFieldsPresent = /* @__PURE__ */ new Set();
+  let hasDataFilters = false;
+  for (const f of filters) {
+    if (BUILTIN_FIELDS.has(f.field)) {
+      builtinFieldsPresent.add(f.field);
+    } else {
+      hasDataFilters = true;
+    }
+  }
+  for (const pattern of SAFE_INDEX_PATTERNS) {
+    let matched = true;
+    for (const field of pattern) {
+      if (!builtinFieldsPresent.has(field)) {
+        matched = false;
+        break;
+      }
+    }
+    if (matched) {
+      return { safe: true };
+    }
+  }
+  const presentFields = [...builtinFieldsPresent];
+  if (presentFields.length === 0 && hasDataFilters) {
+    return {
+      safe: false,
+      reason: "Query filters only use data.* fields with no builtin field constraints. This requires a full collection scan. Add aType, aUid, axbType, bType, or bUid filters, or set allowCollectionScan: true."
+    };
+  }
+  if (hasDataFilters) {
+    return {
+      safe: false,
+      reason: `Query filters on [${presentFields.join(", ")}] do not match any indexed pattern. data.* filters without an indexed base require a full collection scan. Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). Set allowCollectionScan: true to override.`
+    };
+  }
+  return {
+    safe: false,
+    reason: `Query filters on [${presentFields.join(", ")}] do not match any indexed pattern. This may cause a full collection scan on Firestore Enterprise. Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). Set allowCollectionScan: true to override.`
+  };
+}
+
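Worked examples against the four safe patterns (filter values are illustrative):

// Safe: builtin fields cover the (aType + axbType) indexed pair, and extra
// data.* clauses are fine once an indexed base is present.
analyzeQuerySafety([
  { field: "aType", op: "==", value: "user" },
  { field: "axbType", op: "==", value: "follows" },
  { field: "data.active", op: "==", value: true }
]); // → { safe: true }

// Unsafe: data.* only, no builtin anchor, so a full collection scan is refused.
analyzeQuerySafety([{ field: "data.status", op: "==", value: "open" }]);
// → { safe: false, reason: "Query filters only use data.* fields ..." }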
+// src/transaction.ts
+function buildWritableNodeRecord2(aType, uid, data) {
+  return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };
+}
+function buildWritableEdgeRecord2(aType, aUid, axbType, bType, bUid, data) {
+  return { aType, aUid, axbType, bType, bUid, data };
+}
+var GraphTransactionImpl = class {
+  constructor(backend, registry, scanProtection = "error", scopePath = "", globalWriteBack = "off") {
+    this.backend = backend;
+    this.registry = registry;
+    this.scanProtection = scanProtection;
+    this.scopePath = scopePath;
+    this.globalWriteBack = globalWriteBack;
+  }
+  async getNode(uid) {
+    const docId = computeNodeDocId(uid);
+    const record = await this.backend.getDoc(docId);
+    if (!record || !this.registry) return record;
+    const result = await migrateRecord(record, this.registry, this.globalWriteBack);
+    if (result.migrated && result.writeBack !== "off") {
+      await this.backend.updateDoc(docId, {
+        replaceData: result.record.data,
+        v: result.record.v
+      });
+    }
+    return result.record;
+  }
+  async getEdge(aUid, axbType, bUid) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    const record = await this.backend.getDoc(docId);
+    if (!record || !this.registry) return record;
+    const result = await migrateRecord(record, this.registry, this.globalWriteBack);
+    if (result.migrated && result.writeBack !== "off") {
+      await this.backend.updateDoc(docId, {
+        replaceData: result.record.data,
+        v: result.record.v
+      });
+    }
+    return result.record;
+  }
+  async edgeExists(aUid, axbType, bUid) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    const record = await this.backend.getDoc(docId);
+    return record !== null;
+  }
+  checkQuerySafety(filters, allowCollectionScan) {
+    if (allowCollectionScan || this.scanProtection === "off") return;
+    const result = analyzeQuerySafety(filters);
+    if (result.safe) return;
+    if (this.scanProtection === "error") {
+      throw new QuerySafetyError(result.reason);
+    }
+    console.warn(`[firegraph] Query safety warning: ${result.reason}`);
+  }
+  async findEdges(params) {
+    const plan = buildEdgeQueryPlan(params);
+    let records;
+    if (plan.strategy === "get") {
+      const record = await this.backend.getDoc(plan.docId);
+      records = record ? [record] : [];
+    } else {
+      this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+      records = await this.backend.query(plan.filters, plan.options);
+    }
+    return this.applyMigrations(records);
+  }
+  async findNodes(params) {
+    const plan = buildNodeQueryPlan(params);
+    let records;
+    if (plan.strategy === "get") {
+      const record = await this.backend.getDoc(plan.docId);
+      records = record ? [record] : [];
+    } else {
+      this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+      records = await this.backend.query(plan.filters, plan.options);
+    }
+    return this.applyMigrations(records);
+  }
+  async applyMigrations(records) {
+    if (!this.registry || records.length === 0) return records;
+    const results = await migrateRecords(records, this.registry, this.globalWriteBack);
+    for (const result of results) {
+      if (result.migrated && result.writeBack !== "off") {
+        const docId = result.record.axbType === NODE_RELATION ? computeNodeDocId(result.record.aUid) : computeEdgeDocId(result.record.aUid, result.record.axbType, result.record.bUid);
+        await this.backend.updateDoc(docId, {
+          replaceData: result.record.data,
+          v: result.record.v
+        });
+      }
+    }
+    return results.map((r) => r.record);
+  }
+  async putNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "merge");
+  }
+  async putEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
+  }
+  async replaceNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "replace");
+  }
+  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
+  }
+  async writeNode(aType, uid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
+    if (this.registry) {
+      this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
+    }
+    const docId = computeNodeDocId(uid);
+    const record = buildWritableNodeRecord2(aType, uid, data);
+    if (this.registry) {
+      const entry = this.registry.lookup(aType, NODE_RELATION, aType);
+      if (entry?.schemaVersion && entry.schemaVersion > 0) {
+        record.v = entry.schemaVersion;
+      }
+    }
+    await this.backend.setDoc(docId, record, mode);
+  }
+  async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
+    if (this.registry) {
+      this.registry.validate(aType, axbType, bType, data, this.scopePath);
+    }
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    const record = buildWritableEdgeRecord2(aType, aUid, axbType, bType, bUid, data);
+    if (this.registry) {
+      const entry = this.registry.lookup(aType, axbType, bType);
+      if (entry?.schemaVersion && entry.schemaVersion > 0) {
+        record.v = entry.schemaVersion;
+      }
+    }
+    await this.backend.setDoc(docId, record, mode);
+  }
+  async updateNode(uid, data) {
+    const docId = computeNodeDocId(uid);
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
+  }
+  async updateEdge(aUid, axbType, bUid, data) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
+  }
+  async removeNode(uid) {
+    const docId = computeNodeDocId(uid);
+    await this.backend.deleteDoc(docId);
+  }
+  async removeEdge(aUid, axbType, bUid) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    await this.backend.deleteDoc(docId);
+  }
+};
+
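Callers never construct GraphTransactionImpl directly; client.runTransaction (defined below in src/client.ts) wraps the backend transaction and hands this facade to the callback. A minimal usage sketch with illustrative types and UIDs, assuming the usual run-transaction semantics where a thrown error aborts the attempt. Note that, unlike the client path, migration write-backs here are awaited, so they commit or roll back with the transaction:

await client.runTransaction(async (tx) => {
  const follower = await tx.getNode("u1");            // migrated shape, or null
  if (!follower) throw new Error("missing node u1");  // propagates; nothing commits
  await tx.putEdge("user", "u1", "follows", "user", "u2", { since: 2024 });
});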
+// src/client.ts
+var RESERVED_TYPE_NAMES = /* @__PURE__ */ new Set([META_NODE_TYPE, META_EDGE_TYPE]);
+function buildWritableNodeRecord3(aType, uid, data) {
+  return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };
+}
+function buildWritableEdgeRecord3(aType, aUid, axbType, bType, bUid, data) {
+  return { aType, aUid, axbType, bType, bUid, data };
+}
+var GraphClientImpl = class _GraphClientImpl {
+  constructor(backend, options, metaBackend) {
+    this.backend = backend;
+    this.globalWriteBack = options?.migrationWriteBack ?? "off";
+    this.migrationSandbox = options?.migrationSandbox;
+    if (options?.registryMode) {
+      this.dynamicConfig = options.registryMode;
+      this.bootstrapRegistry = createBootstrapRegistry();
+      if (options.registry) {
+        this.staticRegistry = options.registry;
+      }
+      this.metaBackend = metaBackend;
+    } else {
+      this.staticRegistry = options?.registry;
+    }
+    this.scanProtection = options?.scanProtection ?? "error";
+  }
+  scanProtection;
+  /**
+   * Capability set of the underlying backend. Mirrors `backend.capabilities`
+   * verbatim so callers can portability-check (`client.capabilities.has(
+   * 'query.join')`) without reaching for the backend handle. Static for the
+   * lifetime of the client.
+   */
+  get capabilities() {
+    return this.backend.capabilities;
+  }
+  // Static mode
+  staticRegistry;
+  // Dynamic mode
+  dynamicConfig;
+  bootstrapRegistry;
+  dynamicRegistry;
+  metaBackend;
+  // Migration settings
+  globalWriteBack;
+  migrationSandbox;
+  // ---------------------------------------------------------------------------
+  // Backend access (exposed for traversal helpers and subgraph cloning)
+  // ---------------------------------------------------------------------------
+  /** @internal */
+  getBackend() {
+    return this.backend;
+  }
+  /**
+   * Snapshot of the currently-effective registry. Returns the merged view
+   * used for domain-type validation and migration — in dynamic mode this is
+   * `dynamicRegistry ?? staticRegistry ?? bootstrapRegistry`, so callers see
+   * updates after `reloadRegistry()` without having to re-resolve anything.
+   *
+   * Exposed for backends that need topology access during bulk operations
+   * (e.g. the Cloudflare DO backend's cross-DO cascade). Not part of the
+   * public `GraphClient` surface.
+   *
+   * @internal
+   */
+  getRegistrySnapshot() {
+    return this.getCombinedRegistry();
+  }
+  // ---------------------------------------------------------------------------
+  // Registry routing
+  // ---------------------------------------------------------------------------
+  getRegistryForType(aType) {
+    if (!this.dynamicConfig) return this.staticRegistry;
+    if (aType === META_NODE_TYPE || aType === META_EDGE_TYPE) {
+      return this.bootstrapRegistry;
+    }
+    return this.dynamicRegistry ?? this.staticRegistry ?? this.bootstrapRegistry;
+  }
+  getBackendForType(aType) {
+    if (this.metaBackend && (aType === META_NODE_TYPE || aType === META_EDGE_TYPE)) {
+      return this.metaBackend;
+    }
+    return this.backend;
+  }
+  getCombinedRegistry() {
+    if (!this.dynamicConfig) return this.staticRegistry;
+    return this.dynamicRegistry ?? this.staticRegistry ?? this.bootstrapRegistry;
+  }
+  // ---------------------------------------------------------------------------
+  // Query safety
+  // ---------------------------------------------------------------------------
+  checkQuerySafety(filters, allowCollectionScan) {
+    if (allowCollectionScan || this.scanProtection === "off") return;
+    const result = analyzeQuerySafety(filters);
+    if (result.safe) return;
+    if (this.scanProtection === "error") {
+      throw new QuerySafetyError(result.reason);
+    }
+    console.warn(`[firegraph] Query safety warning: ${result.reason}`);
+  }
+  // ---------------------------------------------------------------------------
+  // Migration helpers
+  // ---------------------------------------------------------------------------
+  async applyMigration(record, docId) {
+    const registry = this.getCombinedRegistry();
+    if (!registry) return record;
+    const result = await migrateRecord(record, registry, this.globalWriteBack);
+    if (result.migrated) {
+      this.handleWriteBack(result, docId);
+    }
+    return result.record;
+  }
+  async applyMigrations(records) {
+    const registry = this.getCombinedRegistry();
+    if (!registry || records.length === 0) return records;
+    const results = await migrateRecords(records, registry, this.globalWriteBack);
+    for (const result of results) {
+      if (result.migrated) {
+        const docId = result.record.axbType === NODE_RELATION ? computeNodeDocId(result.record.aUid) : computeEdgeDocId(result.record.aUid, result.record.axbType, result.record.bUid);
+        this.handleWriteBack(result, docId);
+      }
+    }
+    return results.map((r) => r.record);
+  }
+  /**
+   * Fire-and-forget write-back for a migrated record. Both `'eager'` and
+   * `'background'` are non-blocking; the difference is the log level on
+   * failure. For synchronous write-back, use a transaction — see
+   * `GraphTransactionImpl`.
+   */
+  handleWriteBack(result, docId) {
+    if (result.writeBack === "off") return;
+    const doWriteBack = async () => {
+      try {
+        await this.backend.updateDoc(docId, {
+          replaceData: result.record.data,
+          v: result.record.v
+        });
+      } catch (err) {
+        const msg = `[firegraph] Migration write-back failed for ${docId}: ${err.message}`;
+        if (result.writeBack === "eager") {
+          console.error(msg);
+        } else {
+          console.warn(msg);
+        }
+      }
+    };
+    void doWriteBack();
+  }
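The three write-back modes in practice; option name per the constructor above, with "eager" logging failures at error level and "background" at warn level (backend and registry assumed in scope):

const client = createGraphClient(backend, {
  registry,
  migrationWriteBack: "background" // "off" (default) | "eager" | "background"
});
// Reads always return the migrated shape. With "eager" or "background" the
// migrated data is also persisted asynchronously; only the log level on a
// failed persist differs between the two.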
+  // ---------------------------------------------------------------------------
+  // GraphReader
+  // ---------------------------------------------------------------------------
+  async getNode(uid) {
+    const docId = computeNodeDocId(uid);
+    const record = await this.backend.getDoc(docId);
+    if (!record) return null;
+    return this.applyMigration(record, docId);
+  }
+  async getEdge(aUid, axbType, bUid) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    const record = await this.backend.getDoc(docId);
+    if (!record) return null;
+    return this.applyMigration(record, docId);
+  }
+  async edgeExists(aUid, axbType, bUid) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    const record = await this.backend.getDoc(docId);
+    return record !== null;
+  }
+  async findEdges(params) {
+    const plan = buildEdgeQueryPlan(params);
+    let records;
+    if (plan.strategy === "get") {
+      const record = await this.backend.getDoc(plan.docId);
+      records = record ? [record] : [];
+    } else {
+      this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+      records = await this.backend.query(plan.filters, plan.options);
+    }
+    return this.applyMigrations(records);
+  }
+  async findNodes(params) {
+    const plan = buildNodeQueryPlan(params);
+    let records;
+    if (plan.strategy === "get") {
+      const record = await this.backend.getDoc(plan.docId);
+      records = record ? [record] : [];
+    } else {
+      this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+      records = await this.backend.query(plan.filters, plan.options);
+    }
+    return this.applyMigrations(records);
+  }
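Reads on the client path, combining the query planner, scan protection, and the fire-and-forget migration write-back from above (identifiers illustrative; `client` as constructed elsewhere):

const node = await client.getNode("u1");  // single doc get, migrated shape or null
const edges = await client.findEdges({
  aUid: "u1",
  axbType: "follows"                      // (aUid + axbType): indexed-safe
});
const scan = await client.findEdges({
  where: [{ field: "status", op: "==", value: "open" }],
  allowCollectionScan: true               // explicit opt-in, else QuerySafetyError
});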
+  // ---------------------------------------------------------------------------
+  // GraphWriter
+  // ---------------------------------------------------------------------------
+  async putNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "merge");
+  }
+  async putEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
+  }
+  async replaceNode(aType, uid, data) {
+    await this.writeNode(aType, uid, data, "replace");
+  }
+  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
+    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
+  }
+  async writeNode(aType, uid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
+    const registry = this.getRegistryForType(aType);
+    if (registry) {
+      registry.validate(aType, NODE_RELATION, aType, data, this.backend.scopePath);
+    }
+    const backend = this.getBackendForType(aType);
+    const docId = computeNodeDocId(uid);
+    const record = buildWritableNodeRecord3(aType, uid, data);
+    if (registry) {
+      const entry = registry.lookup(aType, NODE_RELATION, aType);
+      if (entry?.schemaVersion && entry.schemaVersion > 0) {
+        record.v = entry.schemaVersion;
+      }
+    }
+    await backend.setDoc(docId, record, mode);
+  }
+  async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
+    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
+    const registry = this.getRegistryForType(aType);
+    if (registry) {
+      registry.validate(aType, axbType, bType, data, this.backend.scopePath);
+    }
+    const backend = this.getBackendForType(aType);
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    const record = buildWritableEdgeRecord3(aType, aUid, axbType, bType, bUid, data);
+    if (registry) {
+      const entry = registry.lookup(aType, axbType, bType);
+      if (entry?.schemaVersion && entry.schemaVersion > 0) {
+        record.v = entry.schemaVersion;
+      }
+    }
+    await backend.setDoc(docId, record, mode);
+  }
+  async updateNode(uid, data) {
+    const docId = computeNodeDocId(uid);
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
+  }
+  async updateEdge(aUid, axbType, bUid, data) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
+  }
+  async removeNode(uid) {
+    const docId = computeNodeDocId(uid);
+    await this.backend.deleteDoc(docId);
+  }
+  async removeEdge(aUid, axbType, bUid) {
+    const docId = computeEdgeDocId(aUid, axbType, bUid);
+    await this.backend.deleteDoc(docId);
+  }
+  // ---------------------------------------------------------------------------
+  // Transactions & Batches
+  // ---------------------------------------------------------------------------
+  async runTransaction(fn) {
+    return this.backend.runTransaction(async (txBackend) => {
+      const graphTx = new GraphTransactionImpl(
+        txBackend,
+        this.getCombinedRegistry(),
+        this.scanProtection,
+        this.backend.scopePath,
+        this.globalWriteBack
+      );
+      return fn(graphTx);
+    });
+  }
+  batch() {
+    return new GraphBatchImpl(
+      this.backend.createBatch(),
+      this.getCombinedRegistry(),
+      this.backend.scopePath
+    );
+  }
+  // ---------------------------------------------------------------------------
+  // Subgraph
+  // ---------------------------------------------------------------------------
+  subgraph(parentNodeUid, name = "graph") {
+    if (!parentNodeUid || parentNodeUid.includes("/")) {
+      throw new FiregraphError(
+        `Invalid parentNodeUid for subgraph: "${parentNodeUid}". Must be a non-empty string without "/".`,
+        "INVALID_SUBGRAPH"
+      );
+    }
+    if (name.includes("/")) {
+      throw new FiregraphError(
+        `Subgraph name must not contain "/": got "${name}". Use chained .subgraph() calls for nested subgraphs.`,
+        "INVALID_SUBGRAPH"
+      );
+    }
+    const childBackend = this.backend.subgraph(parentNodeUid, name);
+    return new _GraphClientImpl(
+      childBackend,
+      {
+        registry: this.getCombinedRegistry(),
+        scanProtection: this.scanProtection,
+        migrationWriteBack: this.globalWriteBack,
+        migrationSandbox: this.migrationSandbox
+      }
+      // Subgraphs do not have meta-backends; meta lives only at the root.
+    );
+  }
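Nested scopes compose by chaining, one path segment per call (UIDs and names illustrative):

const project = client.subgraph("proj_1");               // default name "graph"
const archive = project.subgraph("folder_9", "archive"); // grandchild scope
// Each child client reuses the parent's merged registry, scan protection,
// and write-back mode; slashes are rejected so a UID cannot smuggle in
// extra path segments.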
+  // ---------------------------------------------------------------------------
+  // Collection group query
+  // ---------------------------------------------------------------------------
+  async findEdgesGlobal(params, collectionName) {
+    if (!this.backend.findEdgesGlobal) {
+      throw new FiregraphError(
+        "findEdgesGlobal() is not supported by the current storage backend.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const plan = buildEdgeQueryPlan(params);
+    if (plan.strategy === "get") {
+      throw new FiregraphError(
+        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
+        "INVALID_QUERY"
+      );
+    }
+    this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+    const records = await this.backend.findEdgesGlobal(params, collectionName);
+    return this.applyMigrations(records);
+  }
+  // ---------------------------------------------------------------------------
+  // Aggregate query (capability: query.aggregate)
+  // ---------------------------------------------------------------------------
+  async aggregate(params) {
+    if (!this.backend.aggregate) {
+      throw new FiregraphError(
+        "aggregate() is not supported by the current storage backend.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
+    if (!hasAnyFilter) {
+      this.checkQuerySafety([], params.allowCollectionScan);
+      const result2 = await this.backend.aggregate(params.aggregates, []);
+      return result2;
+    }
+    const plan = buildEdgeQueryPlan(params);
+    if (plan.strategy === "get") {
+      throw new FiregraphError(
+        "aggregate() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
+        "INVALID_QUERY"
+      );
+    }
+    this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+    const result = await this.backend.aggregate(params.aggregates, plan.filters);
+    return result;
+  }
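An aggregate sketch. The `aggregates` descriptors are forwarded to the backend untouched, so their exact shape is backend-defined; the `{ op: "count" }` form below is a hypothetical illustration only:

const result = await client.aggregate({
  axbType: "follows",
  bType: "user",                 // (axbType + bType): indexed-safe
  aggregates: [{ op: "count" }]  // hypothetical descriptor shape
});
// With no filters at all, the same call needs allowCollectionScan: true
// and aggregates over the whole collection.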
+  // ---------------------------------------------------------------------------
+  // Bulk operations
+  // ---------------------------------------------------------------------------
+  async removeNodeCascade(uid, options) {
+    return this.backend.removeNodeCascade(uid, this, options);
+  }
+  async bulkRemoveEdges(params, options) {
+    return this.backend.bulkRemoveEdges(params, this, options);
+  }
+  // ---------------------------------------------------------------------------
+  // Server-side DML (capability: query.dml)
+  // ---------------------------------------------------------------------------
+  /**
+   * Single-statement bulk DELETE. Translates `params` to a filter list via
+   * `buildEdgeQueryPlan` (the same plan `findEdges` uses) and dispatches to
+   * `backend.bulkDelete`. The fetch-then-delete loop in `bulkRemoveEdges`
+   * is the cap-less fallback; this method is the fast path on backends
+   * declaring `query.dml`.
+   *
+   * Scan-protection rules match `findEdges`: a query with no identifying
+   * fields requires `allowCollectionScan: true` to pass. A bare-empty
+   * filter set (no `aType`, `aUid`, etc., no `where`) is allowed at this
+   * layer — shared SQLite bounds the blast radius via its leading `scope`
+   * predicate — but the DO RPC backend rejects empty filters at the wire
+   * boundary as defense-in-depth. To wipe a routed subgraph DO, use
+   * `removeNodeCascade` on the parent node instead.
+   */
+  async bulkDelete(params, options) {
+    if (!this.backend.bulkDelete) {
+      throw new FiregraphError(
+        "bulkDelete() is not supported by the current storage backend. Fall back to bulkRemoveEdges() for backends without query.dml (e.g. Firestore Standard).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = this.buildDmlFilters(params);
+    return this.backend.bulkDelete(filters, options);
+  }
+  /**
+   * Single-statement bulk UPDATE. Same translation path as `bulkDelete`,
+   * but the patch is deep-merged into each matching row's `data` via the
+   * shared `flattenPatch` pipeline. Identifying columns are immutable
+   * through this path (see `BulkUpdatePatch` JSDoc).
+   *
+   * Empty-patch rejection happens inside the backend (`compileBulkUpdate`)
+   * — a `data: {}` payload would only rewrite `updated_at`, which is
+   * almost certainly a bug.
+   */
+  async bulkUpdate(params, patch, options) {
+    if (!this.backend.bulkUpdate) {
+      throw new FiregraphError(
+        "bulkUpdate() is not supported by the current storage backend.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = this.buildDmlFilters(params);
+    return this.backend.bulkUpdate(filters, patch, options);
+  }
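Server-side DML on a `query.dml` backend, with the single-row guard in action (filters and patch values illustrative; the `{ data: ... }` patch shape follows the empty-patch note above):

await client.bulkDelete({ aUid: "u1", axbType: "follows" }); // one statement, no fetch loop
await client.bulkUpdate(
  { axbType: "member_of", bUid: "team_1" },
  { data: { active: false } }  // deep-merged into each row's data via flattenPatch
);
// All three identifiers would be a single-row lookup and throws INVALID_QUERY:
// await client.bulkDelete({ aUid: "u1", axbType: "follows", bUid: "u2" });
// Use removeEdge() / updateEdge() for that case instead.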
+  // ---------------------------------------------------------------------------
+  // Multi-source fan-out (capability: query.join)
+  // ---------------------------------------------------------------------------
+  /**
+   * Fan out from `params.sources` over a single edge type in one round trip.
+   * On backends without `query.join`, throws `UNSUPPORTED_OPERATION` — the
+   * cap-less fallback is the per-source `findEdges` loop, which lives in
+   * `traverse.ts` (the higher-level traversal walker) rather than here.
+   *
+   * `expand()` is intentionally edge-type-only — the source set is a flat
+   * UID list and the hop matches one `axbType`. Multi-axbType expansions
+   * become multiple `expand()` calls, one per relation.
+   *
+   * `params.sources.length === 0` short-circuits to an empty result. The
+   * backend never sees the call. (`compileExpand` itself rejects empty
+   * because `IN ()` is not valid SQL.)
+   */
+  async expand(params) {
+    if (!this.backend.expand) {
+      throw new FiregraphError(
+        "expand() is not supported by the current storage backend. Backends without `query.join` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent (just slower).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.sources.length === 0) {
+      return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
+    }
+    return this.backend.expand(params);
+  }
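A fan-out sketch. One call covers one relation, so two relations mean two calls (identifiers illustrative; the `axbType` parameter name is assumed from the edge-type-only contract in the JSDoc above):

const follows = await client.expand({
  sources: ["u1", "u2", "u3"],
  axbType: "follows",
  hydrate: true // also return target node records, per the { edges, targets } shape above
});
const blocks = await client.expand({ sources: ["u1", "u2", "u3"], axbType: "blocks" });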
+  // ---------------------------------------------------------------------------
+  // Engine-level multi-hop traversal (capability: traversal.serverSide)
+  // ---------------------------------------------------------------------------
+  /**
+   * Compile a multi-hop traversal spec into one server-side nested
+   * Pipeline and dispatch a single round trip.
+   *
+   * Backends declaring `traversal.serverSide` (Firestore Enterprise
+   * today) install this method; everywhere else, it throws
+   * `UNSUPPORTED_OPERATION`. The capability gate matches the type-level
+   * surface — `GraphClient<C>` only exposes `runEngineTraversal` when
+   * `'traversal.serverSide' extends C`.
+   *
+   * Most callers should not invoke this method directly; the
+   * `createTraversal(...).run()` builder routes through it
+   * automatically when `engineTraversal: 'auto'` (the default) and
+   * the spec is eligible per `firestore-traverse-compiler.ts`. Calling
+   * directly is appropriate for benchmarking or for callers that have
+   * already shaped their hop chain into the strict
+   * `EngineTraversalParams` shape.
+   *
+   * `params.sources.length === 0` short-circuits to empty per-hop
+   * arrays. The backend never sees the call.
+   */
+  async runEngineTraversal(params) {
+    if (!this.backend.runEngineTraversal) {
+      throw new FiregraphError(
+        "runEngineTraversal() is not supported by the current storage backend. Backends without `traversal.serverSide` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent for in-graph specs (different round-trip profile).",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.sources.length === 0) {
+      return {
+        hops: params.hops.map(() => ({ edges: [], sourceCount: 0 })),
+        totalReads: 0
+      };
+    }
+    return this.backend.runEngineTraversal(params);
+  }
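The zero-source short-circuit is fully specified by the code above: the result mirrors the hop list without a round trip (the hop objects are placeholders for the strict EngineTraversalParams shape):

const out = await client.runEngineTraversal({ sources: [], hops: [hopA, hopB] });
// → { hops: [ { edges: [], sourceCount: 0 },
//             { edges: [], sourceCount: 0 } ],
//     totalReads: 0 }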
+  // ---------------------------------------------------------------------------
+  // Server-side projection (capability: query.select)
+  // ---------------------------------------------------------------------------
+  /**
+   * Server-side projection — fetch only the requested fields from each
+   * matching edge. The backend translates the call into a projecting query
+   * (`SELECT json_extract(...)` on SQLite/DO, `Query.select(...)` on
+   * Firestore Standard, classic projection on Enterprise) so the wire
+   * payload is reduced to just the requested fields.
+   *
+   * Resolution rules for `select` (mirrored across all backends):
+   *
+   * - Built-in envelope fields (`aType`, `aUid`, `axbType`, `bType`,
+   *   `bUid`, `createdAt`, `updatedAt`, `v`) → resolve to the typed
+   *   column / Firestore field directly.
+   * - `'data'` literal → returns the whole user payload.
+   * - `'data.<x>'` → explicit nested path, returned at the same shape.
+   * - bare name → rewritten to `data.<name>` (the canonical "give me a
+   *   few keys out of the JSON payload" shape).
+   *
+   * Empty `select: []` is rejected with `INVALID_QUERY`. Duplicate entries
+   * are de-duped (first-occurrence order preserved); the result row carries
+   * one slot per unique field.
+   *
+   * Migrations are *not* applied to the result. The caller asked for a
+   * partial shape, and rehydrating it through the migration pipeline would
+   * require synthesising every absent field — see
+   * `StorageBackend.findEdgesProjected` for the rationale.
+   *
+   * Scan protection follows the `findEdges` rules: a query with no
+   * identifying fields requires `allowCollectionScan: true` to pass. The
+   * cap-less fallback would be `findEdges` + JS-side projection, but that
+   * defeats the wire-payload reduction; backends without `query.select`
+   * throw `UNSUPPORTED_OPERATION` rather than silently materialising full
+   * rows.
+   */
+  async findEdgesProjected(params) {
+    if (!this.backend.findEdgesProjected) {
+      throw new FiregraphError(
+        "findEdgesProjected() is not supported by the current storage backend. There is no client-side fallback because the wire-payload reduction is the entire point of the API \u2014 use findEdges() and project in JS if the backend does not declare `query.select`.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    if (params.select.length === 0) {
+      throw new FiregraphError(
+        "findEdgesProjected() requires a non-empty `select` list.",
+        "INVALID_QUERY"
+      );
+    }
+    const plan = buildEdgeQueryPlan(params);
+    let filters;
+    let options;
+    if (plan.strategy === "get") {
+      filters = [
+        { field: "aUid", op: "==", value: params.aUid },
+        { field: "axbType", op: "==", value: params.axbType },
+        { field: "bUid", op: "==", value: params.bUid }
+      ];
+      if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+      if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+      options = void 0;
+    } else {
+      filters = plan.filters;
+      options = plan.options;
+    }
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    const rows = await this.backend.findEdgesProjected(params.select, filters, options);
+    return rows;
+  }
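Field resolution worked through the rules above (query values illustrative):

const rows = await client.findEdgesProjected({
  aType: "user",
  axbType: "follows",                          // (aType + axbType): indexed-safe
  select: ["bUid", "since", "data.meta.src", "since"]
});
// "bUid"          → built-in envelope field, read from the typed column
// "since"         → bare name, rewritten to "data.since"
// "data.meta.src" → explicit nested path, returned at the same shape
// The duplicate "since" is de-duped; rows come back unmigrated by design.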
+  /**
+   * Native vector / nearest-neighbour search (capability `search.vector`).
+   *
+   * Resolves to the top-K records by similarity, sorted nearest-first
+   * (`EUCLIDEAN` / `COSINE`) or highest-first (`DOT_PRODUCT`). The wrapper
+   * is intentionally thin: capability check, scan-protection, then forward
+   * `params` verbatim to the backend. All field-path normalisation and
+   * SDK-shape validation lives in the shared
+   * `runFirestoreFindNearest` helper that both Firestore editions call —
+   * keeping it there means the validation surface stays in lockstep with
+   * the SDK call site, regardless of which backend is plugged in.
+   *
+   * Migrations are NOT applied. The vector index walked the raw stored
+   * shape; rehydrating each row through the migration pipeline would
+   * change the candidate set the index already chose. If you need
+   * migrated shape, follow up with `getNode` / `findEdges` on the
+   * returned UIDs — those paths apply migrations normally.
+   *
+   * Scan-protection mirrors `findEdges`: if no identifying filters
+   * (`aType` / `axbType` / `bType`) and no `where` clauses are supplied,
+   * the request must opt in via `allowCollectionScan: true`. The ANN
+   * query still walks the candidate set the WHERE clause produces, so
+   * an unfiltered nearest-neighbour search over a million-row collection
+   * is the same scan trap as an unfiltered `findEdges`.
+   *
+   * Backends without `search.vector` throw `UNSUPPORTED_OPERATION` —
+   * there is no client-side fallback because emulating ANN over the
+   * generic backend surface (`findEdges` + JS-side cosine) doesn't scale
+   * past trivial datasets and would give callers the wrong mental model
+   * about cost.
+   */
+  async findNearest(params) {
+    if (!this.backend.findNearest) {
+      throw new FiregraphError(
+        "findNearest() is not supported by the current storage backend. Vector search requires a backend that declares `search.vector` (currently Firestore Standard and Enterprise). There is no client-side fallback because emulating ANN on top of the generic backend surface does not scale beyond toy datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    if (params.where) filters.push(...params.where);
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.findNearest(params);
+  }
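An illustrative call. Only the filter fields and allowCollectionScan are interpreted by this wrapper; everything else is forwarded verbatim, so the vector-specific parameter names below (vectorField, queryVector, limit, distanceMeasure) are assumptions about the backend shape, not confirmed API:

const hits = await client.findNearest({
  aType: "document",                 // identifying filter → passes scan protection
  vectorField: "embedding",          // assumed name
  queryVector: [0.12, -0.04, 0.88],  // assumed name
  limit: 10,                         // assumed name
  distanceMeasure: "COSINE"          // EUCLIDEAN | COSINE | DOT_PRODUCT per the doc above
});
// hits are raw stored rows; re-read via getNode()/findEdges() if the
// migrated shape is needed.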
+  /**
+   * Native full-text search (capability `search.fullText`).
+   *
+   * Returns the top-N records by relevance, ordered by the search
+   * index's score. Only Firestore Enterprise declares this capability
+   * today — the underlying Pipelines `search({ query: documentMatches(...) })`
+   * stage requires Enterprise's FTS index. Standard does not declare
+   * the cap (FTS is an Enterprise-only product feature, not a
+   * typed-API gap), and the SQLite-shaped backends have no native
+   * FTS index. Backends without `search.fullText` throw
+   * `UNSUPPORTED_OPERATION` from this wrapper.
+   *
+   * Scan-protection mirrors `findNearest`: a search with no
+   * identifying filters (`aType` / `axbType` / `bType`) walks every
+   * row the index scored, so the request must opt in via
+   * `allowCollectionScan: true`.
+   *
+   * Migrations are NOT applied. The FTS index walked the raw stored
+   * shape; rehydrating each row through the migration pipeline would
+   * change the candidate set the index already scored. If you need
+   * migrated shape, follow up with `getNode` / `findEdges` on the
+   * returned UIDs.
+   */
+  async fullTextSearch(params) {
+    if (!this.backend.fullTextSearch) {
+      throw new FiregraphError(
+        "fullTextSearch() is not supported by the current storage backend. Full-text search requires a backend that declares `search.fullText` (currently Firestore Enterprise only \u2014 FTS is an Enterprise product feature). There is no client-side fallback because emulating FTS over the generic backend surface would not scale beyond toy datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.fullTextSearch(params);
+  }
+  /**
+   * Native geospatial distance search (capability `search.geo`).
+   *
+   * Returns rows whose `geoField` lies within `radiusMeters` of
+   * `point`, ordered nearest-first by default. Only Firestore
+   * Enterprise declares this capability — same Enterprise-only
+   * gating as `fullTextSearch`. Backends without `search.geo` throw
+   * `UNSUPPORTED_OPERATION` from this wrapper.
+   *
+   * Scan-protection mirrors `findNearest` and `fullTextSearch`.
+   *
+   * Migrations are NOT applied — same rationale as the other search
+   * extensions.
+   */
+  async geoSearch(params) {
+    if (!this.backend.geoSearch) {
+      throw new FiregraphError(
+        "geoSearch() is not supported by the current storage backend. Geospatial search requires a backend that declares `search.geo` (currently Firestore Enterprise only \u2014 geo queries are an Enterprise product feature). There is no client-side fallback because emulating geo over the generic backend surface (haversine over `findEdges`) would not scale beyond trivial datasets.",
+        "UNSUPPORTED_OPERATION"
+      );
+    }
+    const filters = [];
+    if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+    if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+    if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+    this.checkQuerySafety(filters, params.allowCollectionScan);
+    return this.backend.geoSearch(params);
+  }
+  /**
+   * Translate a `FindEdgesParams` into the `QueryFilter[]` shape the
+   * backend `bulkDelete` / `bulkUpdate` methods expect. Mirrors the
+   * `aggregate()` plan: a bare-empty params object becomes an empty
+   * filter list (after a scan-protection check); a GET-shape (all three
+   * identifiers) is rejected so we never silently turn a single-row
+   * lookup into a server-side DML; otherwise we run `buildEdgeQueryPlan`
+   * and surface its filters.
+   */
+  buildDmlFilters(params) {
+    const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
+    if (!hasAnyFilter) {
+      this.checkQuerySafety([], params.allowCollectionScan);
+      return [];
+    }
+    const plan = buildEdgeQueryPlan(params);
+    if (plan.strategy === "get") {
+      throw new FiregraphError(
+        "bulkDelete() / bulkUpdate() require a query, not a direct document lookup. Use removeEdge() / updateEdge() for single-row operations, or omit one of aUid/axbType/bUid to force a query strategy.",
+        "INVALID_QUERY"
+      );
+    }
+    this.checkQuerySafety(plan.filters, params.allowCollectionScan);
+    return plan.filters;
+  }
+  // ---------------------------------------------------------------------------
+  // Dynamic registry methods
+  // ---------------------------------------------------------------------------
+  async defineNodeType(name, jsonSchema, description, options) {
+    if (!this.dynamicConfig) {
+      throw new DynamicRegistryError(
+        'defineNodeType() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
+      );
+    }
+    if (RESERVED_TYPE_NAMES.has(name)) {
+      throw new DynamicRegistryError(
+        `Cannot define type "${name}": this name is reserved for the meta-registry.`
+      );
+    }
+    if (this.staticRegistry?.lookup(name, NODE_RELATION, name)) {
+      throw new DynamicRegistryError(
+        `Cannot define node type "${name}": already defined in the static registry.`
+      );
+    }
+    const uid = generateDeterministicUid(META_NODE_TYPE, name);
+    const data = { name, jsonSchema };
+    if (description !== void 0) data.description = description;
+    if (options?.titleField !== void 0) data.titleField = options.titleField;
+    if (options?.subtitleField !== void 0) data.subtitleField = options.subtitleField;
+    if (options?.viewTemplate !== void 0) data.viewTemplate = options.viewTemplate;
+    if (options?.viewCss !== void 0) data.viewCss = options.viewCss;
+    if (options?.allowedIn !== void 0) data.allowedIn = options.allowedIn;
+    if (options?.migrationWriteBack !== void 0)
+      data.migrationWriteBack = options.migrationWriteBack;
+    if (options?.migrations !== void 0) {
+      data.migrations = await this.serializeMigrations(options.migrations);
+    }
+    await this.putNode(META_NODE_TYPE, uid, data);
+  }
+  async defineEdgeType(name, topology, jsonSchema, description, options) {
+    if (!this.dynamicConfig) {
+      throw new DynamicRegistryError(
+        'defineEdgeType() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
+      );
+    }
+    if (RESERVED_TYPE_NAMES.has(name)) {
+      throw new DynamicRegistryError(
+        `Cannot define type "${name}": this name is reserved for the meta-registry.`
+      );
+    }
+    if (this.staticRegistry) {
+      const fromTypes = Array.isArray(topology.from) ? topology.from : [topology.from];
+      const toTypes = Array.isArray(topology.to) ? topology.to : [topology.to];
+      for (const aType of fromTypes) {
+        for (const bType of toTypes) {
+          if (this.staticRegistry.lookup(aType, name, bType)) {
+            throw new DynamicRegistryError(
+              `Cannot define edge type "${name}" for (${aType}) -> (${bType}): already defined in the static registry.`
+            );
+          }
+        }
+      }
+    }
+    const uid = generateDeterministicUid(META_EDGE_TYPE, name);
+    const data = {
+      name,
+      from: topology.from,
+      to: topology.to
+    };
+    if (jsonSchema !== void 0) data.jsonSchema = jsonSchema;
+    if (topology.inverseLabel !== void 0) data.inverseLabel = topology.inverseLabel;
+    if (topology.targetGraph !== void 0) data.targetGraph = topology.targetGraph;
+    if (description !== void 0) data.description = description;
+    if (options?.titleField !== void 0) data.titleField = options.titleField;
+    if (options?.subtitleField !== void 0) data.subtitleField = options.subtitleField;
+    if (options?.viewTemplate !== void 0) data.viewTemplate = options.viewTemplate;
+    if (options?.viewCss !== void 0) data.viewCss = options.viewCss;
+    if (options?.allowedIn !== void 0) data.allowedIn = options.allowedIn;
+    if (options?.migrationWriteBack !== void 0)
+      data.migrationWriteBack = options.migrationWriteBack;
+    if (options?.migrations !== void 0) {
+      data.migrations = await this.serializeMigrations(options.migrations);
+    }
+    await this.putNode(META_EDGE_TYPE, uid, data);
+  }
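End-to-end dynamic definition (schemas illustrative; the registryMode option string is taken from the error messages above, and generateId is defined below in src/id.ts):

const client = createGraphClient(backend, { registryMode: { mode: "dynamic" } }, metaBackend);
await client.defineNodeType(
  "task",
  { type: "object", properties: { title: { type: "string" } }, required: ["title"] },
  "A unit of work"
);
await client.defineEdgeType("assigned_to", { from: "task", to: "user" });
await client.reloadRegistry(); // defined below: folds the new meta-nodes into the live registry
await client.putNode("task", generateId(), { title: "Ship 0.13.0" });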
+  async reloadRegistry() {
+    if (!this.dynamicConfig) {
+      throw new DynamicRegistryError(
+        'reloadRegistry() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
+      );
+    }
+    const reader = this.createMetaReader();
+    const dynamicOnly = await createRegistryFromGraph(reader, this.migrationSandbox);
+    if (this.staticRegistry) {
+      this.dynamicRegistry = createMergedRegistry(this.staticRegistry, dynamicOnly);
+    } else {
+      this.dynamicRegistry = dynamicOnly;
+    }
+  }
+  async serializeMigrations(migrations) {
+    const result = migrations.map((m) => {
+      const source = typeof m.up === "function" ? m.up.toString() : m.up;
+      return { fromVersion: m.fromVersion, toVersion: m.toVersion, up: source };
+    });
+    await Promise.all(result.map((m) => precompileSource(m.up, this.migrationSandbox)));
+    return result;
+  }
+  /**
+   * Build a `GraphReader` over the meta-backend. If meta lives in the same
+   * collection as the main backend, `this` is returned directly.
+   */
+  createMetaReader() {
+    if (!this.metaBackend) return this;
+    const backend = this.metaBackend;
+    const executeMetaQuery = (filters, options) => backend.query(filters, options);
+    return {
+      async getNode(uid) {
+        return backend.getDoc(computeNodeDocId(uid));
+      },
+      async getEdge(aUid, axbType, bUid) {
+        return backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
+      },
+      async edgeExists(aUid, axbType, bUid) {
+        const record = await backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
+        return record !== null;
+      },
+      async findEdges(params) {
+        const plan = buildEdgeQueryPlan(params);
+        if (plan.strategy === "get") {
+          const record = await backend.getDoc(plan.docId);
+          return record ? [record] : [];
+        }
+        return executeMetaQuery(plan.filters, plan.options);
+      },
+      async findNodes(params) {
+        const plan = buildNodeQueryPlan(params);
+        if (plan.strategy === "get") {
+          const record = await backend.getDoc(plan.docId);
+          return record ? [record] : [];
+        }
+        return executeMetaQuery(plan.filters, plan.options);
+      }
+    };
+  }
+};
+function createGraphClient(backend, options, metaBackend) {
+  return new GraphClientImpl(backend, options, metaBackend);
+}
+
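The options surface, assembled (values illustrative; "warn" is inferred from the non-error, non-off logging branch in checkQuerySafety):

const client = createGraphClient(
  backend,                              // primary StorageBackend
  {
    registry,                           // optional static registry
    registryMode: { mode: "dynamic" },  // opt in to define*/reloadRegistry
    scanProtection: "error",            // "error" (default) | "warn" | "off"
    migrationWriteBack: "background",   // "off" (default) | "eager" | "background"
    migrationSandbox: sandbox           // optional executor for migration sources
  },
  metaBackend                           // optional separate home for meta-types
);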
+// src/id.ts
+var import_nanoid = require("nanoid");
+function generateId() {
+  return (0, import_nanoid.nanoid)();
+}
+
+// src/firestore-enterprise/backend.ts
+var import_firestore4 = require("@google-cloud/firestore");
+
+// src/bulk.ts
+var MAX_BATCH_SIZE = 500;
+var DEFAULT_MAX_RETRIES = 3;
+var BASE_DELAY_MS = 200;
+function sleep(ms) {
+  return new Promise((resolve) => setTimeout(resolve, ms));
+}
+function chunk(arr, size) {
+  const chunks = [];
+  for (let i = 0; i < arr.length; i += size) {
+    chunks.push(arr.slice(i, i + size));
+  }
+  return chunks;
+}
+async function bulkDeleteDocIds(db, collectionPath, docIds, options) {
+  if (docIds.length === 0) {
+    return { deleted: 0, batches: 0, errors: [] };
+  }
+  const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
+  const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
+  const onProgress = options?.onProgress;
+  const chunks = chunk(docIds, batchSize);
+  const errors = [];
+  let deleted = 0;
+  let completedBatches = 0;
+  for (let i = 0; i < chunks.length; i++) {
+    const ids = chunks[i];
+    let committed = false;
+    for (let attempt = 0; attempt <= maxRetries; attempt++) {
+      try {
+        const batch = db.batch();
+        const collectionRef = db.collection(collectionPath);
+        for (const id of ids) {
+          batch.delete(collectionRef.doc(id));
+        }
+        await batch.commit();
+        committed = true;
+        deleted += ids.length;
+        break;
+      } catch (err) {
+        if (attempt < maxRetries) {
+          const delay = BASE_DELAY_MS * Math.pow(2, attempt);
+          await sleep(delay);
+        } else {
+          errors.push({
+            batchIndex: i,
+            error: err instanceof Error ? err : new Error(String(err)),
+            operationCount: ids.length
+          });
+        }
+      }
+    }
+    if (committed) {
+      completedBatches++;
+    }
+    if (onProgress) {
+      onProgress({
+        completedBatches,
+        totalBatches: chunks.length,
+        deletedSoFar: deleted
+      });
+    }
+  }
+  return { deleted, batches: completedBatches, errors };
+}
|
|
2372
|
+
async function bulkRemoveEdges(db, collectionPath, reader, params, options) {
|
|
2373
|
+
const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
|
|
2374
|
+
const edges = await reader.findEdges(effectiveParams);
|
|
2375
|
+
const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
|
|
2376
|
+
return bulkDeleteDocIds(db, collectionPath, docIds, options);
|
|
2377
|
+
}
|
|
2378
|
+
async function deleteSubcollectionsRecursive(db, collectionPath, docId, options) {
|
|
2379
|
+
const docRef = db.collection(collectionPath).doc(docId);
|
|
2380
|
+
const subcollections = await docRef.listCollections();
|
|
2381
|
+
if (subcollections.length === 0) return { deleted: 0, errors: [] };
|
|
2382
|
+
let totalDeleted = 0;
|
|
2383
|
+
const allErrors = [];
|
|
2384
|
+
const subOptions = options ? { batchSize: options.batchSize, maxRetries: options.maxRetries } : void 0;
|
|
2385
|
+
for (const subCollRef of subcollections) {
|
|
2386
|
+
const subCollPath = subCollRef.path;
|
|
2387
|
+
const snapshot = await subCollRef.select().get();
|
|
2388
|
+
const subDocIds = snapshot.docs.map((d) => d.id);
|
|
2389
|
+
for (const subDocId of subDocIds) {
|
|
2390
|
+
const subResult = await deleteSubcollectionsRecursive(db, subCollPath, subDocId, subOptions);
|
|
2391
|
+
totalDeleted += subResult.deleted;
|
|
2392
|
+
allErrors.push(...subResult.errors);
|
|
2393
|
+
}
|
|
2394
|
+
if (subDocIds.length > 0) {
|
|
2395
|
+
const result = await bulkDeleteDocIds(db, subCollPath, subDocIds, subOptions);
|
|
2396
|
+
totalDeleted += result.deleted;
|
|
2397
|
+
allErrors.push(...result.errors);
|
|
2398
|
+
}
|
|
2399
|
+
}
|
|
2400
|
+
return { deleted: totalDeleted, errors: allErrors };
|
|
2401
|
+
}
|
|
2402
|
+
async function removeNodeCascade(db, collectionPath, reader, uid, options) {
|
|
2403
|
+
const [outgoingRaw, incomingRaw] = await Promise.all([
|
|
2404
|
+
reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
|
|
2405
|
+
reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
|
|
2406
|
+
]);
|
|
2407
|
+
const outgoing = outgoingRaw.filter((e) => e.axbType !== NODE_RELATION);
|
|
2408
|
+
const incoming = incomingRaw.filter((e) => e.axbType !== NODE_RELATION);
|
|
2409
|
+
const edgeDocIdSet = /* @__PURE__ */ new Set();
|
|
2410
|
+
const allEdges = [];
|
|
2411
|
+
for (const edge of [...outgoing, ...incoming]) {
|
|
2412
|
+
const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
|
|
2413
|
+
if (!edgeDocIdSet.has(docId)) {
|
|
2414
|
+
edgeDocIdSet.add(docId);
|
|
2415
|
+
allEdges.push(edge);
|
|
2416
|
+
}
|
|
2417
|
+
}
|
|
2418
|
+
const shouldDeleteSubcollections = options?.deleteSubcollections !== false;
|
|
2419
|
+
const nodeDocId = computeNodeDocId(uid);
|
|
2420
|
+
let subcollectionResult = { deleted: 0, errors: [] };
|
|
2421
|
+
if (shouldDeleteSubcollections) {
|
|
2422
|
+
subcollectionResult = await deleteSubcollectionsRecursive(
|
|
2423
|
+
db,
|
|
2424
|
+
collectionPath,
|
|
2425
|
+
nodeDocId,
|
|
2426
|
+
options
|
|
2427
|
+
);
|
|
2428
|
+
}
|
|
2429
|
+
const edgeDocIds = allEdges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
|
|
2430
|
+
const allDocIds = [...edgeDocIds, nodeDocId];
|
|
2431
|
+
const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
|
|
2432
|
+
const result = await bulkDeleteDocIds(db, collectionPath, allDocIds, {
|
|
2433
|
+
...options,
|
|
2434
|
+
batchSize
|
|
2435
|
+
});
|
|
2436
|
+
const totalChunks = Math.ceil(allDocIds.length / batchSize);
|
|
2437
|
+
const nodeChunkIndex = totalChunks - 1;
|
|
2438
|
+
const nodeDeleted = !result.errors.some((e) => e.batchIndex === nodeChunkIndex);
|
|
2439
|
+
const topLevelEdgesDeleted = nodeDeleted ? result.deleted - 1 : result.deleted;
|
|
2440
|
+
return {
|
|
2441
|
+
deleted: result.deleted + subcollectionResult.deleted,
|
|
2442
|
+
batches: result.batches,
|
|
2443
|
+
errors: [...result.errors, ...subcollectionResult.errors],
|
|
2444
|
+
edgesDeleted: topLevelEdgesDeleted,
|
|
2445
|
+
nodeDeleted
|
|
2446
|
+
};
|
|
2447
|
+
}
|
|
2448
|
+
|
|
2449
|
+
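The delete loop above retries each failed batch with exponential backoff: a batch is attempted up to four times (the initial try plus `DEFAULT_MAX_RETRIES = 3`), sleeping 200 ms, 400 ms, then 800 ms between tries before the failure is recorded in `errors`. A standalone sketch of the same schedule, pure arithmetic from the constants above:

```js
// Same backoff schedule as bulkDeleteDocIds: delay = BASE_DELAY_MS * 2^attempt.
for (let attempt = 0; attempt < 3; attempt++) {
  console.log(`after attempt ${attempt}: wait ${200 * Math.pow(2, attempt)} ms`);
}
// after attempt 0: wait 200 ms
// after attempt 1: wait 400 ms
// after attempt 2: wait 800 ms  (then one final try, then the error is recorded)
```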
+// src/internal/backend.ts
+function createCapabilities(caps) {
+  return {
+    has: (capability) => caps.has(capability),
+    values: () => caps.values()
+  };
+}
+
+// src/internal/firestore-aggregate.ts
+var import_firestore2 = require("@google-cloud/firestore");
+function applyFiltersToQuery(base, filters) {
+  let q = base;
+  for (const f of filters) {
+    q = q.where(f.field, f.op, f.value);
+  }
+  return q;
+}
+async function runFirestoreAggregate(base, spec, filters, { edition }) {
+  if (Object.keys(spec).length === 0) {
+    throw new FiregraphError(
+      "aggregate() requires at least one aggregation in the `aggregates` map.",
+      "INVALID_QUERY"
+    );
+  }
+  const filtered = applyFiltersToQuery(base, filters);
+  const aggregations = {};
+  for (const [alias, { op, field }] of Object.entries(spec)) {
+    if (op === "count") {
+      if (field !== void 0) {
+        throw new FiregraphError(
+          `Aggregate '${alias}' op 'count' must not specify a field \u2014 count operates on rows, not a column expression.`,
+          "INVALID_QUERY"
+        );
+      }
+      aggregations[alias] = import_firestore2.AggregateField.count();
+      continue;
+    }
+    if (!field) {
+      throw new FiregraphError(
+        `Aggregate '${alias}' op '${op}' requires a field.`,
+        "INVALID_QUERY"
+      );
+    }
+    if (op === "sum") {
+      aggregations[alias] = import_firestore2.AggregateField.sum(field);
+    } else if (op === "avg") {
+      aggregations[alias] = import_firestore2.AggregateField.average(field);
+    } else {
+      const editionLabel = edition === "enterprise" ? "Firestore Enterprise" : "Firestore Standard";
+      throw new FiregraphError(
+        `Aggregate op '${op}' is not supported on ${editionLabel}. Both Firestore editions support count/sum/avg via the classic Query API; min/max requires a backend with SQL aggregation (SQLite or DO).`,
+        "UNSUPPORTED_AGGREGATE"
+      );
+    }
+  }
+  const snap = await filtered.aggregate(aggregations).get();
+  const data = snap.data();
+  const out = {};
+  for (const alias of Object.keys(spec)) {
+    const v = data[alias];
+    if (v === null || v === void 0) {
+      const op = spec[alias].op;
+      out[alias] = op === "avg" ? Number.NaN : 0;
+    } else {
+      out[alias] = v;
+    }
+  }
+  return out;
+}
+
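`runFirestoreAggregate` expects an alias-to-`{ op, field }` map: `count` must omit `field`, `sum`/`avg` require one, and any other op throws `UNSUPPORTED_AGGREGATE`. A sketch of a valid spec; the aliases and field names are illustrative:

```js
// Valid on both Firestore editions per the helper above; min/max would throw.
const spec = {
  total: { op: "count" },                       // counts rows, no field allowed
  revenue: { op: "sum", field: "data.amount" },
  avgScore: { op: "avg", field: "data.score" }
};
// An empty spec ({}) throws INVALID_QUERY before any query runs; null results
// coerce to 0 for count/sum and NaN for avg, as in the loop above.
```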
+// src/internal/firestore-bulk-dml.ts
+var _Pipelines = null;
+var _Timestamp = null;
+async function getFirestoreSurface() {
+  if (!_Pipelines || !_Timestamp) {
+    const mod = await import("@google-cloud/firestore");
+    _Pipelines = mod.Pipelines;
+    _Timestamp = mod.Timestamp;
+  }
+  return { P: _Pipelines, Ts: _Timestamp };
+}
+function buildFilterExpression(P, filter) {
+  const { field: fieldName, op, value } = filter;
+  switch (op) {
+    case "==":
+      return P.equal(fieldName, value);
+    case "!=":
+      return P.notEqual(fieldName, value);
+    case "<":
+      return P.lessThan(fieldName, value);
+    case "<=":
+      return P.lessThanOrEqual(fieldName, value);
+    case ">":
+      return P.greaterThan(fieldName, value);
+    case ">=":
+      return P.greaterThanOrEqual(fieldName, value);
+    case "in":
+      return P.equalAny(fieldName, value);
+    case "not-in":
+      return P.notEqualAny(fieldName, value);
+    case "array-contains":
+      return P.arrayContains(fieldName, value);
+    case "array-contains-any":
+      return P.arrayContainsAny(fieldName, value);
+    default:
+      throw new FiregraphError(
+        `bulkDelete/bulkUpdate: unsupported filter op "${op}" for pipeline DML.`,
+        "INVALID_QUERY"
+      );
+  }
+}
+function applyWhere(P, pipeline, filters) {
+  const exprs = filters.map((f) => buildFilterExpression(P, f));
+  if (exprs.length === 1) {
+    return pipeline.where(exprs[0]);
+  }
+  const [first, second, ...rest] = exprs;
+  return pipeline.where(P.and(first, second, ...rest));
+}
+function assertNonEmptyFilters(filters, op) {
+  if (filters.length === 0) {
+    throw new FiregraphError(
+      `bulk${op === "delete" ? "Delete" : "Update"}() on Firestore requires at least one filter; an empty filter list would target the whole collection. To wipe a subgraph, use removeNodeCascade() on the parent node instead.`,
+      "INVALID_QUERY"
+    );
+  }
+}
+async function runFirestorePipelineDelete(db, collectionPath, filters, _options) {
+  assertNonEmptyFilters(filters, "delete");
+  const { P } = await getFirestoreSurface();
+  let pipeline = db.pipeline().collection(collectionPath);
+  pipeline = applyWhere(P, pipeline, filters);
+  const snap = await pipeline.delete().execute();
+  return {
+    deleted: snap.results.length,
+    batches: 1,
+    errors: []
+  };
+}
+async function runFirestorePipelineUpdate(db, collectionPath, filters, patch, _options) {
+  assertNonEmptyFilters(filters, "update");
+  const ops = flattenPatch(patch.data);
+  if (ops.length === 0) {
+    throw new FiregraphError(
+      "bulkUpdate(): patch.data produced no field updates. An empty patch would only stamp updatedAt, which is almost certainly a bug.",
+      "INVALID_QUERY"
+    );
+  }
+  for (const op of ops) {
+    if (op.delete) {
+      throw new FiregraphError(
+        `bulkUpdate(): preview Pipeline DML does not support deleteField() sentinels (no typed delete-transform on @google-cloud/firestore@8.5.0's Pipeline.update(AliasedExpression[])). Drop the delete from the patch or use replaceEdge/replaceNode for the affected rows.`,
+        "INVALID_QUERY"
+      );
+    }
+  }
+  const { P, Ts } = await getFirestoreSurface();
+  const transforms = ops.map((op) => {
+    const alias = `data.${op.path.join(".")}`;
+    return P.constant(op.value).as(alias);
+  });
+  transforms.push(P.constant(Ts.now()).as("updatedAt"));
+  let pipeline = db.pipeline().collection(collectionPath);
+  pipeline = applyWhere(P, pipeline, filters);
+  const snap = await pipeline.update(transforms).execute();
+  return {
+    deleted: snap.results.length,
+    batches: 1,
+    errors: []
+  };
+}
+
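For `bulkUpdate`, the helper above turns each flattened patch entry into an aliased constant expression and always appends an `updatedAt` stamp; `deleteField()` sentinels are rejected outright. Assuming a patch that sets a single field (the field name and value are illustrative, and `flattenPatch` itself is defined elsewhere in the bundle), the generated transforms are equivalent to:

```js
// Shape of the transforms built in runFirestorePipelineUpdate above,
// assuming patch.data = { status: "archived" }.
const transforms = [
  P.constant("archived").as("data.status"), // one entry per flattened patch op
  P.constant(Ts.now()).as("updatedAt")      // always stamped last
];
// pipeline.where(...).update(transforms).execute() applies them server-side.
```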
+// src/internal/firestore-classic-adapter.ts
+function createFirestoreAdapter(db, collectionPath) {
+  const collectionRef = db.collection(collectionPath);
+  return {
+    collectionPath,
+    async getDoc(docId) {
+      const snap = await collectionRef.doc(docId).get();
+      if (!snap.exists) return null;
+      return snap.data();
+    },
+    async setDoc(docId, data, options) {
+      if (options?.merge) {
+        await collectionRef.doc(docId).set(data, { merge: true });
+      } else {
+        await collectionRef.doc(docId).set(data);
+      }
+    },
+    async updateDoc(docId, data) {
+      await collectionRef.doc(docId).update(data);
+    },
+    async deleteDoc(docId) {
+      await collectionRef.doc(docId).delete();
+    },
+    async query(filters, options) {
+      let q = collectionRef;
+      for (const f of filters) {
+        q = q.where(f.field, f.op, f.value);
+      }
+      if (options?.orderBy) {
+        q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
+      }
+      if (options?.limit !== void 0) {
+        q = q.limit(options.limit);
+      }
+      const snap = await q.get();
+      return snap.docs.map((doc) => doc.data());
+    }
+  };
+}
+function createTransactionAdapter(db, collectionPath, tx) {
+  const collectionRef = db.collection(collectionPath);
+  return {
+    async getDoc(docId) {
+      const snap = await tx.get(collectionRef.doc(docId));
+      if (!snap.exists) return null;
+      return snap.data();
+    },
+    setDoc(docId, data, options) {
+      if (options?.merge) {
+        tx.set(collectionRef.doc(docId), data, { merge: true });
+      } else {
+        tx.set(collectionRef.doc(docId), data);
+      }
+    },
+    updateDoc(docId, data) {
+      tx.update(collectionRef.doc(docId), data);
+    },
+    deleteDoc(docId) {
+      tx.delete(collectionRef.doc(docId));
+    },
+    async query(filters, options) {
+      let q = collectionRef;
+      for (const f of filters) {
+        q = q.where(f.field, f.op, f.value);
+      }
+      if (options?.orderBy) {
+        q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
+      }
+      if (options?.limit !== void 0) {
+        q = q.limit(options.limit);
+      }
+      const snap = await tx.get(q);
+      return snap.docs.map((doc) => doc.data());
+    }
+  };
+}
+function createBatchAdapter(db, collectionPath) {
+  const collectionRef = db.collection(collectionPath);
+  const batch = db.batch();
+  return {
+    setDoc(docId, data, options) {
+      if (options?.merge) {
+        batch.set(collectionRef.doc(docId), data, { merge: true });
+      } else {
+        batch.set(collectionRef.doc(docId), data);
+      }
+    },
+    updateDoc(docId, data) {
+      batch.update(collectionRef.doc(docId), data);
+    },
+    deleteDoc(docId) {
+      batch.delete(collectionRef.doc(docId));
+    },
+    async commit() {
+      await batch.commit();
+    }
+  };
+}
+
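All three adapters translate the same backend-neutral filter/option shape onto the classic Firestore query builder: a filter is `{ field, op, value }` and options carry `orderBy`/`limit`. The values here are illustrative; the mapping is exactly the loop in `adapter.query()` above:

```js
// Input shape consumed by adapter.query() above.
const filters = [
  { field: "axbType", op: "==", value: "authored" },
  { field: "aUid", op: "==", value: "user-123" }
];
const options = { orderBy: { field: "createdAt", direction: "desc" }, limit: 20 };
// becomes: collectionRef.where("axbType", "==", "authored")
//   .where("aUid", "==", "user-123").orderBy("createdAt", "desc").limit(20)
```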
+// src/internal/firestore-classic-expand.ts
+var FIRESTORE_CLASSIC_IN_CHUNK_SIZE = 30;
+function readField(record, path) {
+  if (!path.includes(".")) {
+    return record[path];
+  }
+  let cursor = record;
+  for (const seg of path.split(".")) {
+    if (cursor === null || typeof cursor !== "object") return void 0;
+    cursor = cursor[seg];
+  }
+  return cursor;
+}
+function compareFieldValues(a, b) {
+  if (a === b) return 0;
+  if (a === void 0 || a === null) return -1;
+  if (b === void 0 || b === null) return 1;
+  const ta = typeof a;
+  const tb = typeof b;
+  if (ta === tb) {
+    return a < b ? -1 : 1;
+  }
+  const sa = String(a);
+  const sb = String(b);
+  return sa < sb ? -1 : sa > sb ? 1 : 0;
+}
+async function runFirestoreClassicExpand(adapter, params) {
+  if (params.sources.length === 0) {
+    return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
+  }
+  if (params.axbType.length === 0) {
+    throw new FiregraphError("expand(): axbType must be a non-empty string.", "INVALID_QUERY");
+  }
+  const direction = params.direction ?? "forward";
+  const sourceField = direction === "forward" ? "aUid" : "bUid";
+  const chunks = chunkUids(params.sources, FIRESTORE_CLASSIC_IN_CHUNK_SIZE);
+  const totalLimit = params.limitPerSource !== void 0 ? params.sources.length * params.limitPerSource : void 0;
+  const buildFilters = (chunk2) => {
+    const filters = [
+      { field: "axbType", op: "==", value: params.axbType },
+      { field: sourceField, op: "in", value: chunk2 }
+    ];
+    if (params.aType !== void 0) {
+      filters.push({ field: "aType", op: "==", value: params.aType });
+    }
+    if (params.bType !== void 0) {
+      filters.push({ field: "bType", op: "==", value: params.bType });
+    }
+    return filters;
+  };
+  const buildOptions = (chunk2) => {
+    const opts = {};
+    if (params.orderBy) opts.orderBy = params.orderBy;
+    if (params.limitPerSource !== void 0) {
+      opts.limit = chunk2.length * params.limitPerSource;
+    }
+    return opts;
+  };
+  const chunkResults = await Promise.all(
+    chunks.map((chunk2) => adapter.query(buildFilters(chunk2), buildOptions(chunk2)))
+  );
+  let edges = chunkResults.flat();
+  if (params.axbType === NODE_RELATION) {
+    edges = edges.filter((e) => e.aUid !== e.bUid);
+  }
+  if (params.orderBy) {
+    const sortField = params.orderBy.field;
+    const dir = params.orderBy.direction ?? "asc";
+    edges.sort((a, b) => {
+      const cmp = compareFieldValues(readField(a, sortField), readField(b, sortField));
+      return dir === "asc" ? cmp : -cmp;
+    });
+  }
+  if (totalLimit !== void 0 && edges.length > totalLimit) {
+    edges = edges.slice(0, totalLimit);
+  }
+  if (!params.hydrate) return { edges };
+  const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
+  const uniqueTargets = [...new Set(targetUids)];
+  if (uniqueTargets.length === 0) {
+    return { edges, targets: [] };
+  }
+  const hydrateChunks = chunkUids(uniqueTargets, FIRESTORE_CLASSIC_IN_CHUNK_SIZE);
+  const hydrateResults = await Promise.all(
+    hydrateChunks.map(
+      (chunk2) => adapter.query([
+        { field: "axbType", op: "==", value: NODE_RELATION },
+        { field: "aUid", op: "in", value: chunk2 }
+      ])
+    )
+  );
+  const byUid = /* @__PURE__ */ new Map();
+  for (const row of hydrateResults.flat()) {
+    byUid.set(row.bUid, row);
+  }
+  const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
+  return { edges, targets };
+}
+function chunkUids(uids, chunkSize) {
+  if (chunkSize <= 0) {
+    throw new FiregraphError(
+      `chunkUids: chunkSize must be positive (got ${chunkSize}).`,
+      "INVALID_QUERY"
+    );
+  }
+  const out = [];
+  for (let i = 0; i < uids.length; i += chunkSize) {
+    out.push(uids.slice(i, i + chunkSize));
+  }
+  return out;
+}
+
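The classic expand path splits source uids into groups of `FIRESTORE_CLASSIC_IN_CHUNK_SIZE = 30` because Firestore's `in` operator caps its value list at 30 entries; each chunk becomes one query, issued in parallel and merged (then re-sorted and re-limited client-side, as above). The query fan-out is therefore just:

```js
// Query count per expand call above (pure arithmetic).
const queriesFor = (sourceCount) => Math.ceil(sourceCount / 30);
console.log(queriesFor(30));  // 1
console.log(queriesFor(31));  // 2
console.log(queriesFor(100)); // 4
```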
+// src/internal/firestore-expand.ts
+var _Pipelines2 = null;
+async function getPipelines() {
+  if (!_Pipelines2) {
+    const mod = await import("@google-cloud/firestore");
+    _Pipelines2 = mod.Pipelines;
+  }
+  return _Pipelines2;
+}
+async function runFirestorePipelineExpand(db, collectionPath, params) {
+  if (params.sources.length === 0) {
+    return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
+  }
+  if (params.axbType.length === 0) {
+    throw new FiregraphError("expand(): axbType must be a non-empty string.", "INVALID_QUERY");
+  }
+  const direction = params.direction ?? "forward";
+  const sourceField = direction === "forward" ? "aUid" : "bUid";
+  const P = await getPipelines();
+  const exprs = [
+    P.equal("axbType", params.axbType),
+    P.equalAny(sourceField, params.sources)
+  ];
+  if (params.aType !== void 0) exprs.push(P.equal("aType", params.aType));
+  if (params.bType !== void 0) exprs.push(P.equal("bType", params.bType));
+  let pipeline = db.pipeline().collection(collectionPath);
+  if (exprs.length === 1) {
+    pipeline = pipeline.where(exprs[0]);
+  } else {
+    const [first, second, ...rest] = exprs;
+    pipeline = pipeline.where(P.and(first, second, ...rest));
+  }
+  if (params.orderBy) {
+    const f = P.field(params.orderBy.field);
+    const ordering = params.orderBy.direction === "desc" ? f.descending() : f.ascending();
+    pipeline = pipeline.sort(ordering);
+  }
+  const totalLimit = params.limitPerSource !== void 0 ? params.sources.length * params.limitPerSource : void 0;
+  if (totalLimit !== void 0) {
+    pipeline = pipeline.limit(totalLimit);
+  }
+  const snap = await pipeline.execute();
+  let edges = snap.results.map((r) => r.data());
+  if (params.axbType === NODE_RELATION) {
+    edges = edges.filter((e) => e.aUid !== e.bUid);
+  }
+  if (!params.hydrate) return { edges };
+  const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
+  const uniqueTargets = [...new Set(targetUids)];
+  if (uniqueTargets.length === 0) {
+    return { edges, targets: [] };
+  }
+  const hydratePipeline = db.pipeline().collection(collectionPath).where(
+    P.and(P.equal("axbType", NODE_RELATION), P.equalAny("aUid", uniqueTargets))
+  );
+  const hydrateSnap = await hydratePipeline.execute();
+  const byUid = /* @__PURE__ */ new Map();
+  for (const r of hydrateSnap.results) {
+    const row = r.data();
+    byUid.set(row.bUid, row);
+  }
+  const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
+  return { edges, targets };
+}
+
+// src/internal/firestore-fulltext.ts
+var ENVELOPE_FIELDS = /* @__PURE__ */ new Set([
+  "aType",
+  "aUid",
+  "axbType",
+  "bType",
+  "bUid",
+  "createdAt",
+  "updatedAt",
+  "v"
+]);
+function normalizeFullTextFieldPath(field) {
+  if (ENVELOPE_FIELDS.has(field)) {
+    throw new FiregraphError(
+      `fullTextSearch(): field '${field}' is a built-in envelope field \u2014 text-indexed fields must live under \`data.*\`. Use a path like 'data.${field}' if you really meant a nested data field.`,
+      "INVALID_QUERY"
+    );
+  }
+  if (field === "data" || field.startsWith("data.")) return field;
+  return `data.${field}`;
+}
+var _Pipelines3 = null;
+async function getPipelines2() {
+  if (!_Pipelines3) {
+    const mod = await import("@google-cloud/firestore");
+    _Pipelines3 = mod.Pipelines;
+  }
+  return _Pipelines3;
+}
+async function runFirestoreFullTextSearch(db, collectionPath, params) {
+  if (typeof params.query !== "string" || params.query.length === 0) {
+    throw new FiregraphError(
+      "fullTextSearch(): query must be a non-empty string.",
+      "INVALID_QUERY"
+    );
+  }
+  if (!Number.isInteger(params.limit) || params.limit <= 0) {
+    throw new FiregraphError(
+      `fullTextSearch(): limit must be a positive integer (got ${params.limit}).`,
+      "INVALID_QUERY"
+    );
+  }
+  const normalizedFields = params.fields?.map((f) => normalizeFullTextFieldPath(f));
+  const P = await getPipelines2();
+  if (normalizedFields !== void 0 && normalizedFields.length > 0) {
+    throw new FiregraphError(
+      "fullTextSearch(): the `fields` option is not yet supported \u2014 per-field text predicates are not available in @google-cloud/firestore@8.5.0. Omit `fields` to search all indexed fields.",
+      "INVALID_QUERY"
+    );
+  }
+  const searchQuery = P.documentMatches(params.query);
+  let pipeline = db.pipeline().collection(collectionPath).search({
+    query: searchQuery,
+    sort: P.score().descending()
+  });
+  const whereExprs = [];
+  if (params.aType) whereExprs.push(P.equal("aType", params.aType));
+  if (params.axbType) whereExprs.push(P.equal("axbType", params.axbType));
+  if (params.bType) whereExprs.push(P.equal("bType", params.bType));
+  if (whereExprs.length === 1) {
+    pipeline = pipeline.where(whereExprs[0]);
+  } else if (whereExprs.length > 1) {
+    const [first, second, ...rest] = whereExprs;
+    pipeline = pipeline.where(P.and(first, second, ...rest));
+  }
+  pipeline = pipeline.limit(params.limit);
+  const snap = await pipeline.execute();
+  return snap.results.map((r) => r.data());
+}
+
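The full-text helper normalizes user-supplied field paths before anything else: envelope fields are rejected, `data`-prefixed paths pass through unchanged, and any other name is pushed under `data.*`. Illustrative inputs and outcomes of the function above:

```js
// Behavior of normalizeFullTextFieldPath above (inputs illustrative).
normalizeFullTextFieldPath("title");      // => "data.title"
normalizeFullTextFieldPath("data.title"); // => "data.title" (unchanged)
normalizeFullTextFieldPath("createdAt");  // throws INVALID_QUERY (envelope field)
```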
+// src/internal/firestore-geo.ts
+var import_firestore3 = require("@google-cloud/firestore");
+var ENVELOPE_FIELDS2 = /* @__PURE__ */ new Set([
+  "aType",
+  "aUid",
+  "axbType",
+  "bType",
+  "bUid",
+  "createdAt",
+  "updatedAt",
+  "v"
+]);
+function normalizeGeoFieldPath(field) {
+  if (ENVELOPE_FIELDS2.has(field)) {
+    throw new FiregraphError(
+      `geoSearch(): geoField '${field}' is a built-in envelope field \u2014 geo-indexed fields must live under \`data.*\`. Use a path like 'data.${field}' if you really meant a nested data field.`,
+      "INVALID_QUERY"
+    );
+  }
+  if (field === "data" || field.startsWith("data.")) return field;
+  return `data.${field}`;
+}
+function assertValidGeoPoint(lat, lng) {
+  if (!Number.isFinite(lat) || lat < -90 || lat > 90) {
+    throw new FiregraphError(
+      `geoSearch(): point.lat must be in [-90, 90] (got ${lat}).`,
+      "INVALID_QUERY"
+    );
+  }
+  if (!Number.isFinite(lng) || lng < -180 || lng > 180) {
+    throw new FiregraphError(
+      `geoSearch(): point.lng must be in [-180, 180] (got ${lng}).`,
+      "INVALID_QUERY"
+    );
+  }
+}
+var _Pipelines4 = null;
+async function getPipelines3() {
+  if (!_Pipelines4) {
+    const mod = await import("@google-cloud/firestore");
+    _Pipelines4 = mod.Pipelines;
+  }
+  return _Pipelines4;
+}
+async function runFirestoreGeoSearch(db, collectionPath, params) {
+  if (!Number.isFinite(params.radiusMeters) || params.radiusMeters <= 0) {
+    throw new FiregraphError(
+      `geoSearch(): radiusMeters must be a positive finite number (got ${params.radiusMeters}).`,
+      "INVALID_QUERY"
+    );
+  }
+  if (!Number.isInteger(params.limit) || params.limit <= 0) {
+    throw new FiregraphError(
+      `geoSearch(): limit must be a positive integer (got ${params.limit}).`,
+      "INVALID_QUERY"
+    );
+  }
+  assertValidGeoPoint(params.point.lat, params.point.lng);
+  const geoFieldPath = normalizeGeoFieldPath(params.geoField);
+  const center = new import_firestore3.GeoPoint(params.point.lat, params.point.lng);
+  const P = await getPipelines3();
+  const distanceQuery = P.geoDistance(geoFieldPath, center).lessThanOrEqual(params.radiusMeters);
+  const orderByDistance = params.orderByDistance !== false;
+  const opts = {
+    query: distanceQuery
+  };
+  if (orderByDistance) {
+    opts.sort = P.geoDistance(geoFieldPath, center).ascending();
+  }
+  let pipeline = db.pipeline().collection(collectionPath).search(opts);
+  const whereExprs = [];
+  if (params.aType) whereExprs.push(P.equal("aType", params.aType));
+  if (params.axbType) whereExprs.push(P.equal("axbType", params.axbType));
+  if (params.bType) whereExprs.push(P.equal("bType", params.bType));
+  if (whereExprs.length === 1) {
+    pipeline = pipeline.where(whereExprs[0]);
+  } else if (whereExprs.length > 1) {
+    const [first, second, ...rest] = whereExprs;
+    pipeline = pipeline.where(P.and(first, second, ...rest));
+  }
+  pipeline = pipeline.limit(params.limit);
+  const snap = await pipeline.execute();
+  return snap.results.map((r) => r.data());
+}
+
+// src/internal/projection.ts
+var PROJECTION_BUILTIN_FIELDS = /* @__PURE__ */ new Set([
+  "aType",
+  "aUid",
+  "axbType",
+  "bType",
+  "bUid",
+  "createdAt",
+  "updatedAt",
+  "v"
+]);
+function normalizeFirestoreProjectionField(field) {
+  if (PROJECTION_BUILTIN_FIELDS.has(field)) return field;
+  if (field === "data" || field.startsWith("data.")) return field;
+  return `data.${field}`;
+}
+function readProjectionPath(obj, path) {
+  if (obj === void 0 || obj === null) return null;
+  const raw = !path.includes(".") ? obj[path] : (() => {
+    const parts = path.split(".");
+    let cur = obj;
+    for (const part of parts) {
+      if (cur === void 0 || cur === null) return void 0;
+      if (typeof cur !== "object") return void 0;
+      cur = cur[part];
+    }
+    return cur;
+  })();
+  return raw === void 0 ? null : raw;
+}
+
+// src/internal/firestore-projection.ts
+async function runFirestoreFindEdgesProjected(base, select, filters, options) {
+  if (select.length === 0) {
+    throw new FiregraphError(
+      "findEdgesProjected requires a non-empty select list \u2014 an empty projection has no representation distinct from `findEdges`.",
+      "INVALID_QUERY"
+    );
+  }
+  const seen = /* @__PURE__ */ new Set();
+  const fields = [];
+  for (const f of select) {
+    if (!seen.has(f)) {
+      seen.add(f);
+      fields.push({ original: f, canonical: normalizeFirestoreProjectionField(f) });
+    }
+  }
+  let q = base;
+  for (const f of filters) {
+    q = q.where(f.field, f.op, f.value);
+  }
+  if (options?.orderBy) {
+    q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
+  }
+  if (options?.limit !== void 0) {
+    q = q.limit(options.limit);
+  }
+  q = q.select(...fields.map((p) => p.canonical));
+  const snap = await q.get();
+  return snap.docs.map((doc) => {
+    const data = doc.data();
+    const out = {};
+    for (const { original, canonical } of fields) {
+      out[original] = readProjectionPath(data, canonical);
+    }
+    return out;
+  });
+}
+
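Rows come back keyed by the caller's original field names; the canonical `data.*` paths are used only against Firestore, and missing values are coerced to `null` by `readProjectionPath`. A sketch of the in/out contract (the field names are illustrative):

```js
// Contract of runFirestoreFindEdgesProjected above.
const rows = await runFirestoreFindEdgesProjected(base, ["aUid", "title"], filters);
// Firestore is queried with select("aUid", "data.title");
// each row is { aUid: "...", title: <value or null> } — original keys, not canonical.
```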
+// src/internal/firestore-traverse-compiler.ts
+var MAX_PIPELINE_DEPTH = 5;
+function compileEngineTraversal(params, opts) {
+  const maxDepth = opts?.maxDepth ?? MAX_PIPELINE_DEPTH;
+  const maxReads = opts?.maxReads ?? params.maxReads;
+  if (!Array.isArray(params.hops) || params.hops.length === 0) {
+    return { eligible: false, reason: "engine traversal requires at least one hop" };
+  }
+  if (params.hops.length > maxDepth) {
+    return {
+      eligible: false,
+      reason: `engine traversal depth ${params.hops.length} exceeds MAX_PIPELINE_DEPTH (${maxDepth})`
+    };
+  }
+  if (!Array.isArray(params.sources)) {
+    return { eligible: false, reason: "engine traversal requires a sources array" };
+  }
+  const normalizedHops = [];
+  for (let i = 0; i < params.hops.length; i++) {
+    const hop = params.hops[i];
+    if (!hop.axbType || hop.axbType.length === 0) {
+      return {
+        eligible: false,
+        reason: `engine traversal hop ${i} is missing axbType`
+      };
+    }
+    if (typeof hop.limitPerSource !== "number" || hop.limitPerSource <= 0 || !Number.isFinite(hop.limitPerSource)) {
+      return {
+        eligible: false,
+        reason: `engine traversal hop ${i} (${hop.axbType}) requires a positive limitPerSource`
+      };
+    }
+    normalizedHops.push({
+      ...hop,
+      axbType: hop.axbType,
+      direction: hop.direction ?? "forward",
+      limitPerSource: hop.limitPerSource
+    });
+  }
+  let estimatedReads = Math.max(1, params.sources.length);
+  for (const hop of normalizedHops) {
+    estimatedReads *= hop.limitPerSource;
+    if (estimatedReads > Number.MAX_SAFE_INTEGER) {
+      estimatedReads = Number.MAX_SAFE_INTEGER;
+      break;
+    }
+  }
+  if (maxReads !== void 0 && estimatedReads > maxReads) {
+    return {
+      eligible: false,
+      reason: `engine traversal worst-case response size ${estimatedReads} exceeds maxReads budget ${maxReads}`
+    };
+  }
+  return {
+    eligible: true,
+    normalized: {
+      sources: params.sources,
+      hops: normalizedHops,
+      estimatedReads
+    }
+  };
+}
+
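Eligibility is a pure pre-flight check: depth must stay within `MAX_PIPELINE_DEPTH = 5`, and the worst-case result size is the source count multiplied by every hop's `limitPerSource`, compared against `maxReads`. Worked numbers (hop shapes are illustrative):

```js
// Worst-case estimate used by compileEngineTraversal above.
const sources = 10;
const hops = [{ limitPerSource: 5 }, { limitPerSource: 3 }];
const estimatedReads = hops.reduce((n, h) => n * h.limitPerSource, Math.max(1, sources));
console.log(estimatedReads); // 150 — rejected if a maxReads budget below 150 is set
```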
+// src/internal/firestore-traverse.ts
+function childArrayKey(depth) {
+  return `hop_${depth}_children`;
+}
+function joinVarName(depth) {
+  return `hop_${depth}_join`;
+}
+var _Pipelines5 = null;
+async function getPipelines4() {
+  if (!_Pipelines5) {
+    const mod = await import("@google-cloud/firestore");
+    _Pipelines5 = mod.Pipelines;
+  }
+  return _Pipelines5;
+}
+function buildHopPredicates(P, hop, sourcePredicate) {
+  const exprs = [P.equal("axbType", hop.axbType), sourcePredicate];
+  if (hop.aType !== void 0) exprs.push(P.equal("aType", hop.aType));
+  if (hop.bType !== void 0) exprs.push(P.equal("bType", hop.bType));
+  const [first, second, ...rest] = exprs;
+  return P.and(first, second, ...rest);
+}
+function applyHopOrderingAndLimit(P, pipeline, hop, totalLimit) {
+  let p = pipeline;
+  if (hop.orderBy) {
+    const f = P.field(hop.orderBy.field);
+    const ordering = hop.orderBy.direction === "desc" ? f.descending() : f.ascending();
+    p = p.sort(ordering);
+  }
+  p = p.limit(totalLimit);
+  return p;
+}
+function buildSubPipeline(P, db, collectionPath, hops, depth) {
+  const hop = hops[depth];
+  const parentDepth = depth - 1;
+  const sourceField = hop.direction === "forward" ? "aUid" : "bUid";
+  const sourcePredicate = P.equal(
+    sourceField,
+    P.variable(joinVarName(parentDepth))
+  );
+  const where = buildHopPredicates(P, hop, sourcePredicate);
+  let pipeline = db.pipeline().collection(collectionPath).where(where);
+  pipeline = applyHopOrderingAndLimit(P, pipeline, hop, hop.limitPerSource);
+  if (depth + 1 < hops.length) {
+    const targetField = hop.direction === "forward" ? "bUid" : "aUid";
+    pipeline = pipeline.define(P.field(targetField).as(joinVarName(depth)));
+    const child = buildSubPipeline(P, db, collectionPath, hops, depth + 1);
+    pipeline = pipeline.addFields(child.toArrayExpression().as(childArrayKey(depth)));
+  }
+  return pipeline;
+}
+function buildRootPipeline(P, db, collectionPath, spec) {
+  const hop0 = spec.hops[0];
+  const sourceField = hop0.direction === "forward" ? "aUid" : "bUid";
+  const sourcePredicate = P.equalAny(
+    sourceField,
+    spec.sources
+  );
+  const where = buildHopPredicates(P, hop0, sourcePredicate);
+  let pipeline = db.pipeline().collection(collectionPath).where(where);
+  pipeline = applyHopOrderingAndLimit(P, pipeline, hop0, spec.sources.length * hop0.limitPerSource);
+  if (spec.hops.length > 1) {
+    const targetField = hop0.direction === "forward" ? "bUid" : "aUid";
+    pipeline = pipeline.define(P.field(targetField).as(joinVarName(0)));
+    const child = buildSubPipeline(P, db, collectionPath, spec.hops, 1);
+    pipeline = pipeline.addFields(child.toArrayExpression().as(childArrayKey(0)));
+  }
+  return pipeline;
+}
+function stripScaffolding(row, depth) {
+  const stripped = { ...row };
+  delete stripped[childArrayKey(depth)];
+  delete stripped[joinVarName(depth)];
+  return stripped;
+}
+function decodeTree(rootRows, hops) {
+  const out = [];
+  let frontier = rootRows;
+  for (let depth = 0; depth < hops.length; depth++) {
+    const hop = hops[depth];
+    const targetField = hop.direction === "forward" ? "bUid" : "aUid";
+    const seen = /* @__PURE__ */ new Set();
+    const edges = [];
+    const nextFrontier = [];
+    for (const row of frontier) {
+      const childKey = childArrayKey(depth);
+      const children = row[childKey];
+      if (Array.isArray(children)) {
+        for (const child of children) {
+          if (child && typeof child === "object") {
+            nextFrontier.push(child);
+          }
+        }
+      }
+      const stripped = stripScaffolding(row, depth);
+      if (hop.axbType === NODE_RELATION && stripped.aUid === stripped.bUid) continue;
+      const dedupKey = stripped[targetField];
+      if (typeof dedupKey !== "string" || seen.has(dedupKey)) continue;
+      seen.add(dedupKey);
+      edges.push(stripped);
+    }
+    out.push({ edges, sourceCount: depth === 0 ? -1 : out[depth - 1].edges.length });
+    frontier = nextFrontier;
+  }
+  return {
+    hops: out,
+    totalReads: 1
+  };
+}
+async function runFirestoreEngineTraversal(db, collectionPath, params) {
+  if (process.env.FIRESTORE_EMULATOR_HOST) {
+    throw new FiregraphError(
+      "engine traversal requires Pipelines \u2014 not supported on the Firestore emulator",
+      "UNSUPPORTED_OPERATION"
+    );
+  }
+  const compiled = compileEngineTraversal(params);
+  if (!compiled.eligible) {
+    throw new FiregraphError(
+      `engine traversal not eligible: ${compiled.reason}`,
+      "UNSUPPORTED_OPERATION"
+    );
+  }
+  const spec = compiled.normalized;
+  if (spec.sources.length === 0) {
+    return {
+      hops: spec.hops.map(() => ({ edges: [], sourceCount: 0 })),
+      totalReads: 0
+    };
+  }
+  const P = await getPipelines4();
+  const pipeline = buildRootPipeline(P, db, collectionPath, spec);
+  const snap = await pipeline.execute();
+  const rows = snap.results.map((r) => r.data());
+  const result = decodeTree(rows, spec.hops);
+  if (result.hops.length > 0) {
+    result.hops[0] = { ...result.hops[0], sourceCount: spec.sources.length };
+  }
+  return result;
+}
+
+// src/internal/firestore-vector.ts
+var ENVELOPE_FIELDS3 = /* @__PURE__ */ new Set([
+  "aType",
+  "aUid",
+  "axbType",
+  "bType",
+  "bUid",
+  "createdAt",
+  "updatedAt",
+  "v"
+]);
+function normalizeVectorFieldPath(label, field) {
+  if (ENVELOPE_FIELDS3.has(field)) {
+    throw new FiregraphError(
+      `findNearest(): ${label} '${field}' is a built-in envelope field \u2014 vectors must live under \`data.*\`. Use a path like 'data.${field}' if you really meant a nested data field.`,
+      "INVALID_QUERY"
+    );
+  }
+  if (field === "data" || field.startsWith("data.")) return field;
+  return `data.${field}`;
+}
+function buildVectorFilters(params) {
+  const filters = [];
+  if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
+  if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
+  if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
+  if (params.where) filters.push(...params.where);
+  return filters;
+}
+function toNumberArray(qv) {
+  if (Array.isArray(qv)) return qv;
+  if (typeof qv.toArray === "function") {
+    return qv.toArray();
+  }
+  throw new FiregraphError(
+    "findNearest(): queryVector must be a number[] or a Firestore VectorValue.",
+    "INVALID_QUERY"
+  );
+}
+async function runFirestoreFindNearest(base, params) {
+  const vec = toNumberArray(params.queryVector);
+  if (vec.length === 0) {
+    throw new FiregraphError(
+      "findNearest(): queryVector is empty \u2014 at least one dimension is required.",
+      "INVALID_QUERY"
+    );
+  }
+  if (!Number.isInteger(params.limit) || params.limit <= 0 || params.limit > 1e3) {
+    throw new FiregraphError(
+      `findNearest(): limit must be a positive integer \u2264 1000 (got ${params.limit}).`,
+      "INVALID_QUERY"
+    );
+  }
+  const vectorField = normalizeVectorFieldPath("vectorField", params.vectorField);
+  const distanceResultField = params.distanceResultField !== void 0 ? normalizeVectorFieldPath("distanceResultField", params.distanceResultField) : void 0;
+  const filtered = applyFiltersToQuery(base, buildVectorFilters(params));
+  const opts = {
+    vectorField,
+    queryVector: vec,
+    limit: params.limit,
+    distanceMeasure: params.distanceMeasure
+  };
+  if (params.distanceThreshold !== void 0) opts.distanceThreshold = params.distanceThreshold;
+  if (distanceResultField !== void 0) opts.distanceResultField = distanceResultField;
+  const snap = await filtered.findNearest(opts).get();
+  return snap.docs.map((doc) => doc.data());
+}
+
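The vector helper validates the query vector and limit (a positive integer, at most 1000), normalizes field paths, then funnels everything into the classic `findNearest()` call. A hedged call sketch; the field name and vector values are illustrative:

```js
// Params accepted by runFirestoreFindNearest above.
const results = await runFirestoreFindNearest(base, {
  vectorField: "embedding",         // normalized to "data.embedding"
  queryVector: [0.12, -0.08, 0.33], // number[] or Firestore VectorValue
  limit: 10,                        // integer in 1..1000
  distanceMeasure: "COSINE"         // passed through to findNearest()
});
```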
+// src/firestore-enterprise/backend.ts
+init_serialization();
+
+// src/firestore-enterprise/pipeline-adapter.ts
+var _Pipelines6 = null;
+async function getPipelines5() {
+  if (!_Pipelines6) {
+    const mod = await import("@google-cloud/firestore");
+    _Pipelines6 = mod.Pipelines;
+  }
+  return _Pipelines6;
+}
+function buildFilterExpression2(P, filter) {
+  const { field: fieldName, op, value } = filter;
+  switch (op) {
+    case "==":
+      return P.equal(fieldName, value);
+    case "!=":
+      return P.notEqual(fieldName, value);
+    case "<":
+      return P.lessThan(fieldName, value);
+    case "<=":
+      return P.lessThanOrEqual(fieldName, value);
+    case ">":
+      return P.greaterThan(fieldName, value);
+    case ">=":
+      return P.greaterThanOrEqual(fieldName, value);
+    case "in":
+      return P.equalAny(fieldName, value);
+    case "not-in":
+      return P.notEqualAny(fieldName, value);
+    case "array-contains":
+      return P.arrayContains(fieldName, value);
+    case "array-contains-any":
+      return P.arrayContainsAny(fieldName, value);
+    default:
+      throw new Error(`Unsupported filter op for pipeline mode: ${op}`);
+  }
+}
+function createPipelineQueryAdapter(db, collectionPath) {
+  return {
+    async query(filters, options) {
+      const P = await getPipelines5();
+      let pipeline = db.pipeline().collection(collectionPath);
+      if (filters.length === 1) {
+        pipeline = pipeline.where(buildFilterExpression2(P, filters[0]));
+      } else if (filters.length > 1) {
+        const [first, second, ...rest] = filters.map((f) => buildFilterExpression2(P, f));
+        pipeline = pipeline.where(P.and(first, second, ...rest));
+      }
+      if (options?.orderBy) {
+        const f = P.field(options.orderBy.field);
+        const ordering = options.orderBy.direction === "desc" ? f.descending() : f.ascending();
+        pipeline = pipeline.sort(ordering);
+      }
+      if (options?.limit !== void 0) {
+        pipeline = pipeline.limit(options.limit);
+      }
+      const snap = await pipeline.execute();
+      return snap.results.map((r) => r.data());
+    }
+  };
+}
+
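Capability flags are how callers can discover what a backend supports before invoking an optional surface; the Enterprise set below includes vector, full-text, and geo search, and `query.dml` is appended only when preview DML is enabled. A sketch, assuming `backend` is an instance of the Enterprise backend constructed below:

```js
// Checking the capability set built from ENTERPRISE_BASE_CAPS below
// via createCapabilities() from src/internal/backend.ts.
if (backend.capabilities.has("search.vector")) {
  // safe to call the vector-search surface
}
console.log([...backend.capabilities.values()]); // e.g. "core.read", "search.geo", ...
```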
// src/firestore-enterprise/backend.ts
|
|
3458
|
+
var ENTERPRISE_BASE_CAPS = /* @__PURE__ */ new Set([
|
|
3459
|
+
"core.read",
|
|
3460
|
+
"core.write",
|
|
3461
|
+
"core.transactions",
|
|
3462
|
+
"core.batch",
|
|
3463
|
+
"core.subgraph",
|
|
3464
|
+
"query.aggregate",
|
|
3465
|
+
"query.select",
|
|
3466
|
+
"query.join",
|
|
3467
|
+
"traversal.serverSide",
|
|
3468
|
+
"search.vector",
|
|
3469
|
+
"search.fullText",
|
|
3470
|
+
"search.geo",
|
|
3471
|
+
"raw.firestore"
|
|
3472
|
+
]);
|
|
3473
|
+
var _emulatorFallbackWarned = false;
|
|
3474
|
+
var _classicInProductionWarned = false;
|
|
3475
|
+
var _previewDmlWarned = false;
|
|
3476
|
+
function dottedDataPath(op) {
|
|
3477
|
+
assertSafePath(op.path);
|
|
3478
|
+
return `data.${op.path.join(".")}`;
|
|
3479
|
+
}
|
|
3480
|
+
function buildFirestoreUpdate(update, db) {
|
|
3481
|
+
assertUpdatePayloadExclusive(update);
|
|
3482
|
+
const out = {
|
|
3483
|
+
updatedAt: import_firestore4.FieldValue.serverTimestamp()
|
|
3484
|
+
};
|
|
3485
|
+
if (update.replaceData) {
|
|
3486
|
+
out.data = deserializeFirestoreTypes(update.replaceData, db);
|
|
3487
|
+
} else if (update.dataOps) {
|
|
3488
|
+
for (const op of update.dataOps) {
|
|
3489
|
+
const key = dottedDataPath(op);
|
|
3490
|
+
out[key] = op.delete ? import_firestore4.FieldValue.delete() : op.value;
|
|
3491
|
+
}
|
|
3492
|
+
}
|
|
3493
|
+
if (update.v !== void 0) {
|
|
3494
|
+
out.v = update.v;
|
|
3495
|
+
}
|
|
3496
|
+
return out;
|
|
3497
|
+
}
|
|
3498
|
+
function stampWritableRecord(record) {
|
|
3499
|
+
const now = import_firestore4.FieldValue.serverTimestamp();
|
|
3500
|
+
const out = {
|
|
3501
|
+
aType: record.aType,
|
|
3502
|
+
aUid: record.aUid,
|
|
3503
|
+
axbType: record.axbType,
|
|
3504
|
+
bType: record.bType,
|
|
3505
|
+
bUid: record.bUid,
|
|
3506
|
+
data: record.data,
|
|
3507
|
+
createdAt: now,
|
|
3508
|
+
updatedAt: now
|
|
3509
|
+
};
|
|
3510
|
+
if (record.v !== void 0) out.v = record.v;
|
|
3511
|
+
return out;
|
|
3512
|
+
}
|
|
3513
|
+
var FirestoreEnterpriseTransactionBackend = class {
|
|
3514
|
+
constructor(adapter, db) {
|
|
3515
|
+
this.adapter = adapter;
|
|
3516
|
+
this.db = db;
|
|
3517
|
+
}
|
|
3518
|
+
getDoc(docId) {
|
|
3519
|
+
return this.adapter.getDoc(docId);
|
|
3520
|
+
}
|
|
3521
|
+
query(filters, options) {
|
|
3522
|
+
return this.adapter.query(filters, options);
|
|
3523
|
+
}
|
|
3524
|
+
async setDoc(docId, record, mode) {
|
|
3525
|
+
this.adapter.setDoc(
|
|
3526
|
+
docId,
|
|
3527
|
+
stampWritableRecord(record),
|
|
3528
|
+
mode === "merge" ? { merge: true } : void 0
|
|
3529
|
+
);
|
|
3530
|
+
}
|
|
3531
|
+
  async updateDoc(docId, update) {
    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
  }
  async deleteDoc(docId) {
    this.adapter.deleteDoc(docId);
  }
};
var FirestoreEnterpriseBatchBackend = class {
  constructor(adapter, db) {
    this.adapter = adapter;
    this.db = db;
  }
  setDoc(docId, record, mode) {
    this.adapter.setDoc(
      docId,
      stampWritableRecord(record),
      mode === "merge" ? { merge: true } : void 0
    );
  }
  updateDoc(docId, update) {
    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
  }
  deleteDoc(docId) {
    this.adapter.deleteDoc(docId);
  }
  commit() {
    return this.adapter.commit();
  }
};
var FirestoreEnterpriseBackendImpl = class _FirestoreEnterpriseBackendImpl {
  constructor(db, collectionPath, queryMode, scopePath, previewDml) {
    this.db = db;
    this.queryMode = queryMode;
    this.previewDml = previewDml;
    this.collectionPath = collectionPath;
    this.scopePath = scopePath;
    this.adapter = createFirestoreAdapter(db, collectionPath);
    if (queryMode === "pipeline") {
      this.pipelineAdapter = createPipelineQueryAdapter(db, collectionPath);
    }
    const caps = previewDml ? /* @__PURE__ */ new Set([...ENTERPRISE_BASE_CAPS, "query.dml"]) : ENTERPRISE_BASE_CAPS;
    this.capabilities = createCapabilities(caps);
  }
  capabilities;
  collectionPath;
  scopePath;
  adapter;
  pipelineAdapter;
  // --- Reads ---
  getDoc(docId) {
    return this.adapter.getDoc(docId);
  }
  query(filters, options) {
    if (this.pipelineAdapter) {
      return this.pipelineAdapter.query(filters, options);
    }
    return this.adapter.query(filters, options);
  }
  // --- Writes ---
  setDoc(docId, record, mode) {
    return this.adapter.setDoc(
      docId,
      stampWritableRecord(record),
      mode === "merge" ? { merge: true } : void 0
    );
  }
  updateDoc(docId, update) {
    return this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
  }
  deleteDoc(docId) {
    return this.adapter.deleteDoc(docId);
  }
  // --- Transactions / Batches ---
  runTransaction(fn) {
    return this.db.runTransaction(async (firestoreTx) => {
      const txAdapter = createTransactionAdapter(this.db, this.collectionPath, firestoreTx);
      return fn(new FirestoreEnterpriseTransactionBackend(txAdapter, this.db));
    });
  }
  createBatch() {
    const batchAdapter = createBatchAdapter(this.db, this.collectionPath);
    return new FirestoreEnterpriseBatchBackend(batchAdapter, this.db);
  }
  // --- Subgraphs ---
  subgraph(parentNodeUid, name) {
    const subPath = `${this.collectionPath}/${parentNodeUid}/${name}`;
    const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
    return new _FirestoreEnterpriseBackendImpl(
      this.db,
      subPath,
      this.queryMode,
      newScope,
      this.previewDml
    );
  }
  // --- Cascade & bulk ---
  removeNodeCascade(uid, reader, options) {
    return removeNodeCascade(this.db, this.collectionPath, reader, uid, options);
  }
  bulkRemoveEdges(params, reader, options) {
    return bulkRemoveEdges(this.db, this.collectionPath, reader, params, options);
  }
  // --- Cross-collection ---
  async findEdgesGlobal(params, collectionName) {
    const name = collectionName ?? this.collectionPath.split("/").pop();
    const plan = buildEdgeQueryPlan(params);
    if (plan.strategy === "get") {
      throw new FiregraphError(
        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
        "INVALID_QUERY"
      );
    }
    const collectionGroupRef = this.db.collectionGroup(name);
    let q = collectionGroupRef;
    for (const f of plan.filters) {
      q = q.where(f.field, f.op, f.value);
    }
    if (plan.options?.orderBy) {
      q = q.orderBy(plan.options.orderBy.field, plan.options.orderBy.direction ?? "asc");
    }
    if (plan.options?.limit !== void 0) {
      q = q.limit(plan.options.limit);
    }
    const snap = await q.get();
    return snap.docs.map((doc) => doc.data());
  }
  // --- Aggregate ---
  /**
   * Aggregate via the classic `Query.aggregate()` API. Supports count/sum/avg
   * — min/max throws `UNSUPPORTED_AGGREGATE`. The Pipelines `aggregate()`
   * stage could in principle add min/max on Enterprise, but that is deferred
   * to a future phase; both editions currently route through the same
   * classic-API helper so capability semantics stay symmetric.
   */
  aggregate(spec, filters) {
    return runFirestoreAggregate(this.db.collection(this.collectionPath), spec, filters, {
      edition: "enterprise"
    });
  }
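  // Usage sketch (editorial note, not shipped code; the exact AggregateSpec
  // shape is defined by runFirestoreAggregate and assumed here):
  //
  //   const totals = await backend.aggregate(
  //     { likes: { op: "count" } },
  //     [{ field: "axbType", op: "==", value: "user_LIKES_post" }]
  //   );
  //
  // A min/max op in the spec rejects with UNSUPPORTED_AGGREGATE, as the
  // comment above describes.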
  // --- Server-side projection (capability: query.select) ---
  /**
   * Run a projecting query via the shared classic-API helper. Enterprise and
   * Standard delegate to the same implementation so the projection contract
   * stays consistent.
   *
   * Why classic and not the pipeline `select()` stage: the byte-savings
   * deliverable (the only reason `findEdgesProjected` exists) is achieved
   * by either path; the classic API works on both editions today and
   * sidesteps the pipeline-vs-emulator forking the rest of this backend has
   * to manage. When pipeline `select()` becomes preferable for some other
   * reason — e.g. composing with a future pipeline-only stage — swap the
   * implementation behind `runFirestoreFindEdgesProjected`; callers and
   * the capability declaration stay put.
   */
  findEdgesProjected(select, filters, options) {
    return runFirestoreFindEdgesProjected(
      this.db.collection(this.collectionPath),
      select,
      filters,
      options
    );
  }
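  // Usage sketch (editorial; the select/filter shapes are inferred from the
  // surrounding code, and the field names are illustrative):
  //
  //   const rows = await backend.findEdgesProjected(
  //     ["aUid", "bUid", "data.weight"],
  //     [{ field: "axbType", op: "==", value: "user_LIKES_post" }],
  //     { limit: 50 }
  //   );
  //
  // Only the projected fields come back over the wire, which is the
  // byte-savings deliverable the comment above refers to.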
  // --- Native vector / nearest-neighbour search (capability: search.vector) ---
  /**
   * Run a vector / nearest-neighbour query via the shared classic-API
   * helper. Enterprise and Standard delegate to one implementation so the
   * field-path normalisation, validation surface, and result shape stay
   * consistent across editions.
   *
   * Why classic and not the pipeline `findNearest` stage: the deliverable
   * (top-K by similarity) is achieved by either path; the classic API
   * works identically on both editions today and sidesteps the
   * pipeline-vs-emulator forking the rest of this backend has to manage.
   * When pipeline `findNearest` becomes preferable for composing with
   * other pipeline stages, swap the implementation behind
   * `runFirestoreFindNearest`; callers and the capability declaration
   * stay put.
   *
   * Index requirements are identical to Standard — single-field vector
   * index on the indexed `vectorField`, plus a composite index whenever
   * additional `where` filters narrow the candidate set.
   */
  findNearest(params) {
    return runFirestoreFindNearest(this.db.collection(this.collectionPath), params);
  }
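  // Usage sketch (editorial; only `vectorField` is named by the comment
  // above, so the other parameter names are assumptions):
  //
  //   const hits = await backend.findNearest({
  //     vectorField: "data.embedding",
  //     queryVector: [0.12, -0.03, 0.88],
  //     limit: 10,
  //   });
  //
  // Remember the index prerequisites: a single-field vector index on
  // `data.embedding`, plus a composite index if extra filters apply.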
  // --- Native full-text search (capability: search.fullText) ---
  /**
   * Run a full-text search via Firestore Pipelines `search(...)` stage.
   * Translates `documentMatches(query)` against the indexed search
   * fields, sorts by relevance score (`score().descending()`), and
   * applies identifying filters as a follow-up `where(...)` stage
   * because `search` must be the first stage of a pipeline.
   *
   * Enterprise-only: full-text search is an Enterprise product feature.
   * Standard does not declare `search.fullText` regardless of SDK
   * surface, and the SQLite-shaped backends have no native FTS index.
   *
   * Index requirements: an FTS index on the indexed search fields
   * (configured in Firestore's index config). Without the index the
   * underlying `search` stage returns no rows; the firegraph layer
   * cannot detect that case ahead of time.
   */
  fullTextSearch(params) {
    return runFirestoreFullTextSearch(this.db, this.collectionPath, params);
  }
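  // Usage sketch (editorial; the params shape is an assumption, since the
  // comment above only fixes the semantics: documentMatches(query),
  // relevance-ordered, identifying filters as a follow-up where stage):
  //
  //   const matches = await backend.fullTextSearch({
  //     query: "graph traversal",
  //     filters: [{ field: "axbType", op: "==", value: "user_WROTE_post" }],
  //   });
  //
  // With no FTS index configured this yields an empty result set rather
  // than an error, per the caveat above.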
  // --- Native geospatial distance search (capability: search.geo) ---
  /**
   * Run a geospatial distance query via Firestore Pipelines
   * `search(...)` stage. The same `geoDistance(geoField, point)`
   * expression feeds the radius cap (`<= radiusMeters`) and the
   * nearest-first sort (when `orderByDistance` is true / unset).
   * Identifying filters apply as a follow-up `where(...)` stage.
   *
   * Enterprise-only: same Enterprise-product-feature gating as FTS.
   *
   * Index requirements: a geospatial index on the indexed `geoField`.
   * Same caveat as FTS — unindexed geo searches return no rows
   * server-side and the firegraph layer cannot pre-detect that.
   */
  geoSearch(params) {
    return runFirestoreGeoSearch(this.db, this.collectionPath, params);
  }
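  // Usage sketch (editorial; `geoField`, `radiusMeters`, and
  // `orderByDistance` are named in the comment above, while `point` and
  // its spelling are assumptions):
  //
  //   const nearby = await backend.geoSearch({
  //     geoField: "data.location",
  //     point: { latitude: 52.37, longitude: 4.9 },
  //     radiusMeters: 5e3,
  //     orderByDistance: true,
  //   });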
  // --- Server-side multi-source fan-out (capability: query.join) ---
  /**
   * Fan out from `params.sources` over a single edge type in one server-
   * side round trip when running in pipeline mode, or via a chunked
   * classic-API fan-out when running in classic mode.
   *
   * Pipeline mode (default outside the emulator): one call to
   * `db.pipeline().collection(path).where(equalAny(sourceField, sources))
   * .execute()`. `equalAny` has no documented cap, so a 1k-source fan-
   * out is one round trip.
   *
   * Classic mode (emulator, or `defaultQueryMode: 'classic'`): chunks
   * `params.sources` into 30-element groups (the classic `'in'`
   * operator's documented cap), dispatches one query per chunk in
   * parallel, concats the results, and applies a cross-chunk re-sort +
   * total-limit slice. Same observable contract, `ceil(N/30)` round
   * trips. The chunked path is still a win over the per-source
   * `findEdges` loop in `traverse.ts` — 100 sources go from 100 round
   * trips to 4.
   */
  expand(params) {
    if (this.queryMode === "pipeline") {
      return runFirestorePipelineExpand(this.db, this.collectionPath, params);
    }
    return runFirestoreClassicExpand(this.adapter, params);
  }
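  // Usage sketch (editorial; `sources` is named above, the other keys are
  // assumed). The round-trip math from the comment above, concretely:
  //
  //   const edges = await backend.expand({
  //     sources: userUids, // say 100 uids
  //     axbType: "user_LIKES_post",
  //   });
  //
  // Pipeline mode: 1 equalAny() round trip. Classic mode:
  // Math.ceil(100 / 30) === 4 parallel `in` queries, then a cross-chunk
  // re-sort and total-limit slice client-side.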
  // --- Server-side DML (capability: query.dml, gated by previewDml) ---
  /**
   * Single-statement bulk DELETE via Pipeline `delete()` stage. Wired only
   * when the backend was created with `previewDml: true`; otherwise the
   * cap isn't declared and `client.bulkDelete()` throws
   * `UNSUPPORTED_OPERATION` (or `bulk.ts` cascade falls back to
   * `bulkRemoveEdges`).
   *
   * Empty filter lists are rejected at the helper boundary —
   * see `runFirestorePipelineDelete`'s `assertNonEmptyFilters`.
   *
   * Subgraph isolation comes from `this.collectionPath`: the pipeline's
   * `collection(path)` source IS the subgraph, so there's no separate
   * `scope` predicate to enforce (unlike SQLite's leading-`scope`-`?`
   * filter).
   */
  bulkDelete(filters, options) {
    return runFirestorePipelineDelete(this.db, this.collectionPath, filters, options);
  }
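  // Usage sketch (editorial; filter objects use the { field, op, value }
  // shape seen in findEdgesGlobal above). Requires `previewDml: true`:
  //
  //   await backend.bulkDelete([
  //     { field: "axbType", op: "==", value: "user_LIKES_post" },
  //   ]);
  //
  // An empty filter array is rejected by assertNonEmptyFilters instead of
  // silently deleting the entire subgraph collection.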
  /**
   * Single-statement bulk UPDATE via Pipeline
   * `update(transformedFields)` stage. Same gating and scoping as
   * `bulkDelete` above. The patch is flattened into one
   * `AliasedExpression` per terminal leaf via the shared `flattenPatch`
   * pipeline; `deleteField()` sentinels are rejected at the helper
   * boundary (the typed `update(AliasedExpression[])` surface in 8.5.0
   * has no field-deletion transform — see `runFirestorePipelineUpdate`).
   */
  bulkUpdate(filters, patch, options) {
    return runFirestorePipelineUpdate(this.db, this.collectionPath, filters, patch, options);
  }
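  // Usage sketch (editorial; patch keys are illustrative). A nested patch
  //
  //   await backend.bulkUpdate(
  //     [{ field: "axbType", op: "==", value: "user_LIKES_post" }],
  //     { data: { archived: true, meta: { reason: "cleanup" } } }
  //   );
  //
  // flattens to the leaves `data.archived` and `data.meta.reason` before
  // the pipeline update stage runs; a deleteField() sentinel anywhere in
  // the patch is rejected at the helper boundary.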
  // --- Engine-level multi-hop traversal (capability: traversal.serverSide) ---
  /**
   * Compile a multi-hop traversal spec into one nested Pipeline and
   * dispatch a single round trip via `define` + `addFields(child
   * .toArrayExpression().as(...))`. The compiler in
   * `firestore-traverse-compiler.ts` validates spec eligibility (depth
   * ≤ `MAX_PIPELINE_DEPTH`, every hop has `limitPerSource`, response-
   * size product ≤ `maxReads`) and the executor in
   * `firestore-traverse.ts` builds + decodes the tree.
   *
   * Unlike `bulkDelete` / `bulkUpdate`, this method is GA-typed in 8.5.0
   * (no `@beta` annotation on `define`, `addFields`, `toArrayExpression`,
   * or `variable`), so it does NOT need a `previewDml`-style opt-in.
   *
   * `defaultQueryMode` is irrelevant here — engine traversal always
   * dispatches through Pipelines because the join-key binding (`define`
   * + `variable`) has no classic Query API equivalent. Specs that
   * arrive on a classic-mode backend still execute via Pipelines; the
   * `queryMode` toggle only affects the read query path
   * (`query()` / `expand()`).
   */
  runEngineTraversal(params) {
    return runFirestoreEngineTraversal(this.db, this.collectionPath, params);
  }
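  // Usage sketch (editorial; the spec shape is largely an assumption, as
  // only `limitPerSource` and `maxReads` are named by the comment above):
  //
  //   const tree = await backend.runEngineTraversal({
  //     hops: [
  //       { axbType: "user_LIKES_post", limitPerSource: 20 },
  //       { axbType: "post_HAS_comment", limitPerSource: 5 },
  //     ],
  //     maxReads: 1e3,
  //   });
  //
  // Worst case here is 20 * 5 = 100 reads per root, so the compiler's
  // response-size check (product <= maxReads) passes.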
};
function createFirestoreEnterpriseBackend(db, collectionPath, options = {}) {
  const requestedMode = options.defaultQueryMode ?? "pipeline";
  const isEmulator = !!process.env.FIRESTORE_EMULATOR_HOST;
  const effectiveMode = isEmulator && requestedMode === "pipeline" ? "classic" : requestedMode;
  if (isEmulator && requestedMode === "pipeline" && effectiveMode === "classic" && !_emulatorFallbackWarned) {
    _emulatorFallbackWarned = true;
    console.warn(
      "[firegraph] Firestore Enterprise pipeline mode is unavailable in the emulator; falling back to classic Query API for this run. Set `defaultQueryMode: 'classic'` to silence this warning."
    );
  }
  if (!isEmulator && requestedMode === "classic" && !_classicInProductionWarned) {
    _classicInProductionWarned = true;
    console.warn(
      "[firegraph] Firestore Enterprise backend created with `defaultQueryMode: 'classic'`. Classic-mode `query()` against Enterprise causes full collection scans for `data.*` filters (high billing). For production reads on Standard Firestore, import from `'firegraph/firestore-standard'` instead."
    );
  }
  const previewDml = options.previewDml ?? false;
  if (previewDml && !_previewDmlWarned) {
    _previewDmlWarned = true;
    console.warn(
      "[firegraph] Firestore Enterprise backend created with `previewDml: true`. bulkDelete()/bulkUpdate() will dispatch through Pipeline.delete() / Pipeline.update(transformedFields), both `@beta` in @google-cloud/firestore@8.5.0. The typed surface may shift before GA \u2014 pin your firestore SDK or be ready to set `previewDml: false` and route through the read-then-write fallback if needed."
    );
  }
  const scopePath = options.scopePath ?? "";
  return new FirestoreEnterpriseBackendImpl(
    db,
    collectionPath,
    effectiveMode,
    scopePath,
    previewDml
  );
}
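// Usage sketch (editorial; the collection path is illustrative, the option
// names come from the checks above):
//
//   const { Firestore } = require("@google-cloud/firestore");
//   const db = new Firestore();
//   const backend = createFirestoreEnterpriseBackend(db, "graphs/main/edges", {
//     defaultQueryMode: "pipeline", // auto-downgraded to "classic" under the emulator
//     previewDml: false,            // opt in to the @beta Pipeline DML surface
//   });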
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  META_EDGE_TYPE,
  META_NODE_TYPE,
  createFirestoreEnterpriseBackend,
  createGraphClient,
  createMergedRegistry,
  createRegistry,
  generateId
});
//# sourceMappingURL=index.cjs.map