@typicalday/firegraph 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +317 -73
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +222 -3
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +197 -4
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +930 -3
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +213 -12
- package/dist/cloudflare/index.d.ts +213 -12
- package/dist/cloudflare/index.js +562 -281
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +590 -550
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +9 -37
- package/dist/index.d.ts +9 -37
- package/dist/index.js +178 -555
- package/dist/index.js.map +1 -1
- package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
- package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-BsR0lnFL.d.ts +0 -200
- package/dist/backend-Ct-fLlkG.d.cts +0 -200
- package/dist/chunk-AWW4MUJ5.js.map +0 -1
- package/dist/chunk-HONQY4HF.js.map +0 -1
- package/dist/types-DxYLy8Ol.d.cts +0 -770
- package/dist/types-DxYLy8Ol.d.ts +0 -770
|
@@ -0,0 +1,3631 @@
|
|
|
1
|
+
"use strict";
// esbuild CommonJS runtime helpers (restyled; behavior-identical).
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Wrap a module initializer so it runs at most once; `fn` holds a single
// named function, and after the first call `fn` is cleared to 0 so later
// calls just return the cached `res`.
var __esm = (fn, res) => function __init() {
  if (fn) {
    const initializer = fn[__getOwnPropNames(fn)[0]];
    res = (0, initializer)(fn = 0);
  }
  return res;
};
// Define lazy, enumerable getter exports on `target` for every entry of `all`.
var __export = (target, all) => {
  for (var key in all) {
    __defProp(target, key, { get: all[key], enumerable: true });
  }
};
// Copy own properties of `from` onto `to` as live getters, skipping keys
// already present on `to` and the optional `except` key. Returns `to`.
var __copyProps = (to, from, except, desc) => {
  const copyable = from && typeof from === "object" || typeof from === "function";
  if (copyable) {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) continue;
      desc = __getOwnPropDesc(from, key);
      __defProp(to, key, { get: () => from[key], enumerable: !desc || desc.enumerable });
    }
  }
  return to;
};
// Re-wrap a CommonJS module as an ESM-style namespace. If the importer is in
// node compatibility mode, or the module was not transpiled from ESM (no
// `__esModule` marker), expose the whole module as `default` for node compat.
var __toESM = (mod, isNodeMode, target) => {
  target = mod != null ? __create(__getProtoOf(mod)) : {};
  const needsDefault = isNodeMode || !mod || !mod.__esModule;
  if (needsDefault) {
    __defProp(target, "default", { value: mod, enumerable: true });
  }
  return __copyProps(target, mod);
};
// Re-wrap an ESM namespace as a CommonJS exports object tagged `__esModule`.
var __toCommonJS = (mod) => {
  const cjs = __defProp({}, "__esModule", { value: true });
  return __copyProps(cjs, mod);
};
|
|
32
|
+
|
|
33
|
+
// src/internal/serialization-tag.ts
// True when `value` is an object carrying firegraph's serialization
// envelope: a SERIALIZATION_TAG key whose string value names a known type.
function isTaggedValue(value) {
  if (value === null) return false;
  if (typeof value !== "object") return false;
  const tag = value[SERIALIZATION_TAG];
  if (typeof tag !== "string") return false;
  return KNOWN_TYPES.has(tag);
}
var SERIALIZATION_TAG, KNOWN_TYPES;
// Lazily initialized module state (esbuild __esm wrapper).
var init_serialization_tag = __esm({
  "src/internal/serialization-tag.ts"() {
    "use strict";
    SERIALIZATION_TAG = "__firegraph_ser__";
    KNOWN_TYPES = /* @__PURE__ */ new Set(["Timestamp", "GeoPoint", "VectorValue", "DocumentReference"]);
  }
});
|
|
47
|
+
|
|
48
|
+
// src/serialization.ts
// Converts native Firestore values (Timestamp, GeoPoint, VectorValue,
// DocumentReference) to/from plain JSON-safe tagged objects so records can
// round-trip through non-Firestore storage (sqlite, migration pipelines).
var serialization_exports = {};
__export(serialization_exports, {
  SERIALIZATION_TAG: () => SERIALIZATION_TAG,
  deserializeFirestoreTypes: () => deserializeFirestoreTypes,
  isTaggedValue: () => isTaggedValue,
  serializeFirestoreTypes: () => serializeFirestoreTypes
});
function isTimestamp(value) {
  return value instanceof import_firestore.Timestamp;
}
function isGeoPoint(value) {
  return value instanceof import_firestore.GeoPoint;
}
// Duck-typed check (path/firestore/id + constructor name) rather than
// instanceof — presumably to tolerate references created by a different
// copy of the Firestore SDK; TODO confirm.
function isDocumentReference(value) {
  if (value === null || typeof value !== "object") return false;
  const v = value;
  return typeof v.path === "string" && v.firestore !== void 0 && typeof v.id === "string" && v.constructor?.name === "DocumentReference";
}
// Duck-typed: VectorValue exposes a private `_values` array.
function isVectorValue(value) {
  if (value === null || typeof value !== "object") return false;
  const v = value;
  return v.constructor?.name === "VectorValue" && Array.isArray(v._values);
}
/**
 * Recursively replace Firestore-native values in `data` with tagged plain
 * objects ({ [SERIALIZATION_TAG]: <type>, ...fields }).
 */
function serializeFirestoreTypes(data) {
  return serializeValue(data);
}
function serializeValue(value) {
  if (value === null || value === void 0) return value;
  if (typeof value !== "object") return value;
  if (isTimestamp(value)) {
    return {
      [SERIALIZATION_TAG]: "Timestamp",
      seconds: value.seconds,
      nanoseconds: value.nanoseconds
    };
  }
  if (isGeoPoint(value)) {
    return {
      [SERIALIZATION_TAG]: "GeoPoint",
      latitude: value.latitude,
      longitude: value.longitude
    };
  }
  if (isDocumentReference(value)) {
    return { [SERIALIZATION_TAG]: "DocumentReference", path: value.path };
  }
  if (isVectorValue(value)) {
    const v = value;
    // Prefer the public toArray() accessor; fall back to the private field.
    const values = typeof v.toArray === "function" ? v.toArray() : v._values;
    return { [SERIALIZATION_TAG]: "VectorValue", values: [...values] };
  }
  if (Array.isArray(value)) {
    return value.map(serializeValue);
  }
  const result = {};
  for (const key of Object.keys(value)) {
    result[key] = serializeValue(value[key]);
  }
  return result;
}
/**
 * Inverse of serializeFirestoreTypes. `db` (a Firestore instance) is only
 * needed to rebuild DocumentReference values; without it, tagged references
 * are passed through unchanged (with a one-time warning).
 */
function deserializeFirestoreTypes(data, db) {
  return deserializeValue(data, db);
}
function deserializeValue(value, db) {
  if (value === null || value === void 0) return value;
  if (typeof value !== "object") return value;
  // Already-native Firestore values pass through untouched.
  if (isTimestamp(value) || isGeoPoint(value) || isDocumentReference(value) || isVectorValue(value)) {
    return value;
  }
  if (Array.isArray(value)) {
    return value.map((v) => deserializeValue(v, db));
  }
  const obj = value;
  if (isTaggedValue(obj)) {
    const tag = obj[SERIALIZATION_TAG];
    // Malformed envelopes (wrong field types) are returned as-is rather
    // than throwing, so corrupt data degrades instead of crashing reads.
    switch (tag) {
      case "Timestamp":
        if (typeof obj.seconds !== "number" || typeof obj.nanoseconds !== "number") return obj;
        return new import_firestore.Timestamp(obj.seconds, obj.nanoseconds);
      case "GeoPoint":
        if (typeof obj.latitude !== "number" || typeof obj.longitude !== "number") return obj;
        return new import_firestore.GeoPoint(obj.latitude, obj.longitude);
      case "VectorValue":
        if (!Array.isArray(obj.values)) return obj;
        return import_firestore.FieldValue.vector(obj.values);
      case "DocumentReference":
        if (typeof obj.path !== "string") return obj;
        if (db) {
          return db.doc(obj.path);
        }
        if (!_docRefWarned) {
          _docRefWarned = true;
          console.warn(
            "[firegraph] DocumentReference encountered during migration deserialization but no Firestore instance available. The reference will remain as a tagged object with its path. Enable write-back for full reconstruction."
          );
        }
        return obj;
      default:
        return obj;
    }
  }
  const result = {};
  for (const key of Object.keys(obj)) {
    result[key] = deserializeValue(obj[key], db);
  }
  return result;
}
var import_firestore, _docRefWarned;
var init_serialization = __esm({
  "src/serialization.ts"() {
    "use strict";
    import_firestore = require("@google-cloud/firestore");
    // Fix: the generated output called init_serialization_tag() twice in a
    // row; the second call was redundant (the __esm wrapper is idempotent).
    init_serialization_tag();
    _docRefWarned = false;
  }
});
|
|
166
|
+
|
|
167
|
+
// src/sqlite/index.ts
// Public entry point of the sqlite backend bundle: declares the exported
// bindings as lazy getters, then republishes them as this module's
// CommonJS exports object.
var sqlite_exports = {};
__export(sqlite_exports, {
  META_EDGE_TYPE: () => META_EDGE_TYPE,
  META_NODE_TYPE: () => META_NODE_TYPE,
  createGraphClient: () => createGraphClient,
  createMergedRegistry: () => createMergedRegistry,
  createRegistry: () => createRegistry,
  createSqliteBackend: () => createSqliteBackend,
  generateId: () => generateId
});
module.exports = __toCommonJS(sqlite_exports);
|
|
179
|
+
|
|
180
|
+
// src/docid.ts
var import_node_crypto = require("crypto");

// src/internal/constants.ts
// Relation name used for node self-edges: a node is stored as the edge
// (aType) -[is]-> (aType) pointing at itself (see buildWritableNodeRecord).
var NODE_RELATION = "is";
// Presumably the page size applied when a query gives no explicit limit —
// TODO confirm against the query builder (not visible in this chunk).
var DEFAULT_QUERY_LIMIT = 500;
// Top-level record fields owned by firegraph itself; user data keys are
// expected not to collide with these.
var BUILTIN_FIELDS = /* @__PURE__ */ new Set([
  "aType",
  "aUid",
  "axbType",
  "bType",
  "bUid",
  "createdAt",
  "updatedAt"
]);
// Separator used when composing edge doc ids (see computeEdgeDocId).
var SHARD_SEPARATOR = ":";
|
|
196
|
+
|
|
197
|
+
// src/docid.ts
// A node's document id is simply its uid.
function computeNodeDocId(uid) {
  return uid;
}
// An edge's document id is "<shard>:<aUid>:<axbType>:<bUid>", where <shard>
// is the first hex digit of the SHA-256 of "<aUid>:<axbType>:<bUid>".
function computeEdgeDocId(aUid, axbType, bUid) {
  const composite = [aUid, axbType, bUid].join(SHARD_SEPARATOR);
  const digestHex = (0, import_node_crypto.createHash)("sha256").update(composite).digest("hex");
  const shard = digestHex[0];
  return [shard, aUid, axbType, bUid].join(SHARD_SEPARATOR);
}
|
|
207
|
+
|
|
208
|
+
// src/internal/write-plan.ts
// Validates and flattens user update payloads into path-level operations
// that can be embedded in SQLite JSON-path updates.
init_serialization_tag();
// Cross-realm-safe sentinel marking "delete this field" in update payloads.
var DELETE_FIELD = /* @__PURE__ */ Symbol.for("firegraph.deleteField");
function isDeleteSentinel(value) {
  return value === DELETE_FIELD;
}
// Constructor names of Firestore value/transform classes that must be
// treated as opaque leaves rather than recursed into.
var FIRESTORE_TERMINAL_CTOR = /* @__PURE__ */ new Set([
  "Timestamp",
  "GeoPoint",
  "VectorValue",
  "DocumentReference",
  "FieldValue",
  "NumericIncrementTransform",
  "ArrayUnionTransform",
  "ArrayRemoveTransform",
  "ServerTimestampTransform",
  "DeleteTransform"
]);
// A value is "terminal" (written as a unit, not recursed into) unless it is
// a plain object (null prototype or Object.prototype). Primitives, arrays,
// tagged envelopes, and any class instance are terminal.
function isTerminalValue(value) {
  if (value === null) return true;
  const t = typeof value;
  if (t !== "object") return true;
  if (Array.isArray(value)) return true;
  if (isTaggedValue(value)) return true;
  const proto = Object.getPrototypeOf(value);
  if (proto === null || proto === Object.prototype) return false;
  const ctor = value.constructor;
  // NOTE(review): this ctor-name check is dead code — both it and the
  // fallthrough below return true, so every non-plain object is terminal
  // regardless of constructor. Presumably kept for documentation value.
  if (ctor && typeof ctor.name === "string" && FIRESTORE_TERMINAL_CTOR.has(ctor.name)) return true;
  return true;
}
// Keys must be safe to embed in SQLite JSON paths without escaping.
var SAFE_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
// `replaceData` (migration write-back) and `dataOps` (partial update) are
// mutually exclusive forms of an UpdatePayload.
function assertUpdatePayloadExclusive(update) {
  if (update.replaceData !== void 0 && update.dataOps !== void 0) {
    throw new Error(
      "firegraph: UpdatePayload cannot specify both `replaceData` and `dataOps`. Use one or the other \u2014 `replaceData` is the migration-write-back form, `dataOps` is the standard partial-update form."
    );
  }
}
// Reject any deleteField() sentinel anywhere in a full-data write payload.
function assertNoDeleteSentinels(data, callerLabel) {
  walkForDeleteSentinels(data, [], { kind: "root" }, ({ path }) => {
    const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
    throw new Error(
      `firegraph: ${callerLabel} payload contains a deleteField() sentinel at ${where}. deleteField() is only valid inside updateNode/updateEdge \u2014 full-data writes (put*, replace*) cannot delete individual fields. Use updateNode with a deleteField() value, or omit the field from the replace payload.`
    );
  });
}
// Depth-first search for delete sentinels. Tagged envelopes are opaque;
// non-plain objects (class instances) are not descended into.
function walkForDeleteSentinels(node, path, parent, visit) {
  if (node === null || node === void 0) return;
  if (isDeleteSentinel(node)) {
    visit({ path, parent });
    return;
  }
  if (typeof node !== "object") return;
  if (isTaggedValue(node)) return;
  if (Array.isArray(node)) {
    for (let i = 0; i < node.length; i++) {
      walkForDeleteSentinels(node[i], [...path, String(i)], { kind: "array", index: i }, visit);
    }
    return;
  }
  const proto = Object.getPrototypeOf(node);
  if (proto !== null && proto !== Object.prototype) return;
  const obj = node;
  for (const key of Object.keys(obj)) {
    walkForDeleteSentinels(obj[key], [...path, key], { kind: "object" }, visit);
  }
}
// Every segment of an update path must match SAFE_KEY_RE.
function assertSafePath(path) {
  for (const seg of path) {
    if (!SAFE_KEY_RE.test(seg)) {
      throw new Error(
        `firegraph: unsafe object key ${JSON.stringify(seg)} at path ${path.map((p) => JSON.stringify(p)).join(" > ")}. Keys used inside update payloads must match /^[A-Za-z_][A-Za-z0-9_-]*$/ so they can be embedded safely in SQLite JSON paths.`
      );
    }
  }
}
// Flatten a nested patch object into a list of { path, value, delete } ops.
function flattenPatch(data) {
  const ops = [];
  walk(data, [], ops);
  return ops;
}
// Arrays are written as a unit, so a sentinel inside one would be silently
// dropped by JSON serialization — reject it loudly instead.
function assertNoDeleteSentinelsInArrayValue(arr, arrayPath) {
  walkForDeleteSentinels(arr, arrayPath, { kind: "root" }, ({ parent }) => {
    const arrayPathStr = arrayPath.length === 0 ? "<root>" : arrayPath.map((p) => JSON.stringify(p)).join(" > ");
    if (parent.kind === "array") {
      throw new Error(
        `firegraph: deleteField() sentinel at index ${parent.index} inside an array at path ${arrayPathStr}. Arrays are terminal in update payloads (replaced as a unit), so the sentinel would be silently dropped by JSON serialization. To remove the field entirely, pass deleteField() in place of the whole array.`
      );
    }
    throw new Error(
      `firegraph: deleteField() sentinel inside an array element at path ${arrayPathStr}. Arrays are terminal in update payloads \u2014 the sentinel would be silently dropped by JSON serialization.`
    );
  });
}
// Recursive worker for flattenPatch. Emits one op per terminal value or
// delete sentinel; recurses through plain objects, validating keys.
function walk(node, path, out) {
  if (node === void 0) return;
  if (isDeleteSentinel(node)) {
    if (path.length === 0) {
      throw new Error("firegraph: deleteField() cannot be the entire update payload.");
    }
    assertSafePath(path);
    out.push({ path: [...path], value: void 0, delete: true });
    return;
  }
  if (isTerminalValue(node)) {
    // A terminal at the root means the payload itself wasn't a plain object.
    if (path.length === 0) {
      throw new Error(
        "firegraph: update payload must be a plain object. Got " + (node === null ? "null" : Array.isArray(node) ? "array" : typeof node) + "."
      );
    }
    if (Array.isArray(node)) {
      assertNoDeleteSentinelsInArrayValue(node, path);
    }
    assertSafePath(path);
    out.push({ path: [...path], value: node, delete: false });
    return;
  }
  const obj = node;
  const keys = Object.keys(obj);
  // Empty nested object: written as {} (root-level empty payload is a no-op).
  if (keys.length === 0) {
    if (path.length > 0) {
      assertSafePath(path);
      out.push({ path: [...path], value: {}, delete: false });
    }
    return;
  }
  for (const key of keys) {
    // The envelope key is reserved; a literal occurrence in user data would
    // be misread as a tagged value on deserialization.
    if (key === SERIALIZATION_TAG) {
      const where = path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
      throw new Error(
        `firegraph: update payload contains a literal \`${SERIALIZATION_TAG}\` key at ${where}. That key is reserved for firegraph's serialization envelope and cannot appear on a plain object in user data. Use a different field name, or pass a recognized tagged value through replaceNode/replaceEdge instead.`
      );
    }
    walk(obj[key], [...path, key], out);
  }
}
|
|
344
|
+
|
|
345
|
+
// src/batch.ts
// A node is stored as a self-edge: (aType) -[is]-> (aType) with aUid === bUid.
function buildWritableNodeRecord(aType, uid, data) {
  return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };
}
function buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data) {
  return { aType, aUid, axbType, bType, bUid, data };
}
// Buffers node/edge writes against a backend and flushes them on commit().
// The put*/replace*/update*/remove* methods are declared async for interface
// compatibility but queue synchronously — only commit() actually awaits.
var GraphBatchImpl = class {
  constructor(backend, registry, scopePath = "") {
    this.backend = backend;
    this.registry = registry;
    this.scopePath = scopePath;
  }
  // put* = merge write; replace* = full overwrite.
  async putNode(aType, uid, data) {
    this.writeNode(aType, uid, data, "merge");
  }
  async putEdge(aType, aUid, axbType, bType, bUid, data) {
    this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
  }
  async replaceNode(aType, uid, data) {
    this.writeNode(aType, uid, data, "replace");
  }
  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
    this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
  }
  // Shared node-write path: reject delete sentinels, validate against the
  // registry (if any), stamp the current schemaVersion as `v`, and enqueue.
  writeNode(aType, uid, data, mode) {
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
    if (this.registry) {
      this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
    }
    const docId = computeNodeDocId(uid);
    const record = buildWritableNodeRecord(aType, uid, data);
    if (this.registry) {
      const entry = this.registry.lookup(aType, NODE_RELATION, aType);
      if (entry?.schemaVersion && entry.schemaVersion > 0) {
        record.v = entry.schemaVersion;
      }
    }
    this.backend.setDoc(docId, record, mode);
  }
  // Shared edge-write path; mirrors writeNode for an explicit triple.
  writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
    if (this.registry) {
      this.registry.validate(aType, axbType, bType, data, this.scopePath);
    }
    const docId = computeEdgeDocId(aUid, axbType, bUid);
    const record = buildWritableEdgeRecord(aType, aUid, axbType, bType, bUid, data);
    if (this.registry) {
      const entry = this.registry.lookup(aType, axbType, bType);
      if (entry?.schemaVersion && entry.schemaVersion > 0) {
        record.v = entry.schemaVersion;
      }
    }
    this.backend.setDoc(docId, record, mode);
  }
  // Partial updates: the payload is flattened into path ops (deleteField()
  // allowed here). NOTE(review): unlike writeNode/writeEdge, no registry
  // validation or version stamping happens on this path — confirm intended.
  async updateNode(uid, data) {
    const docId = computeNodeDocId(uid);
    this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
  }
  async updateEdge(aUid, axbType, bUid, data) {
    const docId = computeEdgeDocId(aUid, axbType, bUid);
    this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
  }
  async removeNode(uid) {
    const docId = computeNodeDocId(uid);
    this.backend.deleteDoc(docId);
  }
  async removeEdge(aUid, axbType, bUid) {
    const docId = computeEdgeDocId(aUid, axbType, bUid);
    this.backend.deleteDoc(docId);
  }
  // Flush all queued writes through the backend.
  async commit() {
    await this.backend.commit();
  }
};
|
|
420
|
+
|
|
421
|
+
// src/dynamic-registry.ts
// Separate crypto binding for the dynamic-registry module (bundler keeps
// per-source-module require aliases distinct).
var import_node_crypto3 = require("crypto");
|
|
423
|
+
|
|
424
|
+
// src/errors.ts
/**
 * Base class for every error thrown by firegraph. Carries a stable,
 * machine-readable `code` alongside the human-readable message.
 */
var FiregraphError = class extends Error {
  constructor(message, code) {
    super(message);
    this.name = "FiregraphError";
    this.code = code;
  }
};
/** Payload failed JSON-schema validation; `details` holds the raw validator errors. */
var ValidationError = class extends FiregraphError {
  constructor(message, details) {
    super(message, "VALIDATION_ERROR");
    this.name = "ValidationError";
    this.details = details;
  }
};
/** A write named a (aType, axbType, bType) triple that is not in the registry. */
var RegistryViolationError = class extends FiregraphError {
  constructor(aType, axbType, bType) {
    const rendered = `Unregistered triple: (${aType}) -[${axbType}]-> (${bType})`;
    super(rendered, "REGISTRY_VIOLATION");
    this.name = "RegistryViolationError";
  }
};
/** A query was structurally invalid. */
var InvalidQueryError = class extends FiregraphError {
  constructor(message) {
    super(message, "INVALID_QUERY");
    this.name = "InvalidQueryError";
  }
};
/** Dynamic-registry operation failed. */
var DynamicRegistryError = class extends FiregraphError {
  constructor(message) {
    super(message, "DYNAMIC_REGISTRY_ERROR");
    this.name = "DynamicRegistryError";
  }
};
/** A query violated a safety limit or guard. */
var QuerySafetyError = class extends FiregraphError {
  constructor(message) {
    super(message, "QUERY_SAFETY");
    this.name = "QuerySafetyError";
  }
};
/** A registered triple was used at a scope it is not allowed in. */
var RegistryScopeError = class extends FiregraphError {
  constructor(aType, axbType, bType, scopePath, allowedIn) {
    const where = scopePath || "root";
    super(
      `Type (${aType}) -[${axbType}]-> (${bType}) is not allowed at scope "${where}". Allowed in: [${allowedIn.join(", ")}]`,
      "REGISTRY_SCOPE"
    );
    this.name = "RegistryScopeError";
  }
};
/** A schema-migration chain was invalid, incomplete, or a step failed. */
var MigrationError = class extends FiregraphError {
  constructor(message) {
    super(message, "MIGRATION_ERROR");
    this.name = "MigrationError";
  }
};
|
|
478
|
+
|
|
479
|
+
// src/json-schema.ts
var import_json_schema = require("@cfworker/json-schema");
// Cap on how many individual validator errors are rendered into one message.
var MAX_RENDERED_ERRORS = 20;
/**
 * Compile a JSON Schema into a validate(data) function that throws
 * ValidationError (with the raw error list attached) on failure.
 * Uses draft 2020-12; the third `false` argument presumably disables
 * short-circuiting so all errors are collected — confirm against the
 * @cfworker/json-schema Validator API.
 * @param schema JSON Schema document.
 * @param label Optional human-readable name included in the error message.
 */
function compileSchema(schema, label) {
  const validator = new import_json_schema.Validator(schema, "2020-12", false);
  return (data) => {
    const result = validator.validate(data);
    if (!result.valid) {
      const total = result.errors.length;
      // Render at most MAX_RENDERED_ERRORS, then note how many were elided.
      const head = result.errors.slice(0, MAX_RENDERED_ERRORS).map(formatError).join("; ");
      const overflow = total > MAX_RENDERED_ERRORS ? ` (+${total - MAX_RENDERED_ERRORS} more)` : "";
      throw new ValidationError(
        `Data validation failed${label ? " for " + label : ""}: ${head}${overflow}`,
        result.errors
      );
    }
  };
}
|
|
497
|
+
// Render one validator error as "<location>: [<keyword>] <message>".
// The leading "#" of the JSON-pointer location is stripped; an empty
// location (the document root) is rendered as "/".
function formatError(err) {
  let location = err.instanceLocation.replace(/^#/, "");
  if (location === "") location = "/";
  if (!err.error) return location;
  const keywordPrefix = err.keyword ? `[${err.keyword}] ` : "";
  return `${location}: ${keywordPrefix}${err.error}`;
}
|
|
503
|
+
|
|
504
|
+
// src/migration.ts
/**
 * Run `data` through the migration steps from `currentVersion` up to
 * `targetVersion`. Steps are sorted by fromVersion and applied only when
 * they continue the chain from the version reached so far. Throws
 * MigrationError if a step fails, returns a non-object, or the chain
 * stops short of the target.
 */
async function applyMigrationChain(data, currentVersion, targetVersion, migrations) {
  const orderedSteps = [...migrations].sort((lhs, rhs) => lhs.fromVersion - rhs.fromVersion);
  let current = { ...data };
  let reached = currentVersion;
  for (const step of orderedSteps) {
    if (step.fromVersion !== reached) continue;
    try {
      current = await step.up(current);
    } catch (err) {
      if (err instanceof MigrationError) throw err;
      throw new MigrationError(
        `Migration from v${step.fromVersion} to v${step.toVersion} failed: ${err.message}`
      );
    }
    if (!current || typeof current !== "object") {
      throw new MigrationError(
        `Migration from v${step.fromVersion} to v${step.toVersion} returned invalid data (expected object)`
      );
    }
    reached = step.toVersion;
  }
  if (reached !== targetVersion) {
    throw new MigrationError(
      `Incomplete migration chain: reached v${reached} but target is v${targetVersion}`
    );
  }
  return current;
}
|
|
534
|
+
/**
 * Statically check a migration list at registry-build time: every step must
 * increase the version, fromVersions must be unique, and sorting by
 * fromVersion must yield an unbroken chain from v0 to the highest toVersion.
 * Throws MigrationError on the first violation; an empty list is valid.
 */
function validateMigrationChain(migrations, label) {
  if (migrations.length === 0) return;
  const seen = /* @__PURE__ */ new Set();
  for (const step of migrations) {
    if (step.toVersion <= step.fromVersion) {
      throw new MigrationError(
        `${label}: migration step has toVersion (${step.toVersion}) <= fromVersion (${step.fromVersion})`
      );
    }
    if (seen.has(step.fromVersion)) {
      throw new MigrationError(
        `${label}: duplicate migration step for fromVersion ${step.fromVersion}`
      );
    }
    seen.add(step.fromVersion);
  }
  // Walk the sorted steps, advancing `version` only when a step picks up
  // exactly where the chain currently stands.
  const sorted = [...migrations].sort((a, b) => a.fromVersion - b.fromVersion);
  const targetVersion = Math.max(...migrations.map((m) => m.toVersion));
  let version = 0;
  for (const step of sorted) {
    if (step.fromVersion === version) {
      version = step.toVersion;
    } else if (step.fromVersion > version) {
      throw new MigrationError(
        `${label}: migration chain has a gap \u2014 no step covers v${version} \u2192 v${step.fromVersion}`
      );
    }
    // NOTE(review): a step whose fromVersion is below the already-reached
    // version (e.g. 0→2 followed by 1→2) falls through both branches and is
    // skipped silently — confirm that overlapping chains are intended to
    // pass as long as the final version matches.
  }
  if (version !== targetVersion) {
    throw new MigrationError(
      `${label}: migration chain does not reach v${targetVersion} (stuck at v${version})`
    );
  }
}
|
|
568
|
+
/**
 * Migrate one stored record to its entry's current schemaVersion.
 * Returns { record, migrated, writeBack }: untouched (migrated: false,
 * writeBack "off") when the triple has no migrations/schemaVersion or the
 * record is already current; otherwise the migrated record stamped with the
 * new version. An absent record.v is treated as v0.
 */
async function migrateRecord(record, registry, globalWriteBack = "off") {
  const entry = registry.lookup(record.aType, record.axbType, record.bType);
  if (!entry?.migrations?.length || !entry.schemaVersion) {
    return { record, migrated: false, writeBack: "off" };
  }
  const currentVersion = record.v ?? 0;
  if (currentVersion >= entry.schemaVersion) {
    return { record, migrated: false, writeBack: "off" };
  }
  const migratedData = await applyMigrationChain(
    record.data,
    currentVersion,
    entry.schemaVersion,
    entry.migrations
  );
  // Per-entry writeBack overrides the global setting. (The trailing ?? "off"
  // is redundant given the parameter default, but harmless.)
  const writeBack = entry.migrationWriteBack ?? globalWriteBack ?? "off";
  return {
    record: { ...record, data: migratedData, v: entry.schemaVersion },
    migrated: true,
    writeBack
  };
}
// Migrate a batch of records concurrently; order is preserved.
async function migrateRecords(records, registry, globalWriteBack = "off") {
  return Promise.all(records.map((r) => migrateRecord(r, registry, globalWriteBack)));
}
|
|
593
|
+
|
|
594
|
+
// src/scope.ts
// Glob-style scope matching: "root" matches only the empty path, "**" any
// path; otherwise patterns are "/"-separated with "*" (one segment) and
// "**" (zero or more segments) wildcards.
function matchScope(scopePath, pattern) {
  if (pattern === "root") return scopePath === "";
  if (pattern === "**") return true;
  const segments = scopePath === "" ? [] : scopePath.split("/");
  const patternParts = pattern.split("/");
  return matchSegments(segments, 0, patternParts, 0);
}
// True when any pattern matches; an empty/missing pattern list matches all.
function matchScopeAny(scopePath, patterns) {
  if (!patterns || patterns.length === 0) return true;
  return patterns.some((p) => matchScope(scopePath, p));
}
// Recursive matcher over segment arrays starting at indices si / ti.
function matchSegments(segments, si, patternParts, ti) {
  const segmentsDone = si === segments.length;
  if (ti === patternParts.length) return segmentsDone;
  const token = patternParts[ti];
  if (token === "**") {
    // Trailing "**" swallows everything; otherwise try every skip length.
    if (ti === patternParts.length - 1) return true;
    for (let skip = 0; si + skip <= segments.length; skip++) {
      if (matchSegments(segments, si + skip, patternParts, ti + 1)) return true;
    }
    return false;
  }
  if (segmentsDone) return false;
  if (token !== "*" && segments[si] !== token) return false;
  return matchSegments(segments, si + 1, patternParts, ti + 1);
}
|
|
626
|
+
|
|
627
|
+
// src/registry.ts
// Canonical map key for a registry triple.
function tripleKey(aType, axbType, bType) {
  return [aType, axbType, bType].join(":");
}
// Key for an entry-shaped object carrying aType/axbType/bType fields.
function tripleKeyFor(e) {
  const { aType, axbType, bType } = e;
  return tripleKey(aType, axbType, bType);
}
|
|
634
|
+
function createRegistry(input) {
|
|
635
|
+
const map = /* @__PURE__ */ new Map();
|
|
636
|
+
let entries;
|
|
637
|
+
if (Array.isArray(input)) {
|
|
638
|
+
entries = input;
|
|
639
|
+
} else {
|
|
640
|
+
entries = discoveryToEntries(input);
|
|
641
|
+
}
|
|
642
|
+
const entryList = Object.freeze([...entries]);
|
|
643
|
+
for (const entry of entries) {
|
|
644
|
+
if (entry.targetGraph && entry.targetGraph.includes("/")) {
|
|
645
|
+
throw new ValidationError(
|
|
646
|
+
`Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType}) has invalid targetGraph "${entry.targetGraph}" \u2014 must be a single segment (no "/")`
|
|
647
|
+
);
|
|
648
|
+
}
|
|
649
|
+
if (entry.migrations?.length) {
|
|
650
|
+
const label = `Entry (${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`;
|
|
651
|
+
validateMigrationChain(entry.migrations, label);
|
|
652
|
+
entry.schemaVersion = Math.max(...entry.migrations.map((m) => m.toVersion));
|
|
653
|
+
} else {
|
|
654
|
+
entry.schemaVersion = void 0;
|
|
655
|
+
}
|
|
656
|
+
const key = tripleKey(entry.aType, entry.axbType, entry.bType);
|
|
657
|
+
const validator = entry.jsonSchema ? compileSchema(entry.jsonSchema, `(${entry.aType}) -[${entry.axbType}]-> (${entry.bType})`) : void 0;
|
|
658
|
+
map.set(key, { entry, validate: validator });
|
|
659
|
+
}
|
|
660
|
+
const axbIndex = /* @__PURE__ */ new Map();
|
|
661
|
+
const axbBuild = /* @__PURE__ */ new Map();
|
|
662
|
+
for (const entry of entries) {
|
|
663
|
+
const existing = axbBuild.get(entry.axbType);
|
|
664
|
+
if (existing) {
|
|
665
|
+
existing.push(entry);
|
|
666
|
+
} else {
|
|
667
|
+
axbBuild.set(entry.axbType, [entry]);
|
|
668
|
+
}
|
|
669
|
+
}
|
|
670
|
+
for (const [key, arr] of axbBuild) {
|
|
671
|
+
axbIndex.set(key, Object.freeze(arr));
|
|
672
|
+
}
|
|
673
|
+
const topologyIndex = /* @__PURE__ */ new Map();
|
|
674
|
+
const topologyBuild = /* @__PURE__ */ new Map();
|
|
675
|
+
const topologySeen = /* @__PURE__ */ new Map();
|
|
676
|
+
for (const entry of entries) {
|
|
677
|
+
if (!entry.targetGraph) continue;
|
|
678
|
+
let seen = topologySeen.get(entry.aType);
|
|
679
|
+
if (!seen) {
|
|
680
|
+
seen = /* @__PURE__ */ new Set();
|
|
681
|
+
topologySeen.set(entry.aType, seen);
|
|
682
|
+
}
|
|
683
|
+
if (seen.has(entry.targetGraph)) continue;
|
|
684
|
+
seen.add(entry.targetGraph);
|
|
685
|
+
const existing = topologyBuild.get(entry.aType);
|
|
686
|
+
if (existing) {
|
|
687
|
+
existing.push(entry);
|
|
688
|
+
} else {
|
|
689
|
+
topologyBuild.set(entry.aType, [entry]);
|
|
690
|
+
}
|
|
691
|
+
}
|
|
692
|
+
for (const [key, arr] of topologyBuild) {
|
|
693
|
+
topologyIndex.set(key, Object.freeze(arr));
|
|
694
|
+
}
|
|
695
|
+
return {
|
|
696
|
+
lookup(aType, axbType, bType) {
|
|
697
|
+
return map.get(tripleKey(aType, axbType, bType))?.entry;
|
|
698
|
+
},
|
|
699
|
+
lookupByAxbType(axbType) {
|
|
700
|
+
return axbIndex.get(axbType) ?? [];
|
|
701
|
+
},
|
|
702
|
+
getSubgraphTopology(aType) {
|
|
703
|
+
return topologyIndex.get(aType) ?? [];
|
|
704
|
+
},
|
|
705
|
+
validate(aType, axbType, bType, data, scopePath) {
|
|
706
|
+
const rec = map.get(tripleKey(aType, axbType, bType));
|
|
707
|
+
if (!rec) {
|
|
708
|
+
throw new RegistryViolationError(aType, axbType, bType);
|
|
709
|
+
}
|
|
710
|
+
if (scopePath !== void 0 && rec.entry.allowedIn && rec.entry.allowedIn.length > 0) {
|
|
711
|
+
if (!matchScopeAny(scopePath, rec.entry.allowedIn)) {
|
|
712
|
+
throw new RegistryScopeError(aType, axbType, bType, scopePath, rec.entry.allowedIn);
|
|
713
|
+
}
|
|
714
|
+
}
|
|
715
|
+
if (rec.validate) {
|
|
716
|
+
try {
|
|
717
|
+
rec.validate(data);
|
|
718
|
+
} catch (err) {
|
|
719
|
+
if (err instanceof ValidationError) throw err;
|
|
720
|
+
throw new ValidationError(
|
|
721
|
+
`Data validation failed for (${aType}) -[${axbType}]-> (${bType})`,
|
|
722
|
+
err
|
|
723
|
+
);
|
|
724
|
+
}
|
|
725
|
+
}
|
|
726
|
+
},
|
|
727
|
+
entries() {
|
|
728
|
+
return entryList;
|
|
729
|
+
}
|
|
730
|
+
};
|
|
731
|
+
}
|
|
732
|
+
function createMergedRegistry(base, extension) {
|
|
733
|
+
const baseKeys = new Set(base.entries().map(tripleKeyFor));
|
|
734
|
+
return {
|
|
735
|
+
lookup(aType, axbType, bType) {
|
|
736
|
+
return base.lookup(aType, axbType, bType) ?? extension.lookup(aType, axbType, bType);
|
|
737
|
+
},
|
|
738
|
+
lookupByAxbType(axbType) {
|
|
739
|
+
const baseResults = base.lookupByAxbType(axbType);
|
|
740
|
+
const extResults = extension.lookupByAxbType(axbType);
|
|
741
|
+
if (extResults.length === 0) return baseResults;
|
|
742
|
+
if (baseResults.length === 0) return extResults;
|
|
743
|
+
const seen = new Set(baseResults.map(tripleKeyFor));
|
|
744
|
+
const merged = [...baseResults];
|
|
745
|
+
for (const entry of extResults) {
|
|
746
|
+
if (!seen.has(tripleKeyFor(entry))) {
|
|
747
|
+
merged.push(entry);
|
|
748
|
+
}
|
|
749
|
+
}
|
|
750
|
+
return Object.freeze(merged);
|
|
751
|
+
},
|
|
752
|
+
getSubgraphTopology(aType) {
|
|
753
|
+
const baseResults = base.getSubgraphTopology(aType);
|
|
754
|
+
const extResults = extension.getSubgraphTopology(aType);
|
|
755
|
+
if (extResults.length === 0) return baseResults;
|
|
756
|
+
if (baseResults.length === 0) return extResults;
|
|
757
|
+
const seen = new Set(baseResults.map((e) => e.targetGraph));
|
|
758
|
+
const merged = [...baseResults];
|
|
759
|
+
for (const entry of extResults) {
|
|
760
|
+
if (!seen.has(entry.targetGraph)) {
|
|
761
|
+
seen.add(entry.targetGraph);
|
|
762
|
+
merged.push(entry);
|
|
763
|
+
}
|
|
764
|
+
}
|
|
765
|
+
return Object.freeze(merged);
|
|
766
|
+
},
|
|
767
|
+
validate(aType, axbType, bType, data, scopePath) {
|
|
768
|
+
if (baseKeys.has(tripleKey(aType, axbType, bType))) {
|
|
769
|
+
return base.validate(aType, axbType, bType, data, scopePath);
|
|
770
|
+
}
|
|
771
|
+
return extension.validate(aType, axbType, bType, data, scopePath);
|
|
772
|
+
},
|
|
773
|
+
entries() {
|
|
774
|
+
const extEntries = extension.entries();
|
|
775
|
+
if (extEntries.length === 0) return base.entries();
|
|
776
|
+
const merged = [...base.entries()];
|
|
777
|
+
for (const entry of extEntries) {
|
|
778
|
+
if (!baseKeys.has(tripleKeyFor(entry))) {
|
|
779
|
+
merged.push(entry);
|
|
780
|
+
}
|
|
781
|
+
}
|
|
782
|
+
return Object.freeze(merged);
|
|
783
|
+
}
|
|
784
|
+
};
|
|
785
|
+
}
|
|
786
|
+
function discoveryToEntries(discovery) {
|
|
787
|
+
const entries = [];
|
|
788
|
+
for (const [name, entity] of discovery.nodes) {
|
|
789
|
+
entries.push({
|
|
790
|
+
aType: name,
|
|
791
|
+
axbType: NODE_RELATION,
|
|
792
|
+
bType: name,
|
|
793
|
+
jsonSchema: entity.schema,
|
|
794
|
+
description: entity.description,
|
|
795
|
+
titleField: entity.titleField,
|
|
796
|
+
subtitleField: entity.subtitleField,
|
|
797
|
+
allowedIn: entity.allowedIn,
|
|
798
|
+
migrations: entity.migrations,
|
|
799
|
+
migrationWriteBack: entity.migrationWriteBack,
|
|
800
|
+
indexes: entity.indexes
|
|
801
|
+
});
|
|
802
|
+
}
|
|
803
|
+
for (const [axbType, entity] of discovery.edges) {
|
|
804
|
+
const topology = entity.topology;
|
|
805
|
+
if (!topology) continue;
|
|
806
|
+
const fromTypes = Array.isArray(topology.from) ? topology.from : [topology.from];
|
|
807
|
+
const toTypes = Array.isArray(topology.to) ? topology.to : [topology.to];
|
|
808
|
+
const resolvedTargetGraph = entity.targetGraph ?? topology.targetGraph;
|
|
809
|
+
if (resolvedTargetGraph && resolvedTargetGraph.includes("/")) {
|
|
810
|
+
throw new ValidationError(
|
|
811
|
+
`Edge "${axbType}" has invalid targetGraph "${resolvedTargetGraph}" \u2014 must be a single segment (no "/")`
|
|
812
|
+
);
|
|
813
|
+
}
|
|
814
|
+
for (const aType of fromTypes) {
|
|
815
|
+
for (const bType of toTypes) {
|
|
816
|
+
entries.push({
|
|
817
|
+
aType,
|
|
818
|
+
axbType,
|
|
819
|
+
bType,
|
|
820
|
+
jsonSchema: entity.schema,
|
|
821
|
+
description: entity.description,
|
|
822
|
+
inverseLabel: topology.inverseLabel,
|
|
823
|
+
titleField: entity.titleField,
|
|
824
|
+
subtitleField: entity.subtitleField,
|
|
825
|
+
allowedIn: entity.allowedIn,
|
|
826
|
+
targetGraph: resolvedTargetGraph,
|
|
827
|
+
migrations: entity.migrations,
|
|
828
|
+
migrationWriteBack: entity.migrationWriteBack,
|
|
829
|
+
indexes: entity.indexes
|
|
830
|
+
});
|
|
831
|
+
}
|
|
832
|
+
}
|
|
833
|
+
}
|
|
834
|
+
return entries;
|
|
835
|
+
}
|
|
836
|
+
|
|
837
|
+
// src/sandbox.ts
|
|
838
|
+
var import_node_crypto2 = require("crypto");
|
|
839
|
+
var import_meta = {};
|
|
840
|
+
var _worker = null;
|
|
841
|
+
var _requestId = 0;
|
|
842
|
+
var _pending = /* @__PURE__ */ new Map();
|
|
843
|
+
var WORKER_SOURCE = [
|
|
844
|
+
`'use strict';`,
|
|
845
|
+
`var _wt = require('node:worker_threads');`,
|
|
846
|
+
`var _mod = require('node:module');`,
|
|
847
|
+
`var _crypto = require('node:crypto');`,
|
|
848
|
+
`var parentPort = _wt.parentPort;`,
|
|
849
|
+
`var workerData = _wt.workerData;`,
|
|
850
|
+
``,
|
|
851
|
+
`// Load SES using the parent module's resolution context`,
|
|
852
|
+
`var esmRequire = _mod.createRequire(workerData.parentUrl);`,
|
|
853
|
+
`esmRequire('ses');`,
|
|
854
|
+
``,
|
|
855
|
+
`lockdown({`,
|
|
856
|
+
` errorTaming: 'unsafe',`,
|
|
857
|
+
` consoleTaming: 'unsafe',`,
|
|
858
|
+
` evalTaming: 'safe-eval',`,
|
|
859
|
+
` overrideTaming: 'moderate',`,
|
|
860
|
+
` stackFiltering: 'verbose'`,
|
|
861
|
+
`});`,
|
|
862
|
+
``,
|
|
863
|
+
`// Defense-in-depth: verify lockdown() actually hardened JSON.`,
|
|
864
|
+
`if (!Object.isFrozen(JSON)) {`,
|
|
865
|
+
` throw new Error('SES lockdown failed: JSON is not frozen');`,
|
|
866
|
+
`}`,
|
|
867
|
+
``,
|
|
868
|
+
`var cache = new Map();`,
|
|
869
|
+
``,
|
|
870
|
+
`function hashSource(s) {`,
|
|
871
|
+
` return _crypto.createHash('sha256').update(s).digest('hex');`,
|
|
872
|
+
`}`,
|
|
873
|
+
``,
|
|
874
|
+
`function buildWrapper(source) {`,
|
|
875
|
+
` return '(function() {' +`,
|
|
876
|
+
` ' var fn = (' + source + ');\\n' +`,
|
|
877
|
+
` ' if (typeof fn !== "function") return null;\\n' +`,
|
|
878
|
+
` ' return function(jsonIn) {\\n' +`,
|
|
879
|
+
` ' var data = JSON.parse(jsonIn);\\n' +`,
|
|
880
|
+
` ' var result = fn(data);\\n' +`,
|
|
881
|
+
` ' if (result !== null && typeof result === "object" && typeof result.then === "function") {\\n' +`,
|
|
882
|
+
` ' return result.then(function(r) { return JSON.stringify(r); });\\n' +`,
|
|
883
|
+
` ' }\\n' +`,
|
|
884
|
+
` ' return JSON.stringify(result);\\n' +`,
|
|
885
|
+
` ' };\\n' +`,
|
|
886
|
+
` '})()';`,
|
|
887
|
+
`}`,
|
|
888
|
+
``,
|
|
889
|
+
`function compileSource(source) {`,
|
|
890
|
+
` var key = hashSource(source);`,
|
|
891
|
+
` var cached = cache.get(key);`,
|
|
892
|
+
` if (cached) return cached;`,
|
|
893
|
+
``,
|
|
894
|
+
` var compartmentFn;`,
|
|
895
|
+
` try {`,
|
|
896
|
+
` var c = new Compartment({ JSON: JSON });`,
|
|
897
|
+
` compartmentFn = c.evaluate(buildWrapper(source));`,
|
|
898
|
+
` } catch (err) {`,
|
|
899
|
+
` throw new Error('Failed to compile migration source: ' + (err.message || String(err)));`,
|
|
900
|
+
` }`,
|
|
901
|
+
``,
|
|
902
|
+
` if (typeof compartmentFn !== 'function') {`,
|
|
903
|
+
` throw new Error('Migration source did not produce a function: ' + source.slice(0, 80));`,
|
|
904
|
+
` }`,
|
|
905
|
+
``,
|
|
906
|
+
` cache.set(key, compartmentFn);`,
|
|
907
|
+
` return compartmentFn;`,
|
|
908
|
+
`}`,
|
|
909
|
+
``,
|
|
910
|
+
`parentPort.on('message', function(msg) {`,
|
|
911
|
+
` var id = msg.id;`,
|
|
912
|
+
` try {`,
|
|
913
|
+
` if (msg.type === 'compile') {`,
|
|
914
|
+
` compileSource(msg.source);`,
|
|
915
|
+
` parentPort.postMessage({ id: id, type: 'compiled' });`,
|
|
916
|
+
` return;`,
|
|
917
|
+
` }`,
|
|
918
|
+
` if (msg.type === 'execute') {`,
|
|
919
|
+
` var fn = compileSource(msg.source);`,
|
|
920
|
+
` var raw;`,
|
|
921
|
+
` try {`,
|
|
922
|
+
` raw = fn(msg.jsonData);`,
|
|
923
|
+
` } catch (err) {`,
|
|
924
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Migration function threw: ' + (err.message || String(err)) });`,
|
|
925
|
+
` return;`,
|
|
926
|
+
` }`,
|
|
927
|
+
` if (raw !== null && typeof raw === 'object' && typeof raw.then === 'function') {`,
|
|
928
|
+
` raw.then(`,
|
|
929
|
+
` function(jsonResult) {`,
|
|
930
|
+
` if (jsonResult === undefined || jsonResult === null) {`,
|
|
931
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,
|
|
932
|
+
` } else {`,
|
|
933
|
+
` parentPort.postMessage({ id: id, type: 'result', jsonResult: jsonResult });`,
|
|
934
|
+
` }`,
|
|
935
|
+
` },`,
|
|
936
|
+
` function(err) {`,
|
|
937
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Async migration function threw: ' + (err.message || String(err)) });`,
|
|
938
|
+
` }`,
|
|
939
|
+
` );`,
|
|
940
|
+
` return;`,
|
|
941
|
+
` }`,
|
|
942
|
+
` if (raw === undefined || raw === null) {`,
|
|
943
|
+
` parentPort.postMessage({ id: id, type: 'error', message: 'Migration returned a non-JSON-serializable value' });`,
|
|
944
|
+
` } else {`,
|
|
945
|
+
` parentPort.postMessage({ id: id, type: 'result', jsonResult: raw });`,
|
|
946
|
+
` }`,
|
|
947
|
+
` }`,
|
|
948
|
+
` } catch (err) {`,
|
|
949
|
+
` parentPort.postMessage({ id: id, type: 'error', message: err.message || String(err) });`,
|
|
950
|
+
` }`,
|
|
951
|
+
`});`
|
|
952
|
+
].join("\n");
|
|
953
|
+
var _WorkerCtor = null;
|
|
954
|
+
async function loadWorkerCtor() {
|
|
955
|
+
if (_WorkerCtor) return _WorkerCtor;
|
|
956
|
+
const wt = await import("worker_threads");
|
|
957
|
+
_WorkerCtor = wt.Worker;
|
|
958
|
+
return _WorkerCtor;
|
|
959
|
+
}
|
|
960
|
+
async function ensureWorker() {
|
|
961
|
+
if (_worker) return _worker;
|
|
962
|
+
const Ctor = await loadWorkerCtor();
|
|
963
|
+
_worker = new Ctor(WORKER_SOURCE, {
|
|
964
|
+
eval: true,
|
|
965
|
+
workerData: { parentUrl: import_meta.url }
|
|
966
|
+
});
|
|
967
|
+
_worker.unref();
|
|
968
|
+
_worker.on("message", (msg) => {
|
|
969
|
+
if (msg.id === void 0) return;
|
|
970
|
+
const pending = _pending.get(msg.id);
|
|
971
|
+
if (!pending) return;
|
|
972
|
+
_pending.delete(msg.id);
|
|
973
|
+
if (msg.type === "error") {
|
|
974
|
+
pending.reject(new MigrationError(msg.message ?? "Unknown sandbox error"));
|
|
975
|
+
} else {
|
|
976
|
+
pending.resolve(msg);
|
|
977
|
+
}
|
|
978
|
+
});
|
|
979
|
+
_worker.on("error", (err) => {
|
|
980
|
+
for (const [, p] of _pending) {
|
|
981
|
+
p.reject(new MigrationError(`Sandbox worker error: ${err.message}`));
|
|
982
|
+
}
|
|
983
|
+
_pending.clear();
|
|
984
|
+
_worker = null;
|
|
985
|
+
});
|
|
986
|
+
_worker.on("exit", (code) => {
|
|
987
|
+
if (_pending.size > 0) {
|
|
988
|
+
for (const [, p] of _pending) {
|
|
989
|
+
p.reject(new MigrationError(`Sandbox worker exited with code ${code}`));
|
|
990
|
+
}
|
|
991
|
+
_pending.clear();
|
|
992
|
+
}
|
|
993
|
+
_worker = null;
|
|
994
|
+
});
|
|
995
|
+
return _worker;
|
|
996
|
+
}
|
|
997
|
+
async function sendToWorker(msg) {
|
|
998
|
+
const worker = await ensureWorker();
|
|
999
|
+
if (_requestId >= Number.MAX_SAFE_INTEGER) _requestId = 0;
|
|
1000
|
+
const id = ++_requestId;
|
|
1001
|
+
return new Promise((resolve, reject) => {
|
|
1002
|
+
_pending.set(id, { resolve, reject });
|
|
1003
|
+
worker.postMessage({ ...msg, id });
|
|
1004
|
+
});
|
|
1005
|
+
}
|
|
1006
|
+
var compiledCache = /* @__PURE__ */ new WeakMap();
|
|
1007
|
+
function getExecutorCache(executor) {
|
|
1008
|
+
let cache = compiledCache.get(executor);
|
|
1009
|
+
if (!cache) {
|
|
1010
|
+
cache = /* @__PURE__ */ new Map();
|
|
1011
|
+
compiledCache.set(executor, cache);
|
|
1012
|
+
}
|
|
1013
|
+
return cache;
|
|
1014
|
+
}
|
|
1015
|
+
function hashSource(source) {
|
|
1016
|
+
return (0, import_node_crypto2.createHash)("sha256").update(source).digest("hex");
|
|
1017
|
+
}
|
|
1018
|
+
var _serializationModule = null;
|
|
1019
|
+
async function loadSerialization() {
|
|
1020
|
+
if (_serializationModule) return _serializationModule;
|
|
1021
|
+
_serializationModule = await Promise.resolve().then(() => (init_serialization(), serialization_exports));
|
|
1022
|
+
return _serializationModule;
|
|
1023
|
+
}
|
|
1024
|
+
function defaultExecutor(source) {
|
|
1025
|
+
return (async (data) => {
|
|
1026
|
+
const { serializeFirestoreTypes: serializeFirestoreTypes2, deserializeFirestoreTypes: deserializeFirestoreTypes2 } = await loadSerialization();
|
|
1027
|
+
const jsonData = JSON.stringify(serializeFirestoreTypes2(data));
|
|
1028
|
+
const response = await sendToWorker({ type: "execute", source, jsonData });
|
|
1029
|
+
if (response.jsonResult === void 0 || response.jsonResult === null) {
|
|
1030
|
+
throw new MigrationError("Migration returned a non-JSON-serializable value");
|
|
1031
|
+
}
|
|
1032
|
+
try {
|
|
1033
|
+
return deserializeFirestoreTypes2(JSON.parse(response.jsonResult));
|
|
1034
|
+
} catch {
|
|
1035
|
+
throw new MigrationError("Migration returned a non-JSON-serializable value");
|
|
1036
|
+
}
|
|
1037
|
+
});
|
|
1038
|
+
}
|
|
1039
|
+
async function precompileSource(source, executor) {
|
|
1040
|
+
if (executor && executor !== defaultExecutor) {
|
|
1041
|
+
try {
|
|
1042
|
+
executor(source);
|
|
1043
|
+
} catch (err) {
|
|
1044
|
+
if (err instanceof MigrationError) throw err;
|
|
1045
|
+
throw new MigrationError(`Failed to compile migration source: ${err.message}`);
|
|
1046
|
+
}
|
|
1047
|
+
return;
|
|
1048
|
+
}
|
|
1049
|
+
await sendToWorker({ type: "compile", source });
|
|
1050
|
+
}
|
|
1051
|
+
function compileMigrationFn(source, executor = defaultExecutor) {
|
|
1052
|
+
const cache = getExecutorCache(executor);
|
|
1053
|
+
const key = hashSource(source);
|
|
1054
|
+
const cached = cache.get(key);
|
|
1055
|
+
if (cached) return cached;
|
|
1056
|
+
try {
|
|
1057
|
+
const fn = executor(source);
|
|
1058
|
+
cache.set(key, fn);
|
|
1059
|
+
return fn;
|
|
1060
|
+
} catch (err) {
|
|
1061
|
+
if (err instanceof MigrationError) throw err;
|
|
1062
|
+
throw new MigrationError(`Failed to compile migration source: ${err.message}`);
|
|
1063
|
+
}
|
|
1064
|
+
}
|
|
1065
|
+
function compileMigrations(stored, executor) {
|
|
1066
|
+
return stored.map((step) => ({
|
|
1067
|
+
fromVersion: step.fromVersion,
|
|
1068
|
+
toVersion: step.toVersion,
|
|
1069
|
+
up: compileMigrationFn(step.up, executor)
|
|
1070
|
+
}));
|
|
1071
|
+
}
|
|
1072
|
+
|
|
1073
|
+
// src/dynamic-registry.ts
|
|
1074
|
+
var META_NODE_TYPE = "nodeType";
|
|
1075
|
+
var META_EDGE_TYPE = "edgeType";
|
|
1076
|
+
var STORED_MIGRATION_STEP_SCHEMA = {
|
|
1077
|
+
type: "object",
|
|
1078
|
+
required: ["fromVersion", "toVersion", "up"],
|
|
1079
|
+
properties: {
|
|
1080
|
+
fromVersion: { type: "integer", minimum: 0 },
|
|
1081
|
+
toVersion: { type: "integer", minimum: 1 },
|
|
1082
|
+
up: { type: "string", minLength: 1 }
|
|
1083
|
+
},
|
|
1084
|
+
additionalProperties: false
|
|
1085
|
+
};
|
|
1086
|
+
var NODE_TYPE_SCHEMA = {
|
|
1087
|
+
type: "object",
|
|
1088
|
+
required: ["name", "jsonSchema"],
|
|
1089
|
+
properties: {
|
|
1090
|
+
name: { type: "string", minLength: 1 },
|
|
1091
|
+
jsonSchema: { type: "object" },
|
|
1092
|
+
description: { type: "string" },
|
|
1093
|
+
titleField: { type: "string" },
|
|
1094
|
+
subtitleField: { type: "string" },
|
|
1095
|
+
viewTemplate: { type: "string" },
|
|
1096
|
+
viewCss: { type: "string" },
|
|
1097
|
+
allowedIn: { type: "array", items: { type: "string", minLength: 1 } },
|
|
1098
|
+
schemaVersion: { type: "integer", minimum: 0 },
|
|
1099
|
+
migrations: { type: "array", items: STORED_MIGRATION_STEP_SCHEMA },
|
|
1100
|
+
migrationWriteBack: { type: "string", enum: ["off", "eager", "background"] }
|
|
1101
|
+
},
|
|
1102
|
+
additionalProperties: false
|
|
1103
|
+
};
|
|
1104
|
+
var EDGE_TYPE_SCHEMA = {
|
|
1105
|
+
type: "object",
|
|
1106
|
+
required: ["name", "from", "to"],
|
|
1107
|
+
properties: {
|
|
1108
|
+
name: { type: "string", minLength: 1 },
|
|
1109
|
+
from: {
|
|
1110
|
+
oneOf: [
|
|
1111
|
+
{ type: "string", minLength: 1 },
|
|
1112
|
+
{ type: "array", items: { type: "string", minLength: 1 }, minItems: 1 }
|
|
1113
|
+
]
|
|
1114
|
+
},
|
|
1115
|
+
to: {
|
|
1116
|
+
oneOf: [
|
|
1117
|
+
{ type: "string", minLength: 1 },
|
|
1118
|
+
{ type: "array", items: { type: "string", minLength: 1 }, minItems: 1 }
|
|
1119
|
+
]
|
|
1120
|
+
},
|
|
1121
|
+
jsonSchema: { type: "object" },
|
|
1122
|
+
inverseLabel: { type: "string" },
|
|
1123
|
+
description: { type: "string" },
|
|
1124
|
+
titleField: { type: "string" },
|
|
1125
|
+
subtitleField: { type: "string" },
|
|
1126
|
+
viewTemplate: { type: "string" },
|
|
1127
|
+
viewCss: { type: "string" },
|
|
1128
|
+
allowedIn: { type: "array", items: { type: "string", minLength: 1 } },
|
|
1129
|
+
targetGraph: { type: "string", minLength: 1, pattern: "^[^/]+$" },
|
|
1130
|
+
schemaVersion: { type: "integer", minimum: 0 },
|
|
1131
|
+
migrations: { type: "array", items: STORED_MIGRATION_STEP_SCHEMA },
|
|
1132
|
+
migrationWriteBack: { type: "string", enum: ["off", "eager", "background"] }
|
|
1133
|
+
},
|
|
1134
|
+
additionalProperties: false
|
|
1135
|
+
};
|
|
1136
|
+
var BOOTSTRAP_ENTRIES = [
|
|
1137
|
+
{
|
|
1138
|
+
aType: META_NODE_TYPE,
|
|
1139
|
+
axbType: NODE_RELATION,
|
|
1140
|
+
bType: META_NODE_TYPE,
|
|
1141
|
+
jsonSchema: NODE_TYPE_SCHEMA,
|
|
1142
|
+
description: "Meta-type: defines a node type"
|
|
1143
|
+
},
|
|
1144
|
+
{
|
|
1145
|
+
aType: META_EDGE_TYPE,
|
|
1146
|
+
axbType: NODE_RELATION,
|
|
1147
|
+
bType: META_EDGE_TYPE,
|
|
1148
|
+
jsonSchema: EDGE_TYPE_SCHEMA,
|
|
1149
|
+
description: "Meta-type: defines an edge type"
|
|
1150
|
+
}
|
|
1151
|
+
];
|
|
1152
|
+
var _bootstrapRegistry = null;
|
|
1153
|
+
function createBootstrapRegistry() {
|
|
1154
|
+
if (_bootstrapRegistry) return _bootstrapRegistry;
|
|
1155
|
+
_bootstrapRegistry = createRegistry([...BOOTSTRAP_ENTRIES]);
|
|
1156
|
+
return _bootstrapRegistry;
|
|
1157
|
+
}
|
|
1158
|
+
function generateDeterministicUid(metaType, name) {
|
|
1159
|
+
const hash = (0, import_node_crypto3.createHash)("sha256").update(`${metaType}:${name}`).digest("base64url");
|
|
1160
|
+
return hash.slice(0, 21);
|
|
1161
|
+
}
|
|
1162
|
+
async function createRegistryFromGraph(reader, executor) {
|
|
1163
|
+
const [nodeTypes, edgeTypes] = await Promise.all([
|
|
1164
|
+
reader.findNodes({ aType: META_NODE_TYPE }),
|
|
1165
|
+
reader.findNodes({ aType: META_EDGE_TYPE })
|
|
1166
|
+
]);
|
|
1167
|
+
const entries = [...BOOTSTRAP_ENTRIES];
|
|
1168
|
+
const prevalidations = [];
|
|
1169
|
+
for (const record of nodeTypes) {
|
|
1170
|
+
const data = record.data;
|
|
1171
|
+
if (data.migrations) {
|
|
1172
|
+
for (const m of data.migrations) {
|
|
1173
|
+
prevalidations.push(precompileSource(m.up, executor));
|
|
1174
|
+
}
|
|
1175
|
+
}
|
|
1176
|
+
}
|
|
1177
|
+
for (const record of edgeTypes) {
|
|
1178
|
+
const data = record.data;
|
|
1179
|
+
if (data.migrations) {
|
|
1180
|
+
for (const m of data.migrations) {
|
|
1181
|
+
prevalidations.push(precompileSource(m.up, executor));
|
|
1182
|
+
}
|
|
1183
|
+
}
|
|
1184
|
+
}
|
|
1185
|
+
await Promise.all(prevalidations);
|
|
1186
|
+
for (const record of nodeTypes) {
|
|
1187
|
+
const data = record.data;
|
|
1188
|
+
entries.push({
|
|
1189
|
+
aType: data.name,
|
|
1190
|
+
axbType: NODE_RELATION,
|
|
1191
|
+
bType: data.name,
|
|
1192
|
+
jsonSchema: data.jsonSchema,
|
|
1193
|
+
description: data.description,
|
|
1194
|
+
titleField: data.titleField,
|
|
1195
|
+
subtitleField: data.subtitleField,
|
|
1196
|
+
allowedIn: data.allowedIn,
|
|
1197
|
+
migrations: data.migrations ? compileMigrations(data.migrations, executor) : void 0,
|
|
1198
|
+
migrationWriteBack: data.migrationWriteBack
|
|
1199
|
+
});
|
|
1200
|
+
}
|
|
1201
|
+
for (const record of edgeTypes) {
|
|
1202
|
+
const data = record.data;
|
|
1203
|
+
const fromTypes = Array.isArray(data.from) ? data.from : [data.from];
|
|
1204
|
+
const toTypes = Array.isArray(data.to) ? data.to : [data.to];
|
|
1205
|
+
const compiledMigrations = data.migrations ? compileMigrations(data.migrations, executor) : void 0;
|
|
1206
|
+
for (const aType of fromTypes) {
|
|
1207
|
+
for (const bType of toTypes) {
|
|
1208
|
+
entries.push({
|
|
1209
|
+
aType,
|
|
1210
|
+
axbType: data.name,
|
|
1211
|
+
bType,
|
|
1212
|
+
jsonSchema: data.jsonSchema,
|
|
1213
|
+
description: data.description,
|
|
1214
|
+
inverseLabel: data.inverseLabel,
|
|
1215
|
+
titleField: data.titleField,
|
|
1216
|
+
subtitleField: data.subtitleField,
|
|
1217
|
+
allowedIn: data.allowedIn,
|
|
1218
|
+
targetGraph: data.targetGraph,
|
|
1219
|
+
migrations: compiledMigrations,
|
|
1220
|
+
migrationWriteBack: data.migrationWriteBack
|
|
1221
|
+
});
|
|
1222
|
+
}
|
|
1223
|
+
}
|
|
1224
|
+
}
|
|
1225
|
+
return createRegistry(entries);
|
|
1226
|
+
}
|
|
1227
|
+
|
|
1228
|
+
// src/query.ts
|
|
1229
|
+
function buildEdgeQueryPlan(params) {
|
|
1230
|
+
const { aType, aUid, axbType, bType, bUid, limit, orderBy } = params;
|
|
1231
|
+
if (aUid && axbType && bUid && !params.where?.length) {
|
|
1232
|
+
return { strategy: "get", docId: computeEdgeDocId(aUid, axbType, bUid) };
|
|
1233
|
+
}
|
|
1234
|
+
const filters = [];
|
|
1235
|
+
if (aType) filters.push({ field: "aType", op: "==", value: aType });
|
|
1236
|
+
if (aUid) filters.push({ field: "aUid", op: "==", value: aUid });
|
|
1237
|
+
if (axbType) filters.push({ field: "axbType", op: "==", value: axbType });
|
|
1238
|
+
if (bType) filters.push({ field: "bType", op: "==", value: bType });
|
|
1239
|
+
if (bUid) filters.push({ field: "bUid", op: "==", value: bUid });
|
|
1240
|
+
if (params.where) {
|
|
1241
|
+
for (const clause of params.where) {
|
|
1242
|
+
const field = BUILTIN_FIELDS.has(clause.field) ? clause.field : clause.field.startsWith("data.") ? clause.field : `data.${clause.field}`;
|
|
1243
|
+
filters.push({ field, op: clause.op, value: clause.value });
|
|
1244
|
+
}
|
|
1245
|
+
}
|
|
1246
|
+
if (filters.length === 0) {
|
|
1247
|
+
throw new InvalidQueryError("findEdges requires at least one filter parameter");
|
|
1248
|
+
}
|
|
1249
|
+
const effectiveLimit = limit === void 0 ? DEFAULT_QUERY_LIMIT : limit || void 0;
|
|
1250
|
+
return { strategy: "query", filters, options: { limit: effectiveLimit, orderBy } };
|
|
1251
|
+
}
|
|
1252
|
+
function buildNodeQueryPlan(params) {
|
|
1253
|
+
const { aType, limit, orderBy } = params;
|
|
1254
|
+
const filters = [
|
|
1255
|
+
{ field: "aType", op: "==", value: aType },
|
|
1256
|
+
{ field: "axbType", op: "==", value: NODE_RELATION }
|
|
1257
|
+
];
|
|
1258
|
+
if (params.where) {
|
|
1259
|
+
for (const clause of params.where) {
|
|
1260
|
+
const field = BUILTIN_FIELDS.has(clause.field) ? clause.field : clause.field.startsWith("data.") ? clause.field : `data.${clause.field}`;
|
|
1261
|
+
filters.push({ field, op: clause.op, value: clause.value });
|
|
1262
|
+
}
|
|
1263
|
+
}
|
|
1264
|
+
const effectiveLimit = limit === void 0 ? DEFAULT_QUERY_LIMIT : limit || void 0;
|
|
1265
|
+
return { strategy: "query", filters, options: { limit: effectiveLimit, orderBy } };
|
|
1266
|
+
}
|
|
1267
|
+
|
|
1268
|
+
// src/query-safety.ts
|
|
1269
|
+
var SAFE_INDEX_PATTERNS = [
|
|
1270
|
+
/* @__PURE__ */ new Set(["aUid", "axbType"]),
|
|
1271
|
+
/* @__PURE__ */ new Set(["axbType", "bUid"]),
|
|
1272
|
+
/* @__PURE__ */ new Set(["aType", "axbType"]),
|
|
1273
|
+
/* @__PURE__ */ new Set(["axbType", "bType"])
|
|
1274
|
+
];
|
|
1275
|
+
function analyzeQuerySafety(filters) {
|
|
1276
|
+
const builtinFieldsPresent = /* @__PURE__ */ new Set();
|
|
1277
|
+
let hasDataFilters = false;
|
|
1278
|
+
for (const f of filters) {
|
|
1279
|
+
if (BUILTIN_FIELDS.has(f.field)) {
|
|
1280
|
+
builtinFieldsPresent.add(f.field);
|
|
1281
|
+
} else {
|
|
1282
|
+
hasDataFilters = true;
|
|
1283
|
+
}
|
|
1284
|
+
}
|
|
1285
|
+
for (const pattern of SAFE_INDEX_PATTERNS) {
|
|
1286
|
+
let matched = true;
|
|
1287
|
+
for (const field of pattern) {
|
|
1288
|
+
if (!builtinFieldsPresent.has(field)) {
|
|
1289
|
+
matched = false;
|
|
1290
|
+
break;
|
|
1291
|
+
}
|
|
1292
|
+
}
|
|
1293
|
+
if (matched) {
|
|
1294
|
+
return { safe: true };
|
|
1295
|
+
}
|
|
1296
|
+
}
|
|
1297
|
+
const presentFields = [...builtinFieldsPresent];
|
|
1298
|
+
if (presentFields.length === 0 && hasDataFilters) {
|
|
1299
|
+
return {
|
|
1300
|
+
safe: false,
|
|
1301
|
+
reason: "Query filters only use data.* fields with no builtin field constraints. This requires a full collection scan. Add aType, aUid, axbType, bType, or bUid filters, or set allowCollectionScan: true."
|
|
1302
|
+
};
|
|
1303
|
+
}
|
|
1304
|
+
if (hasDataFilters) {
|
|
1305
|
+
return {
|
|
1306
|
+
safe: false,
|
|
1307
|
+
reason: `Query filters on [${presentFields.join(", ")}] do not match any indexed pattern. data.* filters without an indexed base require a full collection scan. Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). Set allowCollectionScan: true to override.`
|
|
1308
|
+
};
|
|
1309
|
+
}
|
|
1310
|
+
return {
|
|
1311
|
+
safe: false,
|
|
1312
|
+
reason: `Query filters on [${presentFields.join(", ")}] do not match any indexed pattern. This may cause a full collection scan on Firestore Enterprise. Safe patterns: (aUid + axbType), (axbType + bUid), (aType + axbType), (axbType + bType). Set allowCollectionScan: true to override.`
|
|
1313
|
+
};
|
|
1314
|
+
}
|
|
1315
|
+
|
|
1316
|
+
// src/transaction.ts
|
|
1317
|
+
function buildWritableNodeRecord2(aType, uid, data) {
|
|
1318
|
+
return { aType, aUid: uid, axbType: NODE_RELATION, bType: aType, bUid: uid, data };
|
|
1319
|
+
}
|
|
1320
|
+
function buildWritableEdgeRecord2(aType, aUid, axbType, bType, bUid, data) {
|
|
1321
|
+
return { aType, aUid, axbType, bType, bUid, data };
|
|
1322
|
+
}
|
|
1323
|
+
var GraphTransactionImpl = class {
  /**
   * Transaction-scoped graph facade. Mirrors the client's read/write surface,
   * but every storage call goes through the transaction backend so reads and
   * writes participate in the surrounding transaction. Unlike the client
   * path, migration write-backs here are awaited (synchronous within the tx).
   */
  constructor(backend, registry, scanProtection = "error", scopePath = "", globalWriteBack = "off") {
    this.backend = backend;
    this.registry = registry;
    this.scanProtection = scanProtection;
    this.scopePath = scopePath;
    this.globalWriteBack = globalWriteBack;
  }
  async getNode(uid) {
    const docId = computeNodeDocId(uid);
    const raw = await this.backend.getDoc(docId);
    if (!raw || !this.registry) return raw;
    const outcome = await migrateRecord(raw, this.registry, this.globalWriteBack);
    if (outcome.migrated && outcome.writeBack !== "off") {
      // Awaited write-back: the migrated shape persists in this transaction.
      await this.backend.updateDoc(docId, {
        replaceData: outcome.record.data,
        v: outcome.record.v
      });
    }
    return outcome.record;
  }
  async getEdge(aUid, axbType, bUid) {
    const docId = computeEdgeDocId(aUid, axbType, bUid);
    const raw = await this.backend.getDoc(docId);
    if (!raw || !this.registry) return raw;
    const outcome = await migrateRecord(raw, this.registry, this.globalWriteBack);
    if (outcome.migrated && outcome.writeBack !== "off") {
      await this.backend.updateDoc(docId, {
        replaceData: outcome.record.data,
        v: outcome.record.v
      });
    }
    return outcome.record;
  }
  async edgeExists(aUid, axbType, bUid) {
    const found = await this.backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
    return found !== null;
  }
  checkQuerySafety(filters, allowCollectionScan) {
    // Explicit opt-in or a disabled guard bypasses the analysis entirely.
    if (allowCollectionScan) return;
    if (this.scanProtection === "off") return;
    const verdict = analyzeQuerySafety(filters);
    if (verdict.safe) return;
    if (this.scanProtection === "error") {
      throw new QuerySafetyError(verdict.reason);
    }
    console.warn(`[firegraph] Query safety warning: ${verdict.reason}`);
  }
  async findEdges(params) {
    const plan = buildEdgeQueryPlan(params);
    let rows;
    if (plan.strategy === "get") {
      // Fully-identified edge: a direct doc lookup beats a query.
      const one = await this.backend.getDoc(plan.docId);
      rows = one ? [one] : [];
    } else {
      this.checkQuerySafety(plan.filters, params.allowCollectionScan);
      rows = await this.backend.query(plan.filters, plan.options);
    }
    return this.applyMigrations(rows);
  }
  async findNodes(params) {
    const plan = buildNodeQueryPlan(params);
    let rows;
    if (plan.strategy === "get") {
      const one = await this.backend.getDoc(plan.docId);
      rows = one ? [one] : [];
    } else {
      this.checkQuerySafety(plan.filters, params.allowCollectionScan);
      rows = await this.backend.query(plan.filters, plan.options);
    }
    return this.applyMigrations(rows);
  }
  async applyMigrations(records) {
    if (!this.registry || records.length === 0) return records;
    const outcomes = await migrateRecords(records, this.registry, this.globalWriteBack);
    for (const outcome of outcomes) {
      if (!outcome.migrated || outcome.writeBack === "off") continue;
      const rec = outcome.record;
      // A node is stored as a self-edge with the NODE_RELATION marker.
      const docId = rec.axbType === NODE_RELATION ? computeNodeDocId(rec.aUid) : computeEdgeDocId(rec.aUid, rec.axbType, rec.bUid);
      await this.backend.updateDoc(docId, {
        replaceData: rec.data,
        v: rec.v
      });
    }
    return outcomes.map((o) => o.record);
  }
  // Merge-mode upserts.
  async putNode(aType, uid, data) {
    await this.writeNode(aType, uid, data, "merge");
  }
  async putEdge(aType, aUid, axbType, bType, bUid, data) {
    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
  }
  // Replace-mode writes.
  async replaceNode(aType, uid, data) {
    await this.writeNode(aType, uid, data, "replace");
  }
  async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
    await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
  }
  async writeNode(aType, uid, data, mode) {
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
    if (this.registry) {
      this.registry.validate(aType, NODE_RELATION, aType, data, this.scopePath);
    }
    const record = buildWritableNodeRecord2(aType, uid, data);
    if (this.registry) {
      // Stamp the registry's schema version on the envelope when declared.
      const entry = this.registry.lookup(aType, NODE_RELATION, aType);
      if (entry?.schemaVersion && entry.schemaVersion > 0) {
        record.v = entry.schemaVersion;
      }
    }
    await this.backend.setDoc(computeNodeDocId(uid), record, mode);
  }
  async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
    assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
    if (this.registry) {
      this.registry.validate(aType, axbType, bType, data, this.scopePath);
    }
    const record = buildWritableEdgeRecord2(aType, aUid, axbType, bType, bUid, data);
    if (this.registry) {
      const entry = this.registry.lookup(aType, axbType, bType);
      if (entry?.schemaVersion && entry.schemaVersion > 0) {
        record.v = entry.schemaVersion;
      }
    }
    await this.backend.setDoc(computeEdgeDocId(aUid, axbType, bUid), record, mode);
  }
  async updateNode(uid, data) {
    await this.backend.updateDoc(computeNodeDocId(uid), { dataOps: flattenPatch(data) });
  }
  async updateEdge(aUid, axbType, bUid, data) {
    await this.backend.updateDoc(computeEdgeDocId(aUid, axbType, bUid), { dataOps: flattenPatch(data) });
  }
  async removeNode(uid) {
    await this.backend.deleteDoc(computeNodeDocId(uid));
  }
  async removeEdge(aUid, axbType, bUid) {
    await this.backend.deleteDoc(computeEdgeDocId(aUid, axbType, bUid));
  }
};
|
|
1468
|
+
|
|
1469
|
+
// src/client.ts
// Type names reserved for firegraph's internal meta records.
// NOTE(review): no usage is visible in this chunk — presumably consulted
// elsewhere to reject user-supplied types colliding with
// META_NODE_TYPE / META_EDGE_TYPE; confirm against the rest of the file.
var RESERVED_TYPE_NAMES = /* @__PURE__ */ new Set([META_NODE_TYPE, META_EDGE_TYPE]);
|
|
1471
|
+
/**
 * Envelope for a node write. A node is stored as a self-edge: both
 * endpoints are the node itself and the relation is the reserved
 * NODE_RELATION marker.
 */
function buildWritableNodeRecord3(aType, uid, data) {
  return {
    aType,
    aUid: uid,
    axbType: NODE_RELATION,
    bType: aType,
    bUid: uid,
    data
  };
}
|
|
1474
|
+
/**
 * Envelope for an edge write: the five identifying columns plus the
 * caller-supplied payload, returned as a fresh plain object.
 */
function buildWritableEdgeRecord3(aType, aUid, axbType, bType, bUid, data) {
  const record = { aType, aUid, axbType, bType, bUid, data };
  return record;
}
|
|
1477
|
+
var GraphClientImpl = class _GraphClientImpl {
|
|
1478
|
+
constructor(backend, options, metaBackend) {
|
|
1479
|
+
this.backend = backend;
|
|
1480
|
+
this.globalWriteBack = options?.migrationWriteBack ?? "off";
|
|
1481
|
+
this.migrationSandbox = options?.migrationSandbox;
|
|
1482
|
+
if (options?.registryMode) {
|
|
1483
|
+
this.dynamicConfig = options.registryMode;
|
|
1484
|
+
this.bootstrapRegistry = createBootstrapRegistry();
|
|
1485
|
+
if (options.registry) {
|
|
1486
|
+
this.staticRegistry = options.registry;
|
|
1487
|
+
}
|
|
1488
|
+
this.metaBackend = metaBackend;
|
|
1489
|
+
} else {
|
|
1490
|
+
this.staticRegistry = options?.registry;
|
|
1491
|
+
}
|
|
1492
|
+
this.scanProtection = options?.scanProtection ?? "error";
|
|
1493
|
+
}
|
|
1494
|
+
// Effective scan-protection mode; 'error' throws on unsafe queries,
// 'off' disables the check, any other value only warns (see checkQuerySafety).
scanProtection;
/**
 * Capability set of the underlying backend. Mirrors `backend.capabilities`
 * verbatim so callers can portability-check (`client.capabilities.has(
 * 'query.join')`) without reaching for the backend handle. Static for the
 * lifetime of the client.
 */
get capabilities() {
  return this.backend.capabilities;
}
// Static mode: the single registry supplied at construction (may be undefined).
staticRegistry;
// Dynamic mode: registryMode config, bootstrap registry for meta types,
// lazily-populated dynamic registry, and the optional meta-record backend.
dynamicConfig;
bootstrapRegistry;
dynamicRegistry;
metaBackend;
// Migration settings (write-back policy and optional sandbox hook).
globalWriteBack;
migrationSandbox;
|
|
1514
|
+
// ---------------------------------------------------------------------------
// Backend access (exposed for traversal helpers and subgraph cloning)
// ---------------------------------------------------------------------------
/** @internal Raw storage-backend handle for in-package helpers. */
getBackend() {
  return this.backend;
}
/**
 * Snapshot of the currently-effective registry. Returns the merged view
 * used for domain-type validation and migration — in dynamic mode this is
 * `dynamicRegistry ?? staticRegistry ?? bootstrapRegistry`, so callers see
 * updates after `reloadRegistry()` without having to re-resolve anything.
 *
 * Exposed for backends that need topology access during bulk operations
 * (e.g. the Cloudflare DO backend's cross-DO cascade). Not part of the
 * public `GraphClient` surface.
 *
 * @internal
 */
getRegistrySnapshot() {
  return this.getCombinedRegistry();
}
|
|
1536
|
+
// ---------------------------------------------------------------------------
|
|
1537
|
+
// Registry routing
|
|
1538
|
+
// ---------------------------------------------------------------------------
|
|
1539
|
+
getRegistryForType(aType) {
|
|
1540
|
+
if (!this.dynamicConfig) return this.staticRegistry;
|
|
1541
|
+
if (aType === META_NODE_TYPE || aType === META_EDGE_TYPE) {
|
|
1542
|
+
return this.bootstrapRegistry;
|
|
1543
|
+
}
|
|
1544
|
+
return this.dynamicRegistry ?? this.staticRegistry ?? this.bootstrapRegistry;
|
|
1545
|
+
}
|
|
1546
|
+
getBackendForType(aType) {
|
|
1547
|
+
if (this.metaBackend && (aType === META_NODE_TYPE || aType === META_EDGE_TYPE)) {
|
|
1548
|
+
return this.metaBackend;
|
|
1549
|
+
}
|
|
1550
|
+
return this.backend;
|
|
1551
|
+
}
|
|
1552
|
+
getCombinedRegistry() {
|
|
1553
|
+
if (!this.dynamicConfig) return this.staticRegistry;
|
|
1554
|
+
return this.dynamicRegistry ?? this.staticRegistry ?? this.bootstrapRegistry;
|
|
1555
|
+
}
|
|
1556
|
+
// ---------------------------------------------------------------------------
|
|
1557
|
+
// Query safety
|
|
1558
|
+
// ---------------------------------------------------------------------------
|
|
1559
|
+
checkQuerySafety(filters, allowCollectionScan) {
|
|
1560
|
+
if (allowCollectionScan || this.scanProtection === "off") return;
|
|
1561
|
+
const result = analyzeQuerySafety(filters);
|
|
1562
|
+
if (result.safe) return;
|
|
1563
|
+
if (this.scanProtection === "error") {
|
|
1564
|
+
throw new QuerySafetyError(result.reason);
|
|
1565
|
+
}
|
|
1566
|
+
console.warn(`[firegraph] Query safety warning: ${result.reason}`);
|
|
1567
|
+
}
|
|
1568
|
+
// ---------------------------------------------------------------------------
|
|
1569
|
+
// Migration helpers
|
|
1570
|
+
// ---------------------------------------------------------------------------
|
|
1571
|
+
async applyMigration(record, docId) {
|
|
1572
|
+
const registry = this.getCombinedRegistry();
|
|
1573
|
+
if (!registry) return record;
|
|
1574
|
+
const result = await migrateRecord(record, registry, this.globalWriteBack);
|
|
1575
|
+
if (result.migrated) {
|
|
1576
|
+
this.handleWriteBack(result, docId);
|
|
1577
|
+
}
|
|
1578
|
+
return result.record;
|
|
1579
|
+
}
|
|
1580
|
+
async applyMigrations(records) {
|
|
1581
|
+
const registry = this.getCombinedRegistry();
|
|
1582
|
+
if (!registry || records.length === 0) return records;
|
|
1583
|
+
const results = await migrateRecords(records, registry, this.globalWriteBack);
|
|
1584
|
+
for (const result of results) {
|
|
1585
|
+
if (result.migrated) {
|
|
1586
|
+
const docId = result.record.axbType === NODE_RELATION ? computeNodeDocId(result.record.aUid) : computeEdgeDocId(result.record.aUid, result.record.axbType, result.record.bUid);
|
|
1587
|
+
this.handleWriteBack(result, docId);
|
|
1588
|
+
}
|
|
1589
|
+
}
|
|
1590
|
+
return results.map((r) => r.record);
|
|
1591
|
+
}
|
|
1592
|
+
/**
|
|
1593
|
+
* Fire-and-forget write-back for a migrated record. Both `'eager'` and
|
|
1594
|
+
* `'background'` are non-blocking; the difference is the log level on
|
|
1595
|
+
* failure. For synchronous write-back, use a transaction — see
|
|
1596
|
+
* `GraphTransactionImpl`.
|
|
1597
|
+
*/
|
|
1598
|
+
handleWriteBack(result, docId) {
|
|
1599
|
+
if (result.writeBack === "off") return;
|
|
1600
|
+
const doWriteBack = async () => {
|
|
1601
|
+
try {
|
|
1602
|
+
await this.backend.updateDoc(docId, {
|
|
1603
|
+
replaceData: result.record.data,
|
|
1604
|
+
v: result.record.v
|
|
1605
|
+
});
|
|
1606
|
+
} catch (err) {
|
|
1607
|
+
const msg = `[firegraph] Migration write-back failed for ${docId}: ${err.message}`;
|
|
1608
|
+
if (result.writeBack === "eager") {
|
|
1609
|
+
console.error(msg);
|
|
1610
|
+
} else {
|
|
1611
|
+
console.warn(msg);
|
|
1612
|
+
}
|
|
1613
|
+
}
|
|
1614
|
+
};
|
|
1615
|
+
void doWriteBack();
|
|
1616
|
+
}
|
|
1617
|
+
// ---------------------------------------------------------------------------
|
|
1618
|
+
// GraphReader
|
|
1619
|
+
// ---------------------------------------------------------------------------
|
|
1620
|
+
async getNode(uid) {
|
|
1621
|
+
const docId = computeNodeDocId(uid);
|
|
1622
|
+
const record = await this.backend.getDoc(docId);
|
|
1623
|
+
if (!record) return null;
|
|
1624
|
+
return this.applyMigration(record, docId);
|
|
1625
|
+
}
|
|
1626
|
+
async getEdge(aUid, axbType, bUid) {
|
|
1627
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1628
|
+
const record = await this.backend.getDoc(docId);
|
|
1629
|
+
if (!record) return null;
|
|
1630
|
+
return this.applyMigration(record, docId);
|
|
1631
|
+
}
|
|
1632
|
+
async edgeExists(aUid, axbType, bUid) {
|
|
1633
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1634
|
+
const record = await this.backend.getDoc(docId);
|
|
1635
|
+
return record !== null;
|
|
1636
|
+
}
|
|
1637
|
+
async findEdges(params) {
|
|
1638
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1639
|
+
let records;
|
|
1640
|
+
if (plan.strategy === "get") {
|
|
1641
|
+
const record = await this.backend.getDoc(plan.docId);
|
|
1642
|
+
records = record ? [record] : [];
|
|
1643
|
+
} else {
|
|
1644
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1645
|
+
records = await this.backend.query(plan.filters, plan.options);
|
|
1646
|
+
}
|
|
1647
|
+
return this.applyMigrations(records);
|
|
1648
|
+
}
|
|
1649
|
+
async findNodes(params) {
|
|
1650
|
+
const plan = buildNodeQueryPlan(params);
|
|
1651
|
+
let records;
|
|
1652
|
+
if (plan.strategy === "get") {
|
|
1653
|
+
const record = await this.backend.getDoc(plan.docId);
|
|
1654
|
+
records = record ? [record] : [];
|
|
1655
|
+
} else {
|
|
1656
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1657
|
+
records = await this.backend.query(plan.filters, plan.options);
|
|
1658
|
+
}
|
|
1659
|
+
return this.applyMigrations(records);
|
|
1660
|
+
}
|
|
1661
|
+
// ---------------------------------------------------------------------------
// GraphWriter
// ---------------------------------------------------------------------------
// Merge-mode upserts: existing fields not present in `data` are kept.
async putNode(aType, uid, data) {
  await this.writeNode(aType, uid, data, "merge");
}
async putEdge(aType, aUid, axbType, bType, bUid, data) {
  await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "merge");
}
// Replace-mode writes: the stored payload becomes exactly `data`.
async replaceNode(aType, uid, data) {
  await this.writeNode(aType, uid, data, "replace");
}
async replaceEdge(aType, aUid, axbType, bType, bUid, data) {
  await this.writeEdge(aType, aUid, axbType, bType, bUid, data, "replace");
}
|
|
1676
|
+
async writeNode(aType, uid, data, mode) {
|
|
1677
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceNode" : "putNode");
|
|
1678
|
+
const registry = this.getRegistryForType(aType);
|
|
1679
|
+
if (registry) {
|
|
1680
|
+
registry.validate(aType, NODE_RELATION, aType, data, this.backend.scopePath);
|
|
1681
|
+
}
|
|
1682
|
+
const backend = this.getBackendForType(aType);
|
|
1683
|
+
const docId = computeNodeDocId(uid);
|
|
1684
|
+
const record = buildWritableNodeRecord3(aType, uid, data);
|
|
1685
|
+
if (registry) {
|
|
1686
|
+
const entry = registry.lookup(aType, NODE_RELATION, aType);
|
|
1687
|
+
if (entry?.schemaVersion && entry.schemaVersion > 0) {
|
|
1688
|
+
record.v = entry.schemaVersion;
|
|
1689
|
+
}
|
|
1690
|
+
}
|
|
1691
|
+
await backend.setDoc(docId, record, mode);
|
|
1692
|
+
}
|
|
1693
|
+
async writeEdge(aType, aUid, axbType, bType, bUid, data, mode) {
|
|
1694
|
+
assertNoDeleteSentinels(data, mode === "replace" ? "replaceEdge" : "putEdge");
|
|
1695
|
+
const registry = this.getRegistryForType(aType);
|
|
1696
|
+
if (registry) {
|
|
1697
|
+
registry.validate(aType, axbType, bType, data, this.backend.scopePath);
|
|
1698
|
+
}
|
|
1699
|
+
const backend = this.getBackendForType(aType);
|
|
1700
|
+
const docId = computeEdgeDocId(aUid, axbType, bUid);
|
|
1701
|
+
const record = buildWritableEdgeRecord3(aType, aUid, axbType, bType, bUid, data);
|
|
1702
|
+
if (registry) {
|
|
1703
|
+
const entry = registry.lookup(aType, axbType, bType);
|
|
1704
|
+
if (entry?.schemaVersion && entry.schemaVersion > 0) {
|
|
1705
|
+
record.v = entry.schemaVersion;
|
|
1706
|
+
}
|
|
1707
|
+
}
|
|
1708
|
+
await backend.setDoc(docId, record, mode);
|
|
1709
|
+
}
|
|
1710
|
+
// Partial update: `data` is flattened into per-field ops (deep merge).
// NOTE(review): these four methods address `this.backend` directly and never
// route through getBackendForType — confirm meta records are not expected to
// be updated/removed via this path.
async updateNode(uid, data) {
  const docId = computeNodeDocId(uid);
  await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
}
async updateEdge(aUid, axbType, bUid, data) {
  const docId = computeEdgeDocId(aUid, axbType, bUid);
  await this.backend.updateDoc(docId, { dataOps: flattenPatch(data) });
}
// Hard deletes of a single document; no cascade (see removeNodeCascade).
async removeNode(uid) {
  const docId = computeNodeDocId(uid);
  await this.backend.deleteDoc(docId);
}
async removeEdge(aUid, axbType, bUid) {
  const docId = computeEdgeDocId(aUid, axbType, bUid);
  await this.backend.deleteDoc(docId);
}
|
|
1726
|
+
// ---------------------------------------------------------------------------
|
|
1727
|
+
// Transactions & Batches
|
|
1728
|
+
// ---------------------------------------------------------------------------
|
|
1729
|
+
async runTransaction(fn) {
|
|
1730
|
+
return this.backend.runTransaction(async (txBackend) => {
|
|
1731
|
+
const graphTx = new GraphTransactionImpl(
|
|
1732
|
+
txBackend,
|
|
1733
|
+
this.getCombinedRegistry(),
|
|
1734
|
+
this.scanProtection,
|
|
1735
|
+
this.backend.scopePath,
|
|
1736
|
+
this.globalWriteBack
|
|
1737
|
+
);
|
|
1738
|
+
return fn(graphTx);
|
|
1739
|
+
});
|
|
1740
|
+
}
|
|
1741
|
+
batch() {
|
|
1742
|
+
return new GraphBatchImpl(
|
|
1743
|
+
this.backend.createBatch(),
|
|
1744
|
+
this.getCombinedRegistry(),
|
|
1745
|
+
this.backend.scopePath
|
|
1746
|
+
);
|
|
1747
|
+
}
|
|
1748
|
+
// ---------------------------------------------------------------------------
|
|
1749
|
+
// Subgraph
|
|
1750
|
+
// ---------------------------------------------------------------------------
|
|
1751
|
+
subgraph(parentNodeUid, name = "graph") {
|
|
1752
|
+
if (!parentNodeUid || parentNodeUid.includes("/")) {
|
|
1753
|
+
throw new FiregraphError(
|
|
1754
|
+
`Invalid parentNodeUid for subgraph: "${parentNodeUid}". Must be a non-empty string without "/".`,
|
|
1755
|
+
"INVALID_SUBGRAPH"
|
|
1756
|
+
);
|
|
1757
|
+
}
|
|
1758
|
+
if (name.includes("/")) {
|
|
1759
|
+
throw new FiregraphError(
|
|
1760
|
+
`Subgraph name must not contain "/": got "${name}". Use chained .subgraph() calls for nested subgraphs.`,
|
|
1761
|
+
"INVALID_SUBGRAPH"
|
|
1762
|
+
);
|
|
1763
|
+
}
|
|
1764
|
+
const childBackend = this.backend.subgraph(parentNodeUid, name);
|
|
1765
|
+
return new _GraphClientImpl(
|
|
1766
|
+
childBackend,
|
|
1767
|
+
{
|
|
1768
|
+
registry: this.getCombinedRegistry(),
|
|
1769
|
+
scanProtection: this.scanProtection,
|
|
1770
|
+
migrationWriteBack: this.globalWriteBack,
|
|
1771
|
+
migrationSandbox: this.migrationSandbox
|
|
1772
|
+
}
|
|
1773
|
+
// Subgraphs do not have meta-backends; meta lives only at the root.
|
|
1774
|
+
);
|
|
1775
|
+
}
|
|
1776
|
+
// ---------------------------------------------------------------------------
|
|
1777
|
+
// Collection group query
|
|
1778
|
+
// ---------------------------------------------------------------------------
|
|
1779
|
+
async findEdgesGlobal(params, collectionName) {
|
|
1780
|
+
if (!this.backend.findEdgesGlobal) {
|
|
1781
|
+
throw new FiregraphError(
|
|
1782
|
+
"findEdgesGlobal() is not supported by the current storage backend.",
|
|
1783
|
+
"UNSUPPORTED_OPERATION"
|
|
1784
|
+
);
|
|
1785
|
+
}
|
|
1786
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1787
|
+
if (plan.strategy === "get") {
|
|
1788
|
+
throw new FiregraphError(
|
|
1789
|
+
"findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
|
|
1790
|
+
"INVALID_QUERY"
|
|
1791
|
+
);
|
|
1792
|
+
}
|
|
1793
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1794
|
+
const records = await this.backend.findEdgesGlobal(params, collectionName);
|
|
1795
|
+
return this.applyMigrations(records);
|
|
1796
|
+
}
|
|
1797
|
+
// ---------------------------------------------------------------------------
|
|
1798
|
+
// Aggregate query (capability: query.aggregate)
|
|
1799
|
+
// ---------------------------------------------------------------------------
|
|
1800
|
+
async aggregate(params) {
|
|
1801
|
+
if (!this.backend.aggregate) {
|
|
1802
|
+
throw new FiregraphError(
|
|
1803
|
+
"aggregate() is not supported by the current storage backend.",
|
|
1804
|
+
"UNSUPPORTED_OPERATION"
|
|
1805
|
+
);
|
|
1806
|
+
}
|
|
1807
|
+
const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
|
|
1808
|
+
if (!hasAnyFilter) {
|
|
1809
|
+
this.checkQuerySafety([], params.allowCollectionScan);
|
|
1810
|
+
const result2 = await this.backend.aggregate(params.aggregates, []);
|
|
1811
|
+
return result2;
|
|
1812
|
+
}
|
|
1813
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1814
|
+
if (plan.strategy === "get") {
|
|
1815
|
+
throw new FiregraphError(
|
|
1816
|
+
"aggregate() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
|
|
1817
|
+
"INVALID_QUERY"
|
|
1818
|
+
);
|
|
1819
|
+
}
|
|
1820
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
1821
|
+
const result = await this.backend.aggregate(params.aggregates, plan.filters);
|
|
1822
|
+
return result;
|
|
1823
|
+
}
|
|
1824
|
+
// ---------------------------------------------------------------------------
// Bulk operations
// ---------------------------------------------------------------------------
// Cascade-delete a node; delegated to the backend, which receives `this`
// for registry/topology access during the cascade.
async removeNodeCascade(uid, options) {
  return this.backend.removeNodeCascade(uid, this, options);
}
// Remove all edges matching `params` via the backend's fetch-then-delete
// loop (the cap-less fallback to bulkDelete — see that method's JSDoc).
async bulkRemoveEdges(params, options) {
  return this.backend.bulkRemoveEdges(params, this, options);
}
|
|
1833
|
+
// ---------------------------------------------------------------------------
|
|
1834
|
+
// Server-side DML (capability: query.dml)
|
|
1835
|
+
// ---------------------------------------------------------------------------
|
|
1836
|
+
/**
|
|
1837
|
+
* Single-statement bulk DELETE. Translates `params` to a filter list via
|
|
1838
|
+
* `buildEdgeQueryPlan` (the same plan `findEdges` uses) and dispatches to
|
|
1839
|
+
* `backend.bulkDelete`. The fetch-then-delete loop in `bulkRemoveEdges`
|
|
1840
|
+
* is the cap-less fallback; this method is the fast path on backends
|
|
1841
|
+
* declaring `query.dml`.
|
|
1842
|
+
*
|
|
1843
|
+
* Scan-protection rules match `findEdges`: a query with no identifying
|
|
1844
|
+
* fields requires `allowCollectionScan: true` to pass. A bare-empty
|
|
1845
|
+
* filter set (no `aType`, `aUid`, etc., no `where`) is allowed at this
|
|
1846
|
+
* layer — shared SQLite bounds the blast radius via its leading `scope`
|
|
1847
|
+
* predicate — but the DO RPC backend rejects empty filters at the wire
|
|
1848
|
+
* boundary as defense-in-depth. To wipe a routed subgraph DO, use
|
|
1849
|
+
* `removeNodeCascade` on the parent node instead.
|
|
1850
|
+
*/
|
|
1851
|
+
async bulkDelete(params, options) {
|
|
1852
|
+
if (!this.backend.bulkDelete) {
|
|
1853
|
+
throw new FiregraphError(
|
|
1854
|
+
"bulkDelete() is not supported by the current storage backend. Fall back to bulkRemoveEdges() for backends without query.dml (e.g. Firestore Standard).",
|
|
1855
|
+
"UNSUPPORTED_OPERATION"
|
|
1856
|
+
);
|
|
1857
|
+
}
|
|
1858
|
+
const filters = this.buildDmlFilters(params);
|
|
1859
|
+
return this.backend.bulkDelete(filters, options);
|
|
1860
|
+
}
|
|
1861
|
+
/**
|
|
1862
|
+
* Single-statement bulk UPDATE. Same translation path as `bulkDelete`,
|
|
1863
|
+
* but the patch is deep-merged into each matching row's `data` via the
|
|
1864
|
+
* shared `flattenPatch` pipeline. Identifying columns are immutable
|
|
1865
|
+
* through this path (see `BulkUpdatePatch` JSDoc).
|
|
1866
|
+
*
|
|
1867
|
+
* Empty-patch rejection happens inside the backend (`compileBulkUpdate`)
|
|
1868
|
+
* — a `data: {}` payload would only rewrite `updated_at`, which is
|
|
1869
|
+
* almost certainly a bug.
|
|
1870
|
+
*/
|
|
1871
|
+
async bulkUpdate(params, patch, options) {
|
|
1872
|
+
if (!this.backend.bulkUpdate) {
|
|
1873
|
+
throw new FiregraphError(
|
|
1874
|
+
"bulkUpdate() is not supported by the current storage backend.",
|
|
1875
|
+
"UNSUPPORTED_OPERATION"
|
|
1876
|
+
);
|
|
1877
|
+
}
|
|
1878
|
+
const filters = this.buildDmlFilters(params);
|
|
1879
|
+
return this.backend.bulkUpdate(filters, patch, options);
|
|
1880
|
+
}
|
|
1881
|
+
// ---------------------------------------------------------------------------
|
|
1882
|
+
// Multi-source fan-out (capability: query.join)
|
|
1883
|
+
// ---------------------------------------------------------------------------
|
|
1884
|
+
/**
|
|
1885
|
+
* Fan out from `params.sources` over a single edge type in one round trip.
|
|
1886
|
+
* On backends without `query.join`, throws `UNSUPPORTED_OPERATION` — the
|
|
1887
|
+
* cap-less fallback is the per-source `findEdges` loop, which lives in
|
|
1888
|
+
* `traverse.ts` (the higher-level traversal walker) rather than here.
|
|
1889
|
+
*
|
|
1890
|
+
* `expand()` is intentionally edge-type-only — the source set is a flat
|
|
1891
|
+
* UID list and the hop matches one `axbType`. Multi-axbType expansions
|
|
1892
|
+
* become multiple `expand()` calls, one per relation.
|
|
1893
|
+
*
|
|
1894
|
+
* `params.sources.length === 0` short-circuits to an empty result. The
|
|
1895
|
+
* backend never sees the call. (`compileExpand` itself rejects empty
|
|
1896
|
+
* because `IN ()` is not valid SQL.)
|
|
1897
|
+
*/
|
|
1898
|
+
async expand(params) {
|
|
1899
|
+
if (!this.backend.expand) {
|
|
1900
|
+
throw new FiregraphError(
|
|
1901
|
+
"expand() is not supported by the current storage backend. Backends without `query.join` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent (just slower).",
|
|
1902
|
+
"UNSUPPORTED_OPERATION"
|
|
1903
|
+
);
|
|
1904
|
+
}
|
|
1905
|
+
if (params.sources.length === 0) {
|
|
1906
|
+
return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
|
|
1907
|
+
}
|
|
1908
|
+
return this.backend.expand(params);
|
|
1909
|
+
}
|
|
1910
|
+
// ---------------------------------------------------------------------------
|
|
1911
|
+
// Engine-level multi-hop traversal (capability: traversal.serverSide)
|
|
1912
|
+
// ---------------------------------------------------------------------------
|
|
1913
|
+
/**
|
|
1914
|
+
* Compile a multi-hop traversal spec into one server-side nested
|
|
1915
|
+
* Pipeline and dispatch a single round trip.
|
|
1916
|
+
*
|
|
1917
|
+
* Backends declaring `traversal.serverSide` (Firestore Enterprise
|
|
1918
|
+
* today) install this method; everywhere else, it throws
|
|
1919
|
+
* `UNSUPPORTED_OPERATION`. The capability gate matches the type-level
|
|
1920
|
+
* surface — `GraphClient<C>` only exposes `runEngineTraversal` when
|
|
1921
|
+
* `'traversal.serverSide' extends C`.
|
|
1922
|
+
*
|
|
1923
|
+
* Most callers should not invoke this method directly; the
|
|
1924
|
+
* `createTraversal(...).run()` builder routes through it
|
|
1925
|
+
* automatically when `engineTraversal: 'auto'` (the default) and
|
|
1926
|
+
* the spec is eligible per `firestore-traverse-compiler.ts`. Calling
|
|
1927
|
+
* directly is appropriate for benchmarking or for callers that have
|
|
1928
|
+
* already shaped their hop chain into the strict
|
|
1929
|
+
* `EngineTraversalParams` shape.
|
|
1930
|
+
*
|
|
1931
|
+
* `params.sources.length === 0` short-circuits to empty per-hop
|
|
1932
|
+
* arrays. The backend never sees the call.
|
|
1933
|
+
*/
|
|
1934
|
+
async runEngineTraversal(params) {
|
|
1935
|
+
if (!this.backend.runEngineTraversal) {
|
|
1936
|
+
throw new FiregraphError(
|
|
1937
|
+
"runEngineTraversal() is not supported by the current storage backend. Backends without `traversal.serverSide` can use createTraversal() instead \u2014 the per-hop loop is functionally equivalent for in-graph specs (different round-trip profile).",
|
|
1938
|
+
"UNSUPPORTED_OPERATION"
|
|
1939
|
+
);
|
|
1940
|
+
}
|
|
1941
|
+
if (params.sources.length === 0) {
|
|
1942
|
+
return {
|
|
1943
|
+
hops: params.hops.map(() => ({ edges: [], sourceCount: 0 })),
|
|
1944
|
+
totalReads: 0
|
|
1945
|
+
};
|
|
1946
|
+
}
|
|
1947
|
+
return this.backend.runEngineTraversal(params);
|
|
1948
|
+
}
|
|
1949
|
+
// ---------------------------------------------------------------------------
|
|
1950
|
+
// Server-side projection (capability: query.select)
|
|
1951
|
+
// ---------------------------------------------------------------------------
|
|
1952
|
+
/**
|
|
1953
|
+
* Server-side projection — fetch only the requested fields from each
|
|
1954
|
+
* matching edge. The backend translates the call into a projecting query
|
|
1955
|
+
* (`SELECT json_extract(...)` on SQLite/DO, `Query.select(...)` on
|
|
1956
|
+
* Firestore Standard, classic projection on Enterprise) so the wire
|
|
1957
|
+
* payload is reduced to just the requested fields.
|
|
1958
|
+
*
|
|
1959
|
+
* Resolution rules for `select` (mirrored across all backends):
|
|
1960
|
+
*
|
|
1961
|
+
* - Built-in envelope fields (`aType`, `aUid`, `axbType`, `bType`,
|
|
1962
|
+
* `bUid`, `createdAt`, `updatedAt`, `v`) → resolve to the typed
|
|
1963
|
+
* column / Firestore field directly.
|
|
1964
|
+
* - `'data'` literal → returns the whole user payload.
|
|
1965
|
+
* - `'data.<x>'` → explicit nested path, returned at the same shape.
|
|
1966
|
+
* - bare name → rewritten to `data.<name>` (the canonical "give me a
|
|
1967
|
+
* few keys out of the JSON payload" shape).
|
|
1968
|
+
*
|
|
1969
|
+
* Empty `select: []` is rejected with `INVALID_QUERY`. Duplicate entries
|
|
1970
|
+
* are de-duped (first-occurrence order preserved); the result row carries
|
|
1971
|
+
* one slot per unique field.
|
|
1972
|
+
*
|
|
1973
|
+
* Migrations are *not* applied to the result. The caller asked for a
|
|
1974
|
+
* partial shape, and rehydrating it through the migration pipeline would
|
|
1975
|
+
* require synthesising every absent field — see
|
|
1976
|
+
* `StorageBackend.findEdgesProjected` for the rationale.
|
|
1977
|
+
*
|
|
1978
|
+
* Scan protection follows the `findEdges` rules: a query with no
|
|
1979
|
+
* identifying fields requires `allowCollectionScan: true` to pass. The
|
|
1980
|
+
* cap-less fallback would be `findEdges` + JS-side projection, but that
|
|
1981
|
+
* defeats the wire-payload reduction; backends without `query.select`
|
|
1982
|
+
* throw `UNSUPPORTED_OPERATION` rather than silently materialising full
|
|
1983
|
+
* rows.
|
|
1984
|
+
*/
|
|
1985
|
+
async findEdgesProjected(params) {
|
|
1986
|
+
if (!this.backend.findEdgesProjected) {
|
|
1987
|
+
throw new FiregraphError(
|
|
1988
|
+
"findEdgesProjected() is not supported by the current storage backend. There is no client-side fallback because the wire-payload reduction is the entire point of the API \u2014 use findEdges() and project in JS if the backend does not declare `query.select`.",
|
|
1989
|
+
"UNSUPPORTED_OPERATION"
|
|
1990
|
+
);
|
|
1991
|
+
}
|
|
1992
|
+
if (params.select.length === 0) {
|
|
1993
|
+
throw new FiregraphError(
|
|
1994
|
+
"findEdgesProjected() requires a non-empty `select` list.",
|
|
1995
|
+
"INVALID_QUERY"
|
|
1996
|
+
);
|
|
1997
|
+
}
|
|
1998
|
+
const plan = buildEdgeQueryPlan(params);
|
|
1999
|
+
let filters;
|
|
2000
|
+
let options;
|
|
2001
|
+
if (plan.strategy === "get") {
|
|
2002
|
+
filters = [
|
|
2003
|
+
{ field: "aUid", op: "==", value: params.aUid },
|
|
2004
|
+
{ field: "axbType", op: "==", value: params.axbType },
|
|
2005
|
+
{ field: "bUid", op: "==", value: params.bUid }
|
|
2006
|
+
];
|
|
2007
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2008
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2009
|
+
options = void 0;
|
|
2010
|
+
} else {
|
|
2011
|
+
filters = plan.filters;
|
|
2012
|
+
options = plan.options;
|
|
2013
|
+
}
|
|
2014
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2015
|
+
const rows = await this.backend.findEdgesProjected(params.select, filters, options);
|
|
2016
|
+
return rows;
|
|
2017
|
+
}
|
|
2018
|
+
/**
|
|
2019
|
+
* Native vector / nearest-neighbour search (capability `search.vector`).
|
|
2020
|
+
*
|
|
2021
|
+
* Resolves to the top-K records by similarity, sorted nearest-first
|
|
2022
|
+
* (`EUCLIDEAN` / `COSINE`) or highest-first (`DOT_PRODUCT`). The wrapper
|
|
2023
|
+
* is intentionally thin: capability check, scan-protection, then forward
|
|
2024
|
+
* `params` verbatim to the backend. All field-path normalisation and
|
|
2025
|
+
* SDK-shape validation lives in the shared
|
|
2026
|
+
* `runFirestoreFindNearest` helper that both Firestore editions call —
|
|
2027
|
+
* keeping it there means the validation surface stays in lockstep with
|
|
2028
|
+
* the SDK call site, regardless of which backend is plugged in.
|
|
2029
|
+
*
|
|
2030
|
+
* Migrations are NOT applied. The vector index walked the raw stored
|
|
2031
|
+
* shape; rehydrating each row through the migration pipeline would
|
|
2032
|
+
* change the candidate set the index already chose. If you need
|
|
2033
|
+
* migrated shape, follow up with `getNode` / `findEdges` on the
|
|
2034
|
+
* returned UIDs — those paths apply migrations normally.
|
|
2035
|
+
*
|
|
2036
|
+
* Scan-protection mirrors `findEdges`: if no identifying filters
|
|
2037
|
+
* (`aType` / `axbType` / `bType`) and no `where` clauses are supplied,
|
|
2038
|
+
* the request must opt in via `allowCollectionScan: true`. The ANN
|
|
2039
|
+
* query still walks the candidate set the WHERE clause produces, so
|
|
2040
|
+
* an unfiltered nearest-neighbour search over a million-row collection
|
|
2041
|
+
* is the same scan trap as an unfiltered `findEdges`.
|
|
2042
|
+
*
|
|
2043
|
+
* Backends without `search.vector` throw `UNSUPPORTED_OPERATION` —
|
|
2044
|
+
* there is no client-side fallback because emulating ANN over the
|
|
2045
|
+
* generic backend surface (`findEdges` + JS-side cosine) doesn't scale
|
|
2046
|
+
* past trivial datasets and would give callers the wrong mental model
|
|
2047
|
+
* about cost.
|
|
2048
|
+
*/
|
|
2049
|
+
async findNearest(params) {
|
|
2050
|
+
if (!this.backend.findNearest) {
|
|
2051
|
+
throw new FiregraphError(
|
|
2052
|
+
"findNearest() is not supported by the current storage backend. Vector search requires a backend that declares `search.vector` (currently Firestore Standard and Enterprise). There is no client-side fallback because emulating ANN on top of the generic backend surface does not scale beyond toy datasets.",
|
|
2053
|
+
"UNSUPPORTED_OPERATION"
|
|
2054
|
+
);
|
|
2055
|
+
}
|
|
2056
|
+
const filters = [];
|
|
2057
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2058
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
2059
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2060
|
+
if (params.where) filters.push(...params.where);
|
|
2061
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2062
|
+
return this.backend.findNearest(params);
|
|
2063
|
+
}
|
|
2064
|
+
/**
|
|
2065
|
+
* Native full-text search (capability `search.fullText`).
|
|
2066
|
+
*
|
|
2067
|
+
* Returns the top-N records by relevance, ordered by the search
|
|
2068
|
+
* index's score. Only Firestore Enterprise declares this capability
|
|
2069
|
+
* today — the underlying Pipelines `search({ query: documentMatches(...) })`
|
|
2070
|
+
* stage requires Enterprise's FTS index. Standard does not declare
|
|
2071
|
+
* the cap (FTS is an Enterprise-only product feature, not a
|
|
2072
|
+
* typed-API gap), and the SQLite-shaped backends have no native
|
|
2073
|
+
* FTS index. Backends without `search.fullText` throw
|
|
2074
|
+
* `UNSUPPORTED_OPERATION` from this wrapper.
|
|
2075
|
+
*
|
|
2076
|
+
* Scan-protection mirrors `findNearest`: a search with no
|
|
2077
|
+
* identifying filters (`aType` / `axbType` / `bType`) walks every
|
|
2078
|
+
* row the index scored, so the request must opt in via
|
|
2079
|
+
* `allowCollectionScan: true`.
|
|
2080
|
+
*
|
|
2081
|
+
* Migrations are NOT applied. The FTS index walked the raw stored
|
|
2082
|
+
* shape; rehydrating each row through the migration pipeline would
|
|
2083
|
+
* change the candidate set the index already scored. If you need
|
|
2084
|
+
* migrated shape, follow up with `getNode` / `findEdges` on the
|
|
2085
|
+
* returned UIDs.
|
|
2086
|
+
*/
|
|
2087
|
+
async fullTextSearch(params) {
|
|
2088
|
+
if (!this.backend.fullTextSearch) {
|
|
2089
|
+
throw new FiregraphError(
|
|
2090
|
+
"fullTextSearch() is not supported by the current storage backend. Full-text search requires a backend that declares `search.fullText` (currently Firestore Enterprise only \u2014 FTS is an Enterprise product feature). There is no client-side fallback because emulating FTS over the generic backend surface would not scale beyond toy datasets.",
|
|
2091
|
+
"UNSUPPORTED_OPERATION"
|
|
2092
|
+
);
|
|
2093
|
+
}
|
|
2094
|
+
const filters = [];
|
|
2095
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2096
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
2097
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2098
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2099
|
+
return this.backend.fullTextSearch(params);
|
|
2100
|
+
}
|
|
2101
|
+
/**
|
|
2102
|
+
* Native geospatial distance search (capability `search.geo`).
|
|
2103
|
+
*
|
|
2104
|
+
* Returns rows whose `geoField` lies within `radiusMeters` of
|
|
2105
|
+
* `point`, ordered nearest-first by default. Only Firestore
|
|
2106
|
+
* Enterprise declares this capability — same Enterprise-only
|
|
2107
|
+
* gating as `fullTextSearch`. Backends without `search.geo` throw
|
|
2108
|
+
* `UNSUPPORTED_OPERATION` from this wrapper.
|
|
2109
|
+
*
|
|
2110
|
+
* Scan-protection mirrors `findNearest` and `fullTextSearch`.
|
|
2111
|
+
*
|
|
2112
|
+
* Migrations are NOT applied — same rationale as the other search
|
|
2113
|
+
* extensions.
|
|
2114
|
+
*/
|
|
2115
|
+
async geoSearch(params) {
|
|
2116
|
+
if (!this.backend.geoSearch) {
|
|
2117
|
+
throw new FiregraphError(
|
|
2118
|
+
"geoSearch() is not supported by the current storage backend. Geospatial search requires a backend that declares `search.geo` (currently Firestore Enterprise only \u2014 geo queries are an Enterprise product feature). There is no client-side fallback because emulating geo over the generic backend surface (haversine over `findEdges`) would not scale beyond trivial datasets.",
|
|
2119
|
+
"UNSUPPORTED_OPERATION"
|
|
2120
|
+
);
|
|
2121
|
+
}
|
|
2122
|
+
const filters = [];
|
|
2123
|
+
if (params.aType) filters.push({ field: "aType", op: "==", value: params.aType });
|
|
2124
|
+
if (params.axbType) filters.push({ field: "axbType", op: "==", value: params.axbType });
|
|
2125
|
+
if (params.bType) filters.push({ field: "bType", op: "==", value: params.bType });
|
|
2126
|
+
this.checkQuerySafety(filters, params.allowCollectionScan);
|
|
2127
|
+
return this.backend.geoSearch(params);
|
|
2128
|
+
}
|
|
2129
|
+
/**
|
|
2130
|
+
* Translate a `FindEdgesParams` into the `QueryFilter[]` shape the
|
|
2131
|
+
* backend `bulkDelete` / `bulkUpdate` methods expect. Mirrors the
|
|
2132
|
+
* `aggregate()` plan: a bare-empty params object becomes an empty
|
|
2133
|
+
* filter list (after a scan-protection check); a GET-shape (all three
|
|
2134
|
+
* identifiers) is rejected so we never silently turn a single-row
|
|
2135
|
+
* lookup into a server-side DML; otherwise we run `buildEdgeQueryPlan`
|
|
2136
|
+
* and surface its filters.
|
|
2137
|
+
*/
|
|
2138
|
+
buildDmlFilters(params) {
|
|
2139
|
+
const hasAnyFilter = params.aType || params.aUid || params.axbType || params.bType || params.bUid || params.where && params.where.length > 0;
|
|
2140
|
+
if (!hasAnyFilter) {
|
|
2141
|
+
this.checkQuerySafety([], params.allowCollectionScan);
|
|
2142
|
+
return [];
|
|
2143
|
+
}
|
|
2144
|
+
const plan = buildEdgeQueryPlan(params);
|
|
2145
|
+
if (plan.strategy === "get") {
|
|
2146
|
+
throw new FiregraphError(
|
|
2147
|
+
"bulkDelete() / bulkUpdate() require a query, not a direct document lookup. Use removeEdge() / updateEdge() for single-row operations, or omit one of aUid/axbType/bUid to force a query strategy.",
|
|
2148
|
+
"INVALID_QUERY"
|
|
2149
|
+
);
|
|
2150
|
+
}
|
|
2151
|
+
this.checkQuerySafety(plan.filters, params.allowCollectionScan);
|
|
2152
|
+
return plan.filters;
|
|
2153
|
+
}
|
|
2154
|
+
// ---------------------------------------------------------------------------
|
|
2155
|
+
// Dynamic registry methods
|
|
2156
|
+
// ---------------------------------------------------------------------------
|
|
2157
|
+
async defineNodeType(name, jsonSchema, description, options) {
|
|
2158
|
+
if (!this.dynamicConfig) {
|
|
2159
|
+
throw new DynamicRegistryError(
|
|
2160
|
+
'defineNodeType() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
|
|
2161
|
+
);
|
|
2162
|
+
}
|
|
2163
|
+
if (RESERVED_TYPE_NAMES.has(name)) {
|
|
2164
|
+
throw new DynamicRegistryError(
|
|
2165
|
+
`Cannot define type "${name}": this name is reserved for the meta-registry.`
|
|
2166
|
+
);
|
|
2167
|
+
}
|
|
2168
|
+
if (this.staticRegistry?.lookup(name, NODE_RELATION, name)) {
|
|
2169
|
+
throw new DynamicRegistryError(
|
|
2170
|
+
`Cannot define node type "${name}": already defined in the static registry.`
|
|
2171
|
+
);
|
|
2172
|
+
}
|
|
2173
|
+
const uid = generateDeterministicUid(META_NODE_TYPE, name);
|
|
2174
|
+
const data = { name, jsonSchema };
|
|
2175
|
+
if (description !== void 0) data.description = description;
|
|
2176
|
+
if (options?.titleField !== void 0) data.titleField = options.titleField;
|
|
2177
|
+
if (options?.subtitleField !== void 0) data.subtitleField = options.subtitleField;
|
|
2178
|
+
if (options?.viewTemplate !== void 0) data.viewTemplate = options.viewTemplate;
|
|
2179
|
+
if (options?.viewCss !== void 0) data.viewCss = options.viewCss;
|
|
2180
|
+
if (options?.allowedIn !== void 0) data.allowedIn = options.allowedIn;
|
|
2181
|
+
if (options?.migrationWriteBack !== void 0)
|
|
2182
|
+
data.migrationWriteBack = options.migrationWriteBack;
|
|
2183
|
+
if (options?.migrations !== void 0) {
|
|
2184
|
+
data.migrations = await this.serializeMigrations(options.migrations);
|
|
2185
|
+
}
|
|
2186
|
+
await this.putNode(META_NODE_TYPE, uid, data);
|
|
2187
|
+
}
|
|
2188
|
+
/**
 * Persist a dynamic edge-type definition as a meta-node.
 * Rejects reserved names, requires dynamic registry mode, and refuses to
 * shadow any (aType, name, bType) combination already present in the static
 * registry (checked over the full from x to cross-product of the topology).
 */
async defineEdgeType(name, topology, jsonSchema, description, options) {
  if (!this.dynamicConfig) {
    throw new DynamicRegistryError(
      'defineEdgeType() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
    );
  }
  if (RESERVED_TYPE_NAMES.has(name)) {
    throw new DynamicRegistryError(
      `Cannot define type "${name}": this name is reserved for the meta-registry.`
    );
  }
  if (this.staticRegistry) {
    // topology.from / topology.to may be a single type or a list; normalize
    // so the conflict check walks every endpoint pairing.
    const fromTypes = Array.isArray(topology.from) ? topology.from : [topology.from];
    const toTypes = Array.isArray(topology.to) ? topology.to : [topology.to];
    for (const aType of fromTypes) {
      for (const bType of toTypes) {
        if (this.staticRegistry.lookup(aType, name, bType)) {
          throw new DynamicRegistryError(
            `Cannot define edge type "${name}" for (${aType}) -> (${bType}): already defined in the static registry.`
          );
        }
      }
    }
  }
  // Deterministic uid: redefining the same edge type overwrites in place.
  const uid = generateDeterministicUid(META_EDGE_TYPE, name);
  const data = {
    name,
    from: topology.from,
    to: topology.to
  };
  // Optional fields are copied only when explicitly provided so the stored
  // meta-node carries no undefined-valued keys.
  if (jsonSchema !== void 0) data.jsonSchema = jsonSchema;
  if (topology.inverseLabel !== void 0) data.inverseLabel = topology.inverseLabel;
  if (topology.targetGraph !== void 0) data.targetGraph = topology.targetGraph;
  if (description !== void 0) data.description = description;
  if (options?.titleField !== void 0) data.titleField = options.titleField;
  if (options?.subtitleField !== void 0) data.subtitleField = options.subtitleField;
  if (options?.viewTemplate !== void 0) data.viewTemplate = options.viewTemplate;
  if (options?.viewCss !== void 0) data.viewCss = options.viewCss;
  if (options?.allowedIn !== void 0) data.allowedIn = options.allowedIn;
  if (options?.migrationWriteBack !== void 0)
    data.migrationWriteBack = options.migrationWriteBack;
  // Migrations are serialized (function sources stringified + precompiled).
  if (options?.migrations !== void 0) {
    data.migrations = await this.serializeMigrations(options.migrations);
  }
  await this.putNode(META_EDGE_TYPE, uid, data);
}
|
|
2234
|
+
async reloadRegistry() {
|
|
2235
|
+
if (!this.dynamicConfig) {
|
|
2236
|
+
throw new DynamicRegistryError(
|
|
2237
|
+
'reloadRegistry() is only available in dynamic registry mode. Pass registryMode: { mode: "dynamic" } to createGraphClient().'
|
|
2238
|
+
);
|
|
2239
|
+
}
|
|
2240
|
+
const reader = this.createMetaReader();
|
|
2241
|
+
const dynamicOnly = await createRegistryFromGraph(reader, this.migrationSandbox);
|
|
2242
|
+
if (this.staticRegistry) {
|
|
2243
|
+
this.dynamicRegistry = createMergedRegistry(this.staticRegistry, dynamicOnly);
|
|
2244
|
+
} else {
|
|
2245
|
+
this.dynamicRegistry = dynamicOnly;
|
|
2246
|
+
}
|
|
2247
|
+
}
|
|
2248
|
+
async serializeMigrations(migrations) {
|
|
2249
|
+
const result = migrations.map((m) => {
|
|
2250
|
+
const source = typeof m.up === "function" ? m.up.toString() : m.up;
|
|
2251
|
+
return { fromVersion: m.fromVersion, toVersion: m.toVersion, up: source };
|
|
2252
|
+
});
|
|
2253
|
+
await Promise.all(result.map((m) => precompileSource(m.up, this.migrationSandbox)));
|
|
2254
|
+
return result;
|
|
2255
|
+
}
|
|
2256
|
+
/**
|
|
2257
|
+
* Build a `GraphReader` over the meta-backend. If meta lives in the same
|
|
2258
|
+
* collection as the main backend, `this` is returned directly.
|
|
2259
|
+
*/
|
|
2260
|
+
createMetaReader() {
  // No dedicated meta backend: the client itself already implements the
  // GraphReader surface over the shared collection.
  if (!this.metaBackend) return this;
  const backend = this.metaBackend;
  const executeMetaQuery = (filters, options) => backend.query(filters, options);
  // Minimal GraphReader over the meta backend. Note: no scan-protection and
  // no migrations here — the registry loader controls its own queries.
  return {
    async getNode(uid) {
      return backend.getDoc(computeNodeDocId(uid));
    },
    async getEdge(aUid, axbType, bUid) {
      return backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
    },
    async edgeExists(aUid, axbType, bUid) {
      const record = await backend.getDoc(computeEdgeDocId(aUid, axbType, bUid));
      return record !== null;
    },
    async findEdges(params) {
      // GET-shape plans resolve via a direct doc fetch; everything else
      // compiles to a filtered query.
      const plan = buildEdgeQueryPlan(params);
      if (plan.strategy === "get") {
        const record = await backend.getDoc(plan.docId);
        return record ? [record] : [];
      }
      return executeMetaQuery(plan.filters, plan.options);
    },
    async findNodes(params) {
      const plan = buildNodeQueryPlan(params);
      if (plan.strategy === "get") {
        const record = await backend.getDoc(plan.docId);
        return record ? [record] : [];
      }
      return executeMetaQuery(plan.filters, plan.options);
    }
  };
}
|
|
2293
|
+
};
|
|
2294
|
+
/**
 * Public factory for the graph client. Thin wrapper so callers never
 * construct GraphClientImpl directly.
 */
function createGraphClient(backend, options, metaBackend) {
  const client = new GraphClientImpl(backend, options, metaBackend);
  return client;
}
|
|
2297
|
+
|
|
2298
|
+
// src/id.ts
|
|
2299
|
+
var import_nanoid = require("nanoid");
|
|
2300
|
+
/**
 * Generate a new random unique id via nanoid (default alphabet/length).
 * The `(0, fn)()` comma expression is standard bundler output: it invokes
 * the function without binding `this` to the module namespace object.
 */
function generateId() {
  return (0, import_nanoid.nanoid)();
}
|
|
2303
|
+
|
|
2304
|
+
// src/internal/backend.ts
|
|
2305
|
+
/**
 * Wrap a Set of capability strings in a read-only lookup object, hiding the
 * mutable Set from consumers.
 */
function createCapabilities(caps) {
  return {
    has(capability) {
      return caps.has(capability);
    },
    values() {
      return caps.values();
    }
  };
}
|
|
2311
|
+
|
|
2312
|
+
// src/internal/sqlite-data-ops.ts
|
|
2313
|
+
// Constructor names of Firestore SDK value types that must never be JSON
// serialized (they would silently lose their meaning).
var FIRESTORE_TYPE_NAMES = /* @__PURE__ */ new Set(
  ["Timestamp", "GeoPoint", "VectorValue", "DocumentReference", "FieldValue"]
);
/**
 * Return the Firestore type name when `value` is an instance of one of the
 * SDK's special value classes (matched by constructor name), else null.
 * Null-prototype objects (no constructor) safely return null.
 */
function isFirestoreSpecialType(value) {
  const name = value.constructor?.name;
  return name && FIRESTORE_TYPE_NAMES.has(name) ? name : null;
}
|
|
2325
|
+
// Keys safe to splice into a SQLite JSON path literal without quoting.
var JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
/**
 * Reject any JSON path component that could alter the meaning of a
 * `$.a.b` path literal (empty string, dots, brackets, quotes, whitespace).
 * Throws FiregraphError("INVALID_QUERY"); returns undefined when safe.
 */
function validateJsonPathKey(key, backendLabel) {
  if (key.length === 0) {
    throw new FiregraphError(
      `${backendLabel}: empty JSON path component is not allowed`,
      "INVALID_QUERY"
    );
  }
  if (JSON_PATH_KEY_RE.test(key)) return;
  throw new FiregraphError(
    `${backendLabel}: data field path component "${key}" is not a safe JSON-path identifier. Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceNode/replaceEdge (full-data overwrite) for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,
    "INVALID_QUERY"
  );
}
|
|
2340
|
+
/**
 * Serialize a value for binding into a SQLite `json(?)` argument.
 * `undefined` (which JSON.stringify would drop) is stored as an explicit
 * JSON null; Firestore SDK value objects are rejected outright.
 */
function jsonBind(value, backendLabel) {
  if (value === void 0) return "null";
  const isObjectLike = value !== null && typeof value === "object";
  if (isObjectLike) {
    const firestoreType = isFirestoreSpecialType(value);
    if (firestoreType) {
      throw new FiregraphError(
        `${backendLabel} cannot persist a Firestore ${firestoreType} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp).`,
        "INVALID_ARGUMENT"
      );
    }
  }
  return JSON.stringify(value);
}
|
|
2353
|
+
/**
 * Compile a list of dataOps patches into a nested SQLite JSON expression
 * over `base` (e.g. `json_set(json_remove("data", ?), ?, json(?))`), pushing
 * the bind values onto `params`. Returns null when there is nothing to do.
 *
 * Ordering matters twice here: deletes are applied before sets (json_remove
 * is the inner call), and params are pushed in the exact left-to-right order
 * the placeholders appear in the final SQL — deletes first, then for each
 * set its path followed by its JSON-encoded value.
 */
function compileDataOpsExpr(ops, base, params, backendLabel) {
  if (ops.length === 0) return null;
  const deletes = [];
  const sets = [];
  for (const op of ops) (op.delete ? deletes : sets).push(op);
  let expr = base;
  if (deletes.length > 0) {
    const placeholders = deletes.map(() => "?").join(", ");
    expr = `json_remove(${expr}, ${placeholders})`;
    for (const op of deletes) {
      // Every path segment is validated before being spliced into `$.a.b`.
      for (const seg of op.path) validateJsonPathKey(seg, backendLabel);
      params.push(`$.${op.path.join(".")}`);
    }
  }
  if (sets.length > 0) {
    // Each set contributes a path placeholder and a json(?) value placeholder.
    const pieces = sets.map(() => "?, json(?)").join(", ");
    expr = `json_set(${expr}, ${pieces})`;
    for (const op of sets) {
      for (const seg of op.path) validateJsonPathKey(seg, backendLabel);
      params.push(`$.${op.path.join(".")}`);
      params.push(jsonBind(op.value, backendLabel));
    }
  }
  return expr;
}
|
|
2378
|
+
|
|
2379
|
+
// src/internal/sqlite-payload-guard.ts
|
|
2380
|
+
init_serialization_tag();
|
|
2381
|
+
// Constructor names of Firestore SDK value types that must not appear inside
// a JSON-persisted payload (bundler-duplicated copy of the list above).
var FIRESTORE_TYPE_NAMES2 = /* @__PURE__ */ new Set([
  "Timestamp",
  "GeoPoint",
  "VectorValue",
  "DocumentReference",
  "FieldValue"
]);
/**
 * Validate that `data` round-trips through JSON.stringify without silent
 * loss: rejects deleteField() sentinels, symbols/functions, bigints,
 * Firestore SDK values, and arbitrary class instances (Date is allowed).
 * Throws FiregraphError("INVALID_ARGUMENT") naming the offending path.
 */
function assertJsonSafePayload(data, label) {
  walk2(data, [], label);
}
|
|
2391
|
+
/**
 * Recursive JSON-safety validator behind assertJsonSafePayload.
 * `path` accumulates the key/index trail for error messages; `label` names
 * the backend in the error text. Check order is significant: sentinel check
 * first (sentinels may be objects), then primitive-type rejections, then
 * object-shape rules.
 */
function walk2(node, path, label) {
  // null/undefined are representable (undefined is handled upstream).
  if (node === null || node === void 0) return;
  // deleteField() sentinels are only legal inside dataOps patches.
  if (isDeleteSentinel(node)) {
    throw new FiregraphError(
      `${label} backend cannot persist a deleteField() sentinel inside a full-data payload (replaceNode/replaceEdge or first-insert). The sentinel is only valid inside an updateNode/updateEdge dataOps patch. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  const t = typeof node;
  // JSON.stringify silently drops symbols and functions — fail loudly instead.
  if (t === "symbol" || t === "function") {
    throw new FiregraphError(
      `${label} backend cannot persist a value of type ${t}. JSON.stringify drops it silently. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // JSON.stringify throws on bigint; reject with a domain error instead.
  if (t === "bigint") {
    throw new FiregraphError(
      `${label} backend cannot persist a value of type bigint. JSON.stringify cannot serialize this type (throws TypeError). Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // Remaining primitives (string/number/boolean) are JSON-safe.
  if (t !== "object") return;
  if (Array.isArray(node)) {
    for (let i = 0; i < node.length; i++) {
      walk2(node[i], [...path, String(i)], label);
    }
    return;
  }
  const obj = node;
  // The serialization-tag key is reserved for migration-sandbox output on
  // the Firestore backend; user data may never carry it.
  if (Object.prototype.hasOwnProperty.call(obj, SERIALIZATION_TAG)) {
    const tagValue = obj[SERIALIZATION_TAG];
    throw new FiregraphError(
      `${label} backend cannot persist an object with a \`${SERIALIZATION_TAG}\` key (value: ${formatTagValue(tagValue)}). Recognised tags are valid only on the Firestore backend (migration-sandbox output); a literal \`${SERIALIZATION_TAG}\` field in user data is reserved and not allowed. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // Anything that is not a plain object (or null-prototype object) is a
  // class instance: reject Firestore SDK types with a targeted message,
  // allow Date, reject everything else generically.
  const proto = Object.getPrototypeOf(node);
  if (proto !== null && proto !== Object.prototype) {
    const ctor = node.constructor;
    const ctorName = ctor && typeof ctor.name === "string" ? ctor.name : "<anonymous>";
    if (FIRESTORE_TYPE_NAMES2.has(ctorName)) {
      throw new FiregraphError(
        `${label} backend cannot persist a Firestore ${ctorName} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp, \`{lat,lng}\` for GeoPoint). Path: ${formatPath(path)}.`,
        "INVALID_ARGUMENT"
      );
    }
    if (node instanceof Date) return;
    throw new FiregraphError(
      `${label} backend cannot persist a class instance of type ${ctorName}. Only plain objects, arrays, and primitives round-trip safely through JSON storage. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // Plain object: recurse into own enumerable string keys.
  for (const key of Object.keys(obj)) {
    walk2(obj[key], [...path, key], label);
  }
}
|
|
2447
|
+
/**
 * Render an error path trail, e.g. ["a","0"] -> `"a" > "0"`; empty -> <root>.
 */
function formatPath(path) {
  if (path.length === 0) return "<root>";
  return path.map((segment) => JSON.stringify(segment)).join(" > ");
}
|
|
2450
|
+
/**
 * Render a serialization-tag value for an error message: strings quoted,
 * other primitives stringified, everything else reduced to its typeof.
 */
function formatTagValue(value) {
  if (value === null) return "null";
  if (value === void 0) return "undefined";
  switch (typeof value) {
    case "string":
      return JSON.stringify(value);
    case "number":
    case "boolean":
    case "bigint":
      return String(value);
    default:
      return typeof value;
  }
}
|
|
2459
|
+
|
|
2460
|
+
// src/default-indexes.ts
|
|
2461
|
+
// Default index field sets over the edge-envelope columns (endpoint uid/type,
// optionally paired with the edge type). Frozen so the shared list cannot be
// mutated by consumers. NOTE(review): presumably consumed by backend index
// creation — the use site is not visible in this chunk; confirm there.
var DEFAULT_CORE_INDEXES = Object.freeze([
  { fields: ["aUid"] },
  { fields: ["bUid"] },
  { fields: ["aType"] },
  { fields: ["bType"] },
  { fields: ["aUid", "axbType"] },
  { fields: ["axbType", "bUid"] },
  { fields: ["aType", "axbType"] },
  { fields: ["axbType", "bType"] }
]);
|
|
2471
|
+
|
|
2472
|
+
// src/internal/sqlite-schema.ts
|
|
2473
|
+
// Maps the camelCase envelope field names used by the query API onto the
// snake_case column names of the shared SQLite table.
var FIELD_TO_COLUMN = {
  aType: "a_type",
  aUid: "a_uid",
  axbType: "axb_type",
  bType: "b_type",
  bUid: "b_uid",
  v: "v",
  createdAt: "created_at",
  updatedAt: "updated_at"
};
|
|
2483
|
+
/**
 * Double-quote a SQL identifier after validating it, so it can be spliced
 * into SQL text safely. Throws on anything outside the safe pattern.
 */
function quoteIdent(name) {
  validateTableName(name);
  return `"${name}"`;
}
/**
 * Assert `name` is a plain SQL identifier (letters/digits/underscore, not
 * starting with a digit). Throws a plain Error otherwise.
 */
function validateTableName(name) {
  const SAFE_IDENT = /^[A-Za-z_][A-Za-z0-9_]*$/;
  if (SAFE_IDENT.test(name)) return;
  throw new Error(`Invalid SQL identifier: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`);
}
|
|
2492
|
+
/**
 * Quote an arbitrary string as a SQL column alias, doubling embedded
 * double-quotes per SQL quoting rules.
 */
function quoteColumnAlias(label) {
  const escaped = label.replace(/"/g, '""');
  return `"${escaped}"`;
}
|
|
2495
|
+
|
|
2496
|
+
// src/timestamp.ts
|
|
2497
|
+
// Seconds + nanoseconds timestamp value object (Firestore-Timestamp-shaped)
// with millisecond conversion helpers.
var GraphTimestampImpl = class _GraphTimestampImpl {
  constructor(seconds, nanoseconds) {
    this.seconds = seconds;
    this.nanoseconds = nanoseconds;
  }
  /** Epoch milliseconds; sub-millisecond nanoseconds are floored away. */
  toMillis() {
    const wholeSecondsMs = this.seconds * 1e3;
    const nanosAsMs = Math.floor(this.nanoseconds / 1e6);
    return wholeSecondsMs + nanosAsMs;
  }
  /** JS Date at the same (millisecond-truncated) instant. */
  toDate() {
    return new Date(this.toMillis());
  }
  /** Plain-object form for JSON serialization. */
  toJSON() {
    const { seconds, nanoseconds } = this;
    return { seconds, nanoseconds };
  }
  /** Build from epoch milliseconds; nanoseconds stay in [0, 1e9). */
  static fromMillis(ms) {
    const seconds = Math.floor(ms / 1e3);
    const remainderMs = ms - seconds * 1e3;
    return new _GraphTimestampImpl(seconds, remainderMs * 1e6);
  }
  /** Current wall-clock time. */
  static now() {
    return _GraphTimestampImpl.fromMillis(Date.now());
  }
};
|
|
2520
|
+
|
|
2521
|
+
// src/sqlite/sql.ts
|
|
2522
|
+
// Human-readable labels interpolated into error messages by the SQLite
// query/payload helpers below (two variants for different call sites).
var SQLITE_BACKEND_LABEL = "shared-table SQLite";
var SQLITE_BACKEND_ERR_LABEL = "SQLite backend";
|
|
2524
|
+
/**
 * Resolve a query field name to a SQL expression: envelope fields map to
 * typed columns, `data` / `data.x.y` map to json_extract over the payload
 * column (every path segment validated first). Unknown fields throw.
 */
function compileFieldRef(field) {
  const column = FIELD_TO_COLUMN[field];
  if (column) {
    return { expr: quoteIdent(column) };
  }
  if (field === "data") {
    return { expr: `json_extract("data", '$')` };
  }
  if (field.startsWith("data.")) {
    const suffix = field.slice(5);
    // Validate each segment before splicing it into the path literal.
    suffix.split(".").forEach((part) => validateJsonPathKey(part, SQLITE_BACKEND_ERR_LABEL));
    return { expr: `json_extract("data", '$.${suffix}')` };
  }
  throw new FiregraphError(`SQLite backend cannot resolve filter field: ${field}`, "INVALID_QUERY");
}
|
|
2541
|
+
/**
 * Normalize a filter/update value into something SQLite can bind:
 * null/undefined -> NULL, primitives pass through, Dates -> epoch millis,
 * plain objects -> JSON text. Firestore SDK values are rejected because
 * JSON-serializing them would never match a stored row.
 */
function bindValue(value) {
  if (value == null) return null;
  const t = typeof value;
  if (t === "string" || t === "number" || t === "bigint" || t === "boolean") {
    return value;
  }
  if (value instanceof Date) return value.getTime();
  if (t === "object") {
    const firestoreType = isFirestoreSpecialType(value);
    if (firestoreType) {
      throw new FiregraphError(
        `SQLite backend cannot bind a Firestore ${firestoreType} value \u2014 JSON serialization would silently drop fields and the resulting bind would never match a stored row. Convert to a primitive (e.g. \`ts.toMillis()\` for Timestamp) before filtering or updating.`,
        "INVALID_QUERY"
      );
    }
    return JSON.stringify(value);
  }
  return String(value);
}
|
|
2559
|
+
/**
 * Compile one QueryFilter into a SQL predicate fragment, pushing its bind
 * values onto `params` in placeholder order. Simple comparisons share one
 * code path via an operator lookup; array operators expand into IN lists or
 * json_each EXISTS subqueries.
 */
function compileFilter(filter, params) {
  const { expr } = compileFieldRef(filter.field);
  // Map (not object literal) so prototype keys like "toString" can never
  // accidentally match an unknown operator.
  const comparisons = new Map([
    ["==", "="],
    ["!=", "!="],
    ["<", "<"],
    ["<=", "<="],
    [">", ">"],
    [">=", ">="]
  ]);
  const sqlOp = comparisons.get(filter.op);
  if (sqlOp !== void 0) {
    params.push(bindValue(filter.value));
    return `${expr} ${sqlOp} ?`;
  }
  switch (filter.op) {
    case "in":
    case "not-in": {
      const values = asArray(filter.value, filter.op);
      const placeholders = values.map(() => "?").join(", ");
      values.forEach((v) => params.push(bindValue(v)));
      const keyword = filter.op === "in" ? "IN" : "NOT IN";
      return `${expr} ${keyword} (${placeholders})`;
    }
    case "array-contains": {
      params.push(bindValue(filter.value));
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value = ?)`;
    }
    case "array-contains-any": {
      const values = asArray(filter.value, "array-contains-any");
      const placeholders = values.map(() => "?").join(", ");
      values.forEach((v) => params.push(bindValue(v)));
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value IN (${placeholders}))`;
    }
    default:
      throw new FiregraphError(
        `SQLite backend does not support filter operator: ${String(filter.op)}`,
        "INVALID_QUERY"
      );
  }
}
|
|
2609
|
+
/**
 * Guard for array-valued operators: returns `value` unchanged when it is a
 * non-empty array, otherwise throws INVALID_QUERY naming the operator.
 */
function asArray(value, op) {
  const valid = Array.isArray(value) && value.length > 0;
  if (!valid) {
    throw new FiregraphError(`Operator "${op}" requires a non-empty array value`, "INVALID_QUERY");
  }
  return value;
}
|
|
2615
|
+
/**
 * Compile the optional orderBy clause; returns "" when absent. `_params` is
 * unused but kept for signature symmetry with compileLimit.
 */
function compileOrderBy(options, _params) {
  const orderBy = options?.orderBy;
  if (!orderBy) return "";
  const { expr } = compileFieldRef(orderBy.field);
  const dir = orderBy.direction === "desc" ? "DESC" : "ASC";
  return ` ORDER BY ${expr} ${dir}`;
}
|
|
2622
|
+
/**
 * Compile an optional LIMIT clause, binding the limit value into `params`.
 * Returns "" when no limit was requested (limit: 0 is a real limit and is
 * passed through untouched).
 */
function compileLimit(options, params) {
  const limit = options?.limit;
  if (limit === undefined) return "";
  params.push(limit);
  return ` LIMIT ?`;
}
|
|
2627
|
+
/**
 * Build a scoped `SELECT *`: a scope-equality predicate plus every compiled
 * filter, followed by optional ORDER BY and LIMIT clauses. Bind parameters
 * accumulate in clause order.
 */
function compileSelect(table, scope, filters, options) {
  const params = [scope];
  const where = ['"scope" = ?'];
  filters.forEach((f) => where.push(compileFilter(f, params)));
  const base = `SELECT * FROM ${quoteIdent(table)} WHERE ${where.join(" AND ")}`;
  const orderClause = compileOrderBy(options, params);
  const limitClause = compileLimit(options, params);
  return { sql: base + orderClause + limitClause, params };
}
|
|
2640
|
+
/**
 * Compile an aggregate SELECT (COUNT/SUM/AVG/MIN/MAX) over the scoped
 * table. `spec` maps result aliases to { op, field }; non-count ops cast
 * the field expression to REAL. Returns the prepared statement together
 * with the alias order for result decoding.
 * @throws FiregraphError (INVALID_QUERY) on an empty spec, a count with a
 *   field, a non-count op without a field, or an unknown op.
 */
function compileAggregate(table, scope, spec, filters) {
  const aliases = Object.keys(spec);
  if (aliases.length === 0) {
    throw new FiregraphError(
      "aggregate() requires at least one aggregation in the `aggregates` map.",
      "INVALID_QUERY"
    );
  }
  const projections = aliases.map((alias) => {
    const { op, field } = spec[alias];
    validateJsonPathKey(alias, SQLITE_BACKEND_ERR_LABEL);
    if (op === "count") {
      if (field !== undefined) {
        throw new FiregraphError(
          `Aggregate '${alias}' op 'count' must not specify a field \u2014 count operates on rows, not a column expression.`,
          "INVALID_QUERY"
        );
      }
      return `COUNT(*) AS ${quoteIdent(alias)}`;
    }
    if (!field) {
      throw new FiregraphError(
        `Aggregate '${alias}' op '${op}' requires a field.`,
        "INVALID_QUERY"
      );
    }
    const { expr } = compileFieldRef(field);
    const numeric = `CAST(${expr} AS REAL)`;
    let fn;
    switch (op) {
      case "sum": fn = "SUM"; break;
      case "avg": fn = "AVG"; break;
      case "min": fn = "MIN"; break;
      case "max": fn = "MAX"; break;
      default:
        throw new FiregraphError(
          `SQLite backend does not support aggregate op: ${String(op)}`,
          "INVALID_QUERY"
        );
    }
    return `${fn}(${numeric}) AS ${quoteIdent(alias)}`;
  });
  const params = [scope];
  const conditions = ['"scope" = ?'];
  for (const f of filters) conditions.push(compileFilter(f, params));
  const sql = `SELECT ${projections.join(", ")} FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}`;
  return { stmt: { sql, params }, aliases };
}
|
|
2688
|
+
/**
 * Compile a cross-scope (global) SELECT. At least one filter is required to
 * avoid an unbounded scan. `scopeNameFilter` optionally narrows to the root
 * scope (empty string) or to scopes whose last path segment equals a name
 * (via a LIKE suffix match with escaped metacharacters).
 */
function compileSelectGlobal(table, filters, options, scopeNameFilter) {
  if (filters.length === 0) {
    throw new FiregraphError(
      "compileSelectGlobal requires at least one filter \u2014 refusing to issue an unbounded SELECT.",
      "INVALID_QUERY"
    );
  }
  const params = [];
  const conditions = [];
  if (scopeNameFilter) {
    if (scopeNameFilter.isRoot) {
      conditions.push(`"scope" = ?`);
      params.push("");
    } else {
      conditions.push(`"scope" LIKE ? ESCAPE '\\'`);
      params.push(`%/${escapeLike(scopeNameFilter.name)}`);
    }
  }
  filters.forEach((f) => conditions.push(compileFilter(f, params)));
  const sql =
    `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}` +
    compileOrderBy(options, params) +
    compileLimit(options, params);
  return { sql, params };
}
|
|
2712
|
+
/** Point lookup of a single document by (scope, doc_id), capped at one row. */
function compileSelectByDocId(table, scope, docId) {
  const sql = `SELECT * FROM ${quoteIdent(table)} WHERE "scope" = ? AND "doc_id" = ? LIMIT 1`;
  return { sql, params: [scope, docId] };
}
|
|
2718
|
+
/**
 * Canonicalize a user-facing projection field: built-in column names and
 * explicit `data` / `data.*` paths pass through unchanged; anything else is
 * treated as a path inside the JSON `data` payload.
 */
function normalizeProjectionField(field) {
  if (field in FIELD_TO_COLUMN) return field;
  const isDataPath = field === "data" || field.startsWith("data.");
  return isDataPath ? field : `data.${field}`;
}
|
|
2723
|
+
/**
 * Compile a projected SELECT: each requested field becomes an aliased
 * column expression. JSON sub-paths additionally get a companion
 * json_type() column (aliased `__fg_t_<idx>`) so the decoder can restore
 * the original value type. Duplicate fields are emitted once, in first-seen
 * order. Returns the statement plus per-column decode metadata.
 */
function compileFindEdgesProjected(table, scope, select, filters, options) {
  if (select.length === 0) {
    throw new FiregraphError(
      "compileFindEdgesProjected requires a non-empty select list \u2014 an empty projection has no SQL representation distinct from `findEdges`.",
      "INVALID_QUERY"
    );
  }
  // Set preserves insertion order, giving first-seen dedupe.
  const uniqueFields = [...new Set(select)];
  const projections = [];
  const columns = [];
  uniqueFields.forEach((field, idx) => {
    const canonical = normalizeProjectionField(field);
    const { expr } = compileFieldRef(canonical);
    projections.push(`${expr} AS ${quoteColumnAlias(field)}`);
    let kind;
    let typeAliasName;
    if (canonical === "data") {
      kind = "data";
    } else if (canonical.startsWith("data.")) {
      kind = "json";
      typeAliasName = `__fg_t_${idx}`;
      const typeAlias = quoteColumnAlias(typeAliasName);
      projections.push(`json_type("data", '$.${canonical.slice(5)}') AS ${typeAlias}`);
    } else if (canonical === "v") {
      kind = "builtin-int";
    } else if (canonical === "createdAt" || canonical === "updatedAt") {
      kind = "builtin-timestamp";
    } else {
      kind = "builtin-text";
    }
    columns.push({ field, kind, typeAlias: typeAliasName });
  });
  const params = [scope];
  const conditions = ['"scope" = ?'];
  for (const f of filters) conditions.push(compileFilter(f, params));
  const sql =
    `SELECT ${projections.join(", ")} FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}` +
    compileOrderBy(options, params) +
    compileLimit(options, params);
  return { stmt: { sql, params }, columns };
}
|
|
2772
|
+
/**
 * Decode one projected row back into JS values using the per-column kind
 * recorded at compile time: text/int/timestamp builtins, the whole JSON
 * `data` payload, or a typed JSON sub-path (whose SQLite type tag lives in
 * the companion `typeAlias` column). Columns of unknown kind are skipped.
 */
function decodeProjectedRow(row, columns) {
  const out = {};
  for (const col of columns) {
    const raw = row[col.field];
    const missing = raw === null || raw === undefined;
    if (col.kind === "builtin-text") {
      out[col.field] = missing ? null : String(raw);
    } else if (col.kind === "builtin-int") {
      // Number() maps bigint and numeric strings to number, and is the
      // identity for numbers already.
      out[col.field] = missing ? null : Number(raw);
    } else if (col.kind === "builtin-timestamp") {
      out[col.field] = GraphTimestampImpl.fromMillis(toMillis(raw));
    } else if (col.kind === "data") {
      out[col.field] = missing || raw === "" ? {} : JSON.parse(raw);
    } else if (col.kind === "json") {
      const jsonType = row[col.typeAlias];
      if (missing) {
        out[col.field] = null;
      } else if (jsonType === "object" || jsonType === "array") {
        out[col.field] = typeof raw === "string" ? JSON.parse(raw) : raw;
      } else if (jsonType === "integer" && typeof raw === "bigint") {
        out[col.field] = Number(raw);
      } else {
        out[col.field] = raw;
      }
    }
  }
  return out;
}
|
|
2820
|
+
/**
 * Compile the edge-expansion query: all edges of `params.axbType` whose
 * source endpoint (a_uid when forward, b_uid when reverse) is in
 * `params.sources`, optionally constrained by endpoint types and ordering.
 * `limitPerSource` becomes a single LIMIT of sources.length * limitPerSource.
 */
function compileExpand(table, scope, params) {
  if (params.sources.length === 0) {
    throw new FiregraphError(
      "compileExpand requires a non-empty sources list \u2014 empty IN () is invalid SQL. Callers should short-circuit empty input before reaching the compiler.",
      "INVALID_QUERY"
    );
  }
  const aUidCol = compileFieldRef("aUid").expr;
  const bUidCol = compileFieldRef("bUid").expr;
  const aTypeCol = compileFieldRef("aType").expr;
  const bTypeCol = compileFieldRef("bType").expr;
  const axbTypeCol = compileFieldRef("axbType").expr;
  const isForward = (params.direction ?? "forward") === "forward";
  const sourceColumn = isForward ? aUidCol : bUidCol;
  const sqlParams = [scope, params.axbType];
  const conditions = ['"scope" = ?', `${axbTypeCol} = ?`];
  const inList = params.sources.map(() => "?").join(", ");
  conditions.push(`${sourceColumn} IN (${inList})`);
  sqlParams.push(...params.sources);
  if (params.aType !== undefined) {
    conditions.push(`${aTypeCol} = ?`);
    sqlParams.push(params.aType);
  }
  if (params.bType !== undefined) {
    conditions.push(`${bTypeCol} = ?`);
    sqlParams.push(params.bType);
  }
  // When expanding the node relation itself, exclude rows where a_uid = b_uid.
  if (params.axbType === NODE_RELATION) {
    conditions.push(`${aUidCol} != ${bUidCol}`);
  }
  let sql = `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}`;
  if (params.orderBy) {
    sql += compileOrderBy({ orderBy: params.orderBy }, sqlParams);
  }
  if (params.limitPerSource !== undefined) {
    sql += ` LIMIT ?`;
    sqlParams.push(params.sources.length * params.limitPerSource);
  }
  return { sql, params: sqlParams };
}
|
|
2861
|
+
/**
 * Compile the hydration query: fetch rows of the node relation where
 * a_uid = b_uid and b_uid is one of `targetUids`.
 */
function compileExpandHydrate(table, scope, targetUids) {
  if (targetUids.length === 0) {
    throw new FiregraphError(
      "compileExpandHydrate requires a non-empty target list \u2014 empty IN () is invalid SQL.",
      "INVALID_QUERY"
    );
  }
  const aUidCol = compileFieldRef("aUid").expr;
  const bUidCol = compileFieldRef("bUid").expr;
  const axbTypeCol = compileFieldRef("axbType").expr;
  const inList = targetUids.map(() => "?").join(", ");
  return {
    sql: `SELECT * FROM ${quoteIdent(table)} WHERE "scope" = ? AND ${axbTypeCol} = ? AND ${aUidCol} = ${bUidCol} AND ${bUidCol} IN (${inList})`,
    params: [scope, NODE_RELATION, ...targetUids]
  };
}
|
|
2879
|
+
/**
 * Compile an upsert. `replace` mode emits INSERT OR REPLACE (full-row
 * overwrite). Merge mode emits INSERT ... ON CONFLICT(scope, doc_id) DO
 * UPDATE, where the JSON `data` payload is patched leaf-by-leaf against the
 * stored payload and `v` falls back to the existing value when not supplied.
 */
function compileSet(table, scope, docId, record, nowMillis, mode) {
  assertJsonSafePayload(record.data, SQLITE_BACKEND_LABEL);
  const rowValues = [
    docId,
    scope,
    record.aType,
    record.aUid,
    record.axbType,
    record.bType,
    record.bUid,
    JSON.stringify(record.data ?? {}),
    record.v ?? null,
    nowMillis,
    nowMillis
  ];
  if (mode === "replace") {
    const sql2 = `INSERT OR REPLACE INTO ${quoteIdent(table)} (
    doc_id, scope, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
  ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`;
    return { sql: sql2, params: rowValues };
  }
  // Merge mode: flatten the incoming payload into per-leaf ops applied over
  // the existing JSON; fall back to the stored payload when there are no ops.
  const ops = flattenPatch(record.data ?? {});
  const updateParams = [];
  const dataExpr =
    compileDataOpsExpr(ops, `COALESCE("data", '{}')`, updateParams, SQLITE_BACKEND_ERR_LABEL) ??
    `COALESCE("data", '{}')`;
  // NOTE(review): the conflict branch also sets "created_at" = excluded."created_at",
  // i.e. merge-upserting an existing row resets its creation timestamp — confirm
  // this is intended rather than preserving the original created_at.
  const sql = `INSERT INTO ${quoteIdent(table)} (
    doc_id, scope, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
  ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
  ON CONFLICT(scope, doc_id) DO UPDATE SET
    "a_type" = excluded."a_type",
    "a_uid" = excluded."a_uid",
    "axb_type" = excluded."axb_type",
    "b_type" = excluded."b_type",
    "b_uid" = excluded."b_uid",
    "data" = ${dataExpr},
    "v" = COALESCE(excluded."v", "v"),
    "created_at" = excluded."created_at",
    "updated_at" = excluded."updated_at"`;
  return { sql, params: [...rowValues, ...updateParams] };
}
|
|
2931
|
+
/**
 * Compile a single-document UPDATE keyed by (scope, doc_id). Exactly one of
 * `replaceData` / `dataOps` is allowed (enforced up front); `v` is set when
 * provided, and "updated_at" is always stamped.
 */
function compileUpdate(table, scope, docId, update, nowMillis) {
  assertUpdatePayloadExclusive(update);
  const setClauses = [];
  const params = [];
  if (update.replaceData) {
    assertJsonSafePayload(update.replaceData, SQLITE_BACKEND_LABEL);
    setClauses.push(`"data" = ?`);
    params.push(JSON.stringify(update.replaceData));
  } else if (update.dataOps?.length) {
    update.dataOps.forEach((op) => {
      if (!op.delete) assertJsonSafePayload(op.value, SQLITE_BACKEND_LABEL);
    });
    const expr = compileDataOpsExpr(
      update.dataOps,
      `COALESCE("data", '{}')`,
      params,
      SQLITE_BACKEND_ERR_LABEL
    );
    if (expr !== null) setClauses.push(`"data" = ${expr}`);
  }
  if (update.v !== undefined) {
    setClauses.push(`"v" = ?`);
    params.push(update.v);
  }
  setClauses.push(`"updated_at" = ?`);
  params.push(nowMillis, scope, docId);
  return {
    sql: `UPDATE ${quoteIdent(table)} SET ${setClauses.join(", ")} WHERE "scope" = ? AND "doc_id" = ?`,
    params
  };
}
|
|
2965
|
+
/** Compile a single-document DELETE keyed by (scope, doc_id). */
function compileDelete(table, scope, docId) {
  const sql = `DELETE FROM ${quoteIdent(table)} WHERE "scope" = ? AND "doc_id" = ?`;
  return { sql, params: [scope, docId] };
}
|
|
2971
|
+
/** Compile a scope-bound DELETE constrained by the given filters. */
function compileBulkDelete(table, scope, filters) {
  const params = [scope];
  const conditions = ['"scope" = ?'];
  filters.forEach((f) => conditions.push(compileFilter(f, params)));
  return {
    sql: `DELETE FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}`,
    params
  };
}
|
|
2982
|
+
/**
 * Compile a filtered bulk UPDATE that patches the JSON `data` payload and
 * stamps "updated_at". Empty patches are rejected up front (they would only
 * rewrite the timestamp). Bind order: SET params first, then WHERE params.
 */
function compileBulkUpdate(table, scope, filters, patchData, nowMillis) {
  const dataOps = flattenPatch(patchData);
  if (dataOps.length === 0) {
    throw new FiregraphError(
      "bulkUpdate() patch.data must contain at least one leaf \u2014 an empty patch would only rewrite `updated_at`, which is almost certainly a bug. Use `setDoc` with merge mode if you want to stamp without editing data.",
      "INVALID_QUERY"
    );
  }
  dataOps.forEach((op) => {
    if (!op.delete) assertJsonSafePayload(op.value, SQLITE_BACKEND_LABEL);
  });
  const setParams = [];
  const expr = compileDataOpsExpr(
    dataOps,
    `COALESCE("data", '{}')`,
    setParams,
    SQLITE_BACKEND_ERR_LABEL
  );
  if (expr === null) {
    throw new FiregraphError(
      "bulkUpdate() patch produced no SQL operations \u2014 internal invariant violated.",
      "INVALID_ARGUMENT"
    );
  }
  setParams.push(nowMillis);
  const whereParams = [scope];
  const conditions = ['"scope" = ?'];
  filters.forEach((f) => conditions.push(compileFilter(f, whereParams)));
  return {
    sql: `UPDATE ${quoteIdent(table)} SET "data" = ${expr}, "updated_at" = ? WHERE ${conditions.join(" AND ")}`,
    params: [...setParams, ...whereParams]
  };
}
|
|
3018
|
+
/**
 * Delete every row whose scope lies strictly under `scopePrefix`
 * (LIKE 'prefix/%' with metacharacters escaped).
 */
function compileDeleteScopePrefix(table, scopePrefix) {
  const pattern = `${escapeLike(scopePrefix)}/%`;
  return {
    sql: `DELETE FROM ${quoteIdent(table)} WHERE "scope" LIKE ? ESCAPE '\\'`,
    params: [pattern]
  };
}
|
|
3025
|
+
/**
 * Count rows whose scope lies strictly under `scopePrefix`; the count is
 * returned under the alias `n`.
 */
function compileCountScopePrefix(table, scopePrefix) {
  const pattern = `${escapeLike(scopePrefix)}/%`;
  return {
    sql: `SELECT COUNT(*) AS n FROM ${quoteIdent(table)} WHERE "scope" LIKE ? ESCAPE '\\'`,
    params: [pattern]
  };
}
|
|
3032
|
+
/**
 * Escape LIKE metacharacters (% and _) plus the escape character itself for
 * patterns used with `ESCAPE '\'`. Backslashes are doubled first so the
 * later substitutions cannot re-escape them.
 */
function escapeLike(value) {
  return value
    .replaceAll("\\", "\\\\")
    .replaceAll("%", "\\%")
    .replaceAll("_", "\\_");
}
|
|
3035
|
+
/**
 * Convert a raw SQLite row into a record: parse the JSON `data` column
 * (empty/null becomes {}), wrap the timestamp columns, and expose `v` only
 * when the column is non-null.
 */
function rowToRecord(row) {
  const record = {
    aType: row.a_type,
    aUid: row.a_uid,
    axbType: row.axb_type,
    bType: row.b_type,
    bUid: row.b_uid,
    data: row.data ? JSON.parse(row.data) : {},
    createdAt: GraphTimestampImpl.fromMillis(toMillis(row.created_at)),
    updatedAt: GraphTimestampImpl.fromMillis(toMillis(row.updated_at))
  };
  if (row.v !== null && row.v !== undefined) {
    record.v = Number(row.v);
  }
  return record;
}
|
|
3055
|
+
/**
 * Normalize a driver-returned timestamp column (number, bigint, or numeric
 * string) to a millisecond number; any other type maps to 0.
 *
 * Fix: a non-numeric string previously produced NaN via Number(value),
 * which would poison downstream GraphTimestamp construction; unparseable
 * strings now fall back to 0, matching the existing fallback for unknown
 * types.
 */
function toMillis(value) {
  if (typeof value === "number") return value;
  if (typeof value === "bigint") return Number(value);
  if (typeof value === "string") {
    const parsed = Number(value);
    return Number.isFinite(parsed) ? parsed : 0;
  }
  return 0;
}
|
|
3061
|
+
|
|
3062
|
+
// src/sqlite/backend.ts
|
|
3063
|
+
// Retry policy for chunked batch execution: each chunk is retried with
// backoff starting at BASE_RETRY_DELAY_MS and capped at MAX_RETRY_DELAY_MS,
// up to DEFAULT_MAX_RETRIES attempts.
var DEFAULT_MAX_RETRIES = 3;
var BASE_RETRY_DELAY_MS = 200;
var MAX_RETRY_DELAY_MS = 5000;
|
|
3066
|
+
/** Resolve after `ms` milliseconds (promisified setTimeout). */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
|
|
3069
|
+
/**
 * Minimum of two optional numbers: an undefined operand is ignored; when
 * both are undefined the result is undefined. Uses Math.min so NaN/-0
 * semantics match the standard library.
 */
function minDefined(a, b) {
  if (a === undefined) return b;
  if (b === undefined) return a;
  return Math.min(a, b);
}
|
|
3074
|
+
/**
 * Split `statements` into chunks respecting optional driver caps on
 * statements-per-batch and bound-parameters-per-batch. Caps that are
 * missing, non-positive, or non-finite are treated as unlimited (both
 * unlimited returns the input as a single chunk).
 *
 * A single statement whose own parameter count exceeds the cap is still
 * emitted as a one-statement chunk — it cannot be split further.
 *
 * Fix: an empty statement list with no caps previously returned `[[]]`
 * (one empty chunk, which a driver batch() call could choke on), while the
 * capped path returned `[]`; the empty case now uniformly returns `[]`.
 */
function chunkStatements(statements, maxStatements, maxParams) {
  if (statements.length === 0) return [];
  const stmtCap =
    maxStatements && maxStatements > 0 && Number.isFinite(maxStatements)
      ? Math.floor(maxStatements)
      : Infinity;
  const paramCap =
    maxParams && maxParams > 0 && Number.isFinite(maxParams)
      ? Math.floor(maxParams)
      : Infinity;
  if (stmtCap === Infinity && paramCap === Infinity) {
    return [statements];
  }
  const chunks = [];
  let current = [];
  let currentParamCount = 0;
  for (const stmt of statements) {
    const stmtParams = stmt.params.length;
    const overStmtCap = current.length + 1 > stmtCap;
    const overParamCap = currentParamCount + stmtParams > paramCap;
    // Flush the current chunk before this statement would overflow it.
    if (current.length > 0 && (overStmtCap || overParamCap)) {
      chunks.push(current);
      current = [];
      currentParamCount = 0;
    }
    current.push(stmt);
    currentParamCount += stmtParams;
  }
  if (current.length > 0) chunks.push(current);
  return chunks;
}
|
|
3098
|
+
/**
 * Transaction-scoped backend: every operation runs against the driver's
 * transaction handle (`tx`) instead of the top-level executor.
 */
var SqliteTransactionBackendImpl = class {
  constructor(tx, tableName, storageScope) {
    this.tx = tx;
    this.tableName = tableName;
    this.storageScope = storageScope;
  }
  /** Fetch a single record by doc id, or null when no row matches. */
  async getDoc(docId) {
    const { sql, params } = compileSelectByDocId(this.tableName, this.storageScope, docId);
    const rows = await this.tx.all(sql, params);
    return rows.length > 0 ? rowToRecord(rows[0]) : null;
  }
  /** Run a filtered, optionally ordered/limited query over this scope. */
  async query(filters, options) {
    const { sql, params } = compileSelect(this.tableName, this.storageScope, filters, options);
    const rows = await this.tx.all(sql, params);
    return rows.map(rowToRecord);
  }
  /** Upsert a document with replace or merge semantics per `mode`. */
  async setDoc(docId, record, mode) {
    const { sql, params } = compileSet(this.tableName, this.storageScope, docId, record, Date.now(), mode);
    await this.tx.run(sql, params);
  }
  /**
   * Apply a partial update; RETURNING lets one round-trip distinguish
   * "updated" from "missing". Throws NOT_FOUND when no row matched.
   */
  async updateDoc(docId, update) {
    const stmt = compileUpdate(this.tableName, this.storageScope, docId, update, Date.now());
    const rows = await this.tx.all(`${stmt.sql} RETURNING "doc_id"`, stmt.params);
    if (rows.length === 0) {
      throw new FiregraphError(
        `updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,
        "NOT_FOUND"
      );
    }
  }
  /** Delete a document; deleting a missing doc matches zero rows and is silent. */
  async deleteDoc(docId) {
    const { sql, params } = compileDelete(this.tableName, this.storageScope, docId);
    await this.tx.run(sql, params);
  }
};
|
|
3134
|
+
/**
 * Write batch: compiles each write eagerly into a local statement queue and
 * submits the whole queue to the executor's batch() in a single commit().
 */
var SqliteBatchBackendImpl = class {
  statements = [];
  constructor(executor, tableName, storageScope) {
    this.executor = executor;
    this.tableName = tableName;
    this.storageScope = storageScope;
  }
  /** Queue an upsert (replace or merge per `mode`). */
  setDoc(docId, record, mode) {
    const stmt = compileSet(this.tableName, this.storageScope, docId, record, Date.now(), mode);
    this.statements.push(stmt);
  }
  /** Queue a partial update. */
  updateDoc(docId, update) {
    const stmt = compileUpdate(this.tableName, this.storageScope, docId, update, Date.now());
    this.statements.push(stmt);
  }
  /** Queue a delete. */
  deleteDoc(docId) {
    this.statements.push(compileDelete(this.tableName, this.storageScope, docId));
  }
  /**
   * Submit queued statements; a no-op when the queue is empty. The queue is
   * cleared only after batch() succeeds, so a failed commit can be retried.
   */
  async commit() {
    if (this.statements.length === 0) return;
    await this.executor.batch(this.statements);
    this.statements.length = 0;
  }
};
|
|
3160
|
+
// Capability flags every SQLite executor supports. "core.transactions" is
// not listed here: it is added per-instance only when the driver exposes a
// transaction() function.
var SQLITE_CORE_CAPS = [
  "core.read",
  "core.write",
  "core.batch",
  "core.subgraph",
  "query.aggregate",
  "query.dml",
  "query.join",
  "query.select",
  "raw.sql"
];
|
|
3171
|
+
var SqliteBackendImpl = class _SqliteBackendImpl {
|
|
3172
|
+
constructor(executor, tableName, storageScope, scopePath) {
|
|
3173
|
+
this.executor = executor;
|
|
3174
|
+
this.collectionPath = tableName;
|
|
3175
|
+
this.storageScope = storageScope;
|
|
3176
|
+
this.scopePath = scopePath;
|
|
3177
|
+
const caps = new Set(SQLITE_CORE_CAPS);
|
|
3178
|
+
if (typeof executor.transaction === "function") {
|
|
3179
|
+
caps.add("core.transactions");
|
|
3180
|
+
}
|
|
3181
|
+
this.capabilities = createCapabilities(caps);
|
|
3182
|
+
}
|
|
3183
|
+
capabilities;
|
|
3184
|
+
/** Logical table name (returned through `collectionPath` for parity with Firestore). */
|
|
3185
|
+
collectionPath;
|
|
3186
|
+
scopePath;
|
|
3187
|
+
/** Materialized storage scope (interleaved parent UIDs + subgraph names). */
|
|
3188
|
+
storageScope;
|
|
3189
|
+
// --- Reads ---
|
|
3190
|
+
async getDoc(docId) {
|
|
3191
|
+
const stmt = compileSelectByDocId(this.collectionPath, this.storageScope, docId);
|
|
3192
|
+
const rows = await this.executor.all(stmt.sql, stmt.params);
|
|
3193
|
+
return rows.length === 0 ? null : rowToRecord(rows[0]);
|
|
3194
|
+
}
|
|
3195
|
+
async query(filters, options) {
|
|
3196
|
+
const stmt = compileSelect(this.collectionPath, this.storageScope, filters, options);
|
|
3197
|
+
const rows = await this.executor.all(stmt.sql, stmt.params);
|
|
3198
|
+
return rows.map(rowToRecord);
|
|
3199
|
+
}
|
|
3200
|
+
// --- Writes ---
|
|
3201
|
+
async setDoc(docId, record, mode) {
|
|
3202
|
+
const stmt = compileSet(
|
|
3203
|
+
this.collectionPath,
|
|
3204
|
+
this.storageScope,
|
|
3205
|
+
docId,
|
|
3206
|
+
record,
|
|
3207
|
+
Date.now(),
|
|
3208
|
+
mode
|
|
3209
|
+
);
|
|
3210
|
+
await this.executor.run(stmt.sql, stmt.params);
|
|
3211
|
+
}
|
|
3212
|
+
async updateDoc(docId, update) {
|
|
3213
|
+
const stmt = compileUpdate(this.collectionPath, this.storageScope, docId, update, Date.now());
|
|
3214
|
+
const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
|
|
3215
|
+
const rows = await this.executor.all(sqlWithReturning, stmt.params);
|
|
3216
|
+
if (rows.length === 0) {
|
|
3217
|
+
throw new FiregraphError(
|
|
3218
|
+
`updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,
|
|
3219
|
+
"NOT_FOUND"
|
|
3220
|
+
);
|
|
3221
|
+
}
|
|
3222
|
+
}
|
|
3223
|
+
async deleteDoc(docId) {
|
|
3224
|
+
const stmt = compileDelete(this.collectionPath, this.storageScope, docId);
|
|
3225
|
+
await this.executor.run(stmt.sql, stmt.params);
|
|
3226
|
+
}
|
|
3227
|
+
// --- Transactions / Batches ---
|
|
3228
|
+
async runTransaction(fn) {
|
|
3229
|
+
if (!this.executor.transaction) {
|
|
3230
|
+
throw new FiregraphError(
|
|
3231
|
+
"Interactive transactions are not supported by this SQLite driver. D1 in particular has no read-then-conditional-write transactions; use a Durable Object SQLite client instead, or rewrite the code path as a batch().",
|
|
3232
|
+
"UNSUPPORTED_OPERATION"
|
|
3233
|
+
);
|
|
3234
|
+
}
|
|
3235
|
+
return this.executor.transaction(async (tx) => {
|
|
3236
|
+
const txBackend = new SqliteTransactionBackendImpl(
|
|
3237
|
+
tx,
|
|
3238
|
+
this.collectionPath,
|
|
3239
|
+
this.storageScope
|
|
3240
|
+
);
|
|
3241
|
+
return fn(txBackend);
|
|
3242
|
+
});
|
|
3243
|
+
}
|
|
3244
|
+
createBatch() {
|
|
3245
|
+
return new SqliteBatchBackendImpl(this.executor, this.collectionPath, this.storageScope);
|
|
3246
|
+
}
|
|
3247
|
+
// --- Subgraphs ---
|
|
3248
|
+
subgraph(parentNodeUid, name) {
|
|
3249
|
+
if (!parentNodeUid || parentNodeUid.includes("/")) {
|
|
3250
|
+
throw new FiregraphError(
|
|
3251
|
+
`Invalid parentNodeUid for subgraph: "${parentNodeUid}". Must be a non-empty string without "/".`,
|
|
3252
|
+
"INVALID_SUBGRAPH"
|
|
3253
|
+
);
|
|
3254
|
+
}
|
|
3255
|
+
if (!name || name.includes("/")) {
|
|
3256
|
+
throw new FiregraphError(
|
|
3257
|
+
`Subgraph name must not contain "/" and must be non-empty: got "${name}". Use chained .subgraph() calls for nested subgraphs.`,
|
|
3258
|
+
"INVALID_SUBGRAPH"
|
|
3259
|
+
);
|
|
3260
|
+
}
|
|
3261
|
+
const newStorageScope = this.storageScope ? `${this.storageScope}/${parentNodeUid}/${name}` : `${parentNodeUid}/${name}`;
|
|
3262
|
+
const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
|
|
3263
|
+
return new _SqliteBackendImpl(this.executor, this.collectionPath, newStorageScope, newScope);
|
|
3264
|
+
}
|
|
3265
|
+
// --- Cascade & bulk ---
|
|
3266
|
+
async removeNodeCascade(uid, reader, options) {
|
|
3267
|
+
const [outgoingRaw, incomingRaw] = await Promise.all([
|
|
3268
|
+
reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
|
|
3269
|
+
reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
|
|
3270
|
+
]);
|
|
3271
|
+
const seen = /* @__PURE__ */ new Set();
|
|
3272
|
+
const edgeDocIds = [];
|
|
3273
|
+
for (const edge of [...outgoingRaw, ...incomingRaw]) {
|
|
3274
|
+
if (edge.axbType === NODE_RELATION) continue;
|
|
3275
|
+
const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
|
|
3276
|
+
if (!seen.has(docId)) {
|
|
3277
|
+
seen.add(docId);
|
|
3278
|
+
edgeDocIds.push(docId);
|
|
3279
|
+
}
|
|
3280
|
+
}
|
|
3281
|
+
const nodeDocId = computeNodeDocId(uid);
|
|
3282
|
+
const shouldDeleteSubgraphs = options?.deleteSubcollections !== false;
|
|
3283
|
+
let subgraphRowCount = 0;
|
|
3284
|
+
if (shouldDeleteSubgraphs) {
|
|
3285
|
+
const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;
|
|
3286
|
+
const countStmt = compileCountScopePrefix(this.collectionPath, prefix);
|
|
3287
|
+
const countRows = await this.executor.all(countStmt.sql, countStmt.params);
|
|
3288
|
+
const first = countRows[0];
|
|
3289
|
+
const n = first?.n;
|
|
3290
|
+
subgraphRowCount = typeof n === "bigint" ? Number(n) : Number(n ?? 0);
|
|
3291
|
+
}
|
|
3292
|
+
const writeStatements = edgeDocIds.map(
|
|
3293
|
+
(id) => compileDelete(this.collectionPath, this.storageScope, id)
|
|
3294
|
+
);
|
|
3295
|
+
writeStatements.push(compileDelete(this.collectionPath, this.storageScope, nodeDocId));
|
|
3296
|
+
if (shouldDeleteSubgraphs) {
|
|
3297
|
+
const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;
|
|
3298
|
+
writeStatements.push(compileDeleteScopePrefix(this.collectionPath, prefix));
|
|
3299
|
+
}
|
|
3300
|
+
const {
|
|
3301
|
+
deleted: stmtDeleted,
|
|
3302
|
+
batches,
|
|
3303
|
+
errors
|
|
3304
|
+
} = await this.executeChunkedBatches(writeStatements, options);
|
|
3305
|
+
const allOk = errors.length === 0;
|
|
3306
|
+
const edgesDeleted = allOk ? edgeDocIds.length : 0;
|
|
3307
|
+
const nodeDeleted = allOk;
|
|
3308
|
+
const prefixStatementContribution = shouldDeleteSubgraphs && allOk ? 1 : 0;
|
|
3309
|
+
const deleted = stmtDeleted - prefixStatementContribution + (allOk ? subgraphRowCount : 0);
|
|
3310
|
+
return { deleted, batches, errors, edgesDeleted, nodeDeleted };
|
|
3311
|
+
}
|
|
3312
|
+
async bulkRemoveEdges(params, reader, options) {
|
|
3313
|
+
const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
|
|
3314
|
+
const edges = await reader.findEdges(effectiveParams);
|
|
3315
|
+
const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
|
|
3316
|
+
if (docIds.length === 0) {
|
|
3317
|
+
return { deleted: 0, batches: 0, errors: [] };
|
|
3318
|
+
}
|
|
3319
|
+
const statements = docIds.map(
|
|
3320
|
+
(id) => compileDelete(this.collectionPath, this.storageScope, id)
|
|
3321
|
+
);
|
|
3322
|
+
return this.executeChunkedBatches(statements, options);
|
|
3323
|
+
}
|
|
3324
|
+
/**
|
|
3325
|
+
* Submit `statements` to the executor as one or more `batch()` calls,
|
|
3326
|
+
* chunking by `executor.maxBatchSize` (e.g. D1's ~100-statement cap).
|
|
3327
|
+
* Drivers that don't advertise a cap submit everything in one batch,
|
|
3328
|
+
* preserving cross-batch atomicity.
|
|
3329
|
+
*
|
|
3330
|
+
* Each chunk is retried with exponential backoff up to `maxRetries`
|
|
3331
|
+
* (default 3) before being recorded in `errors`. The loop continues past
|
|
3332
|
+
* a permanently failed chunk so the caller still gets partial progress
|
|
3333
|
+
* visibility — to halt on first failure, set `maxRetries: 0` and check
|
|
3334
|
+
* `result.errors.length` after the call.
|
|
3335
|
+
*
|
|
3336
|
+
* Returns `BulkResult`-shaped fields. `deleted` reflects only the
|
|
3337
|
+
* statement count of *successfully committed* batches — a prefix-delete
|
|
3338
|
+
* statement contributes 1 to that total even though it may match many
|
|
3339
|
+
* rows; `removeNodeCascade` patches that up with a pre-counted row total.
|
|
3340
|
+
*
|
|
3341
|
+
* **Atomicity caveat (D1):** when chunking kicks in, atomicity is lost
|
|
3342
|
+
* across chunk boundaries — one chunk may commit while a later one fails.
|
|
3343
|
+
* `removeNodeCascade` is idempotent (deleting the same docs again is a
|
|
3344
|
+
* no-op) so a caller can simply retry on partial failure. `bulkRemoveEdges`
|
|
3345
|
+
* is also idempotent for the same reason. DO SQLite leaves `maxBatchSize`
|
|
3346
|
+
* unset, so everything funnels through one atomic `transactionSync` and
|
|
3347
|
+
* this caveat does not apply.
|
|
3348
|
+
*/
|
|
3349
|
+
async executeChunkedBatches(statements, options) {
|
|
3350
|
+
if (statements.length === 0) {
|
|
3351
|
+
return { deleted: 0, batches: 0, errors: [] };
|
|
3352
|
+
}
|
|
3353
|
+
const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
|
|
3354
|
+
const callerBatchSize = options?.batchSize;
|
|
3355
|
+
const stmtCap = minDefined(callerBatchSize, this.executor.maxBatchSize);
|
|
3356
|
+
const chunks = chunkStatements(statements, stmtCap, this.executor.maxBatchParams);
|
|
3357
|
+
const errors = [];
|
|
3358
|
+
let deleted = 0;
|
|
3359
|
+
let batches = 0;
|
|
3360
|
+
const totalBatches = chunks.length;
|
|
3361
|
+
const driverParamCap = this.executor.maxBatchParams;
|
|
3362
|
+
for (let batchIndex = 0; batchIndex < chunks.length; batchIndex++) {
|
|
3363
|
+
const chunk = chunks[batchIndex];
|
|
3364
|
+
const isUnretriableOversize = chunk.length === 1 && driverParamCap !== void 0 && chunk[0].params.length > driverParamCap;
|
|
3365
|
+
let committed = false;
|
|
3366
|
+
let lastError = null;
|
|
3367
|
+
const effectiveRetries = isUnretriableOversize ? 0 : maxRetries;
|
|
3368
|
+
for (let attempt = 0; attempt <= effectiveRetries; attempt++) {
|
|
3369
|
+
try {
|
|
3370
|
+
await this.executor.batch(chunk);
|
|
3371
|
+
committed = true;
|
|
3372
|
+
break;
|
|
3373
|
+
} catch (err) {
|
|
3374
|
+
lastError = err instanceof Error ? err : new Error(String(err));
|
|
3375
|
+
if (attempt < effectiveRetries) {
|
|
3376
|
+
const delay = Math.min(BASE_RETRY_DELAY_MS * Math.pow(2, attempt), MAX_RETRY_DELAY_MS);
|
|
3377
|
+
await sleep(delay);
|
|
3378
|
+
}
|
|
3379
|
+
}
|
|
3380
|
+
}
|
|
3381
|
+
if (committed) {
|
|
3382
|
+
deleted += chunk.length;
|
|
3383
|
+
batches += 1;
|
|
3384
|
+
} else if (lastError) {
|
|
3385
|
+
errors.push({
|
|
3386
|
+
batchIndex,
|
|
3387
|
+
error: lastError,
|
|
3388
|
+
operationCount: chunk.length
|
|
3389
|
+
});
|
|
3390
|
+
}
|
|
3391
|
+
if (options?.onProgress) {
|
|
3392
|
+
options.onProgress({
|
|
3393
|
+
completedBatches: batches,
|
|
3394
|
+
totalBatches,
|
|
3395
|
+
deletedSoFar: deleted
|
|
3396
|
+
});
|
|
3397
|
+
}
|
|
3398
|
+
}
|
|
3399
|
+
return { deleted, batches, errors };
|
|
3400
|
+
}
|
|
3401
|
+
// --- Cross-scope (collection group) ---
|
|
3402
|
+
async findEdgesGlobal(params, collectionName) {
|
|
3403
|
+
const plan = buildEdgeQueryPlan(params);
|
|
3404
|
+
if (plan.strategy === "get") {
|
|
3405
|
+
throw new FiregraphError(
|
|
3406
|
+
"findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
|
|
3407
|
+
"INVALID_QUERY"
|
|
3408
|
+
);
|
|
3409
|
+
}
|
|
3410
|
+
const name = collectionName ?? this.collectionPath;
|
|
3411
|
+
const scopeNameFilter = {
|
|
3412
|
+
name,
|
|
3413
|
+
isRoot: name === this.collectionPath
|
|
3414
|
+
};
|
|
3415
|
+
const stmt = compileSelectGlobal(
|
|
3416
|
+
this.collectionPath,
|
|
3417
|
+
plan.filters,
|
|
3418
|
+
plan.options,
|
|
3419
|
+
scopeNameFilter
|
|
3420
|
+
);
|
|
3421
|
+
const rows = await this.executor.all(stmt.sql, stmt.params);
|
|
3422
|
+
return rows.map(rowToRecord);
|
|
3423
|
+
}
|
|
3424
|
+
// --- Aggregate ---
|
|
3425
|
+
/**
|
|
3426
|
+
* Run an aggregate query in a single SQL statement. Supports the full
|
|
3427
|
+
* count/sum/avg/min/max set — the SQLite engine evaluates each aggregate
|
|
3428
|
+
* function over the filtered row set and the executor returns one row
|
|
3429
|
+
* with one column per alias. SUM/MIN/MAX of an empty set returns 0
|
|
3430
|
+
* (SQLite's `SUM(NULL) = NULL` is mapped to a clean number for the
|
|
3431
|
+
* cross-backend contract); AVG returns NaN, matching the mathematical
|
|
3432
|
+
* convention and the Firestore Standard helper.
|
|
3433
|
+
*/
|
|
3434
|
+
async aggregate(spec, filters) {
|
|
3435
|
+
const { stmt, aliases } = compileAggregate(
|
|
3436
|
+
this.collectionPath,
|
|
3437
|
+
this.storageScope,
|
|
3438
|
+
spec,
|
|
3439
|
+
filters
|
|
3440
|
+
);
|
|
3441
|
+
const rows = await this.executor.all(stmt.sql, stmt.params);
|
|
3442
|
+
const row = rows[0] ?? {};
|
|
3443
|
+
const out = {};
|
|
3444
|
+
for (const alias of aliases) {
|
|
3445
|
+
const v = row[alias];
|
|
3446
|
+
if (v === null || v === void 0) {
|
|
3447
|
+
const op = spec[alias].op;
|
|
3448
|
+
out[alias] = op === "avg" ? Number.NaN : 0;
|
|
3449
|
+
} else if (typeof v === "bigint") {
|
|
3450
|
+
out[alias] = Number(v);
|
|
3451
|
+
} else if (typeof v === "number") {
|
|
3452
|
+
out[alias] = v;
|
|
3453
|
+
} else {
|
|
3454
|
+
out[alias] = Number(v);
|
|
3455
|
+
}
|
|
3456
|
+
}
|
|
3457
|
+
return out;
|
|
3458
|
+
}
|
|
3459
|
+
// --- Server-side DML ---
|
|
3460
|
+
/**
|
|
3461
|
+
* Delete every row matching `filters` in a single SQL DELETE statement.
|
|
3462
|
+
*
|
|
3463
|
+
* Uses `RETURNING "doc_id"` to count rows touched — the SQLite executor's
|
|
3464
|
+
* `run` returns void, so RETURNING + `all()` is the portable way to learn
|
|
3465
|
+
* how many rows the engine actually deleted. SQLite ≥ 3.35 supports
|
|
3466
|
+
* `DELETE … RETURNING`; better-sqlite3, D1, and DO SQLite all run on a
|
|
3467
|
+
* recent enough engine.
|
|
3468
|
+
*
|
|
3469
|
+
* Single-statement DML doesn't chunk: the engine handles N rows in one
|
|
3470
|
+
* shot, so `BulkOptions.batchSize` is intentionally ignored. The retry
|
|
3471
|
+
* loop here exists only for transient driver errors (e.g. D1 surface
|
|
3472
|
+
* congestion); a permanent failure is surfaced via the `errors` array
|
|
3473
|
+
* with `batchIndex: 0` so callers see the same shape as `bulkRemoveEdges`.
|
|
3474
|
+
*
|
|
3475
|
+
* Subgraph scoping is enforced inside `compileBulkDelete` (the leading
|
|
3476
|
+
* `"scope" = ?` predicate) so this method, like every other backend
|
|
3477
|
+
* surface, naturally honours subgraph isolation.
|
|
3478
|
+
*/
|
|
3479
|
+
async bulkDelete(filters, options) {
|
|
3480
|
+
const stmt = compileBulkDelete(this.collectionPath, this.storageScope, filters);
|
|
3481
|
+
return this.executeDmlWithReturning(stmt, options);
|
|
3482
|
+
}
|
|
3483
|
+
/**
|
|
3484
|
+
* Update every row matching `filters` with `patch.data` in a single SQL
|
|
3485
|
+
* UPDATE statement. The patch is deep-merged into each row's `data`
|
|
3486
|
+
* column via the same `flattenPatch` → `compileDataOpsExpr` pipeline that
|
|
3487
|
+
* `compileUpdate` (single-row) uses.
|
|
3488
|
+
*
|
|
3489
|
+
* Same contract notes as `bulkDelete` apply: single-statement, no
|
|
3490
|
+
* chunking, `RETURNING "doc_id"` for the affected count, retry loop for
|
|
3491
|
+
* transient driver errors.
|
|
3492
|
+
*/
|
|
3493
|
+
async bulkUpdate(filters, patch, options) {
|
|
3494
|
+
const stmt = compileBulkUpdate(
|
|
3495
|
+
this.collectionPath,
|
|
3496
|
+
this.storageScope,
|
|
3497
|
+
filters,
|
|
3498
|
+
patch.data,
|
|
3499
|
+
Date.now()
|
|
3500
|
+
);
|
|
3501
|
+
return this.executeDmlWithReturning(stmt, options);
|
|
3502
|
+
}
|
|
3503
|
+
/**
|
|
3504
|
+
* Multi-source fan-out — `query.join` capability.
|
|
3505
|
+
*
|
|
3506
|
+
* Issues a single `SELECT … WHERE "aUid" IN (?, ?, …)` statement that
|
|
3507
|
+
* matches every edge from every source UID in one round trip. When
|
|
3508
|
+
* `params.hydrate === true`, follows up with a second statement that
|
|
3509
|
+
* fetches the target node rows; both queries hit the same table so
|
|
3510
|
+
* the executor amortises connection / parsing cost across them.
|
|
3511
|
+
*
|
|
3512
|
+
* Empty `params.sources` short-circuits to an empty result without
|
|
3513
|
+
* touching the executor — `IN ()` is not valid SQL.
|
|
3514
|
+
*
|
|
3515
|
+
* Per-source ordering / strict per-source LIMIT enforcement is NOT
|
|
3516
|
+
* implemented here; see the `ExpandParams.limitPerSource` JSDoc and
|
|
3517
|
+
* `compileExpand` for the cap semantics. Strict per-source caps would
|
|
3518
|
+
* require window functions and were judged out of scope for the
|
|
3519
|
+
* round-trip-collapse goal.
|
|
3520
|
+
*/
|
|
3521
|
+
async expand(params) {
|
|
3522
|
+
if (params.sources.length === 0) {
|
|
3523
|
+
return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
|
|
3524
|
+
}
|
|
3525
|
+
const stmt = compileExpand(this.collectionPath, this.storageScope, params);
|
|
3526
|
+
const rows = await this.executor.all(stmt.sql, stmt.params);
|
|
3527
|
+
const edges = rows.map(rowToRecord);
|
|
3528
|
+
if (!params.hydrate) {
|
|
3529
|
+
return { edges };
|
|
3530
|
+
}
|
|
3531
|
+
const direction = params.direction ?? "forward";
|
|
3532
|
+
const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
|
|
3533
|
+
const uniqueTargets = [...new Set(targetUids)];
|
|
3534
|
+
if (uniqueTargets.length === 0) {
|
|
3535
|
+
return { edges, targets: [] };
|
|
3536
|
+
}
|
|
3537
|
+
const hydrateStmt = compileExpandHydrate(this.collectionPath, this.storageScope, uniqueTargets);
|
|
3538
|
+
const hydrateRows = await this.executor.all(hydrateStmt.sql, hydrateStmt.params);
|
|
3539
|
+
const byUid = /* @__PURE__ */ new Map();
|
|
3540
|
+
for (const row of hydrateRows) {
|
|
3541
|
+
const node = rowToRecord(row);
|
|
3542
|
+
byUid.set(node.bUid, node);
|
|
3543
|
+
}
|
|
3544
|
+
const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
|
|
3545
|
+
return { edges, targets };
|
|
3546
|
+
}
|
|
3547
|
+
/**
|
|
3548
|
+
* Server-side projection — `query.select` capability.
|
|
3549
|
+
*
|
|
3550
|
+
* Issues a single `SELECT json_extract(data, '$.f1'), …` statement that
|
|
3551
|
+
* returns only the requested fields. The compiler emits one column per
|
|
3552
|
+
* unique field plus a paired `json_type` column for `data.*` projections
|
|
3553
|
+
* so the decoder can recover JSON-encoded objects/arrays without a
|
|
3554
|
+
* second round trip. Migrations are NOT applied — the caller asked for
|
|
3555
|
+
* a partial shape, and rehydrating that into the migration pipeline
|
|
3556
|
+
* would require synthesising every absent field.
|
|
3557
|
+
*
|
|
3558
|
+
* The wire-payload reduction is the entire reason this method exists:
|
|
3559
|
+
* a list view that only needs `title` / `date` no longer drags the
|
|
3560
|
+
* full `data` JSON across the network. Callers that need the full
|
|
3561
|
+
* record should use `findEdges` (with migration support).
|
|
3562
|
+
*/
|
|
3563
|
+
async findEdgesProjected(select, filters, options) {
|
|
3564
|
+
const { stmt, columns } = compileFindEdgesProjected(
|
|
3565
|
+
this.collectionPath,
|
|
3566
|
+
this.storageScope,
|
|
3567
|
+
select,
|
|
3568
|
+
filters,
|
|
3569
|
+
options
|
|
3570
|
+
);
|
|
3571
|
+
const rows = await this.executor.all(stmt.sql, stmt.params);
|
|
3572
|
+
return rows.map((row) => decodeProjectedRow(row, columns));
|
|
3573
|
+
}
|
|
3574
|
+
/**
|
|
3575
|
+
* Run a DML statement with `RETURNING "doc_id"` so we can count the
|
|
3576
|
+
* rows the engine touched, with the same retry/backoff contract as
|
|
3577
|
+
* `executeChunkedBatches`. Single statement, single batch.
|
|
3578
|
+
*/
|
|
3579
|
+
async executeDmlWithReturning(stmt, options) {
|
|
3580
|
+
const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
|
|
3581
|
+
const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
|
|
3582
|
+
let lastError = null;
|
|
3583
|
+
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
3584
|
+
try {
|
|
3585
|
+
const rows = await this.executor.all(sqlWithReturning, stmt.params);
|
|
3586
|
+
const deleted = rows.length;
|
|
3587
|
+
if (options?.onProgress) {
|
|
3588
|
+
options.onProgress({
|
|
3589
|
+
completedBatches: 1,
|
|
3590
|
+
totalBatches: 1,
|
|
3591
|
+
deletedSoFar: deleted
|
|
3592
|
+
});
|
|
3593
|
+
}
|
|
3594
|
+
return { deleted, batches: 1, errors: [] };
|
|
3595
|
+
} catch (err) {
|
|
3596
|
+
lastError = err instanceof Error ? err : new Error(String(err));
|
|
3597
|
+
if (attempt < maxRetries) {
|
|
3598
|
+
const delay = Math.min(BASE_RETRY_DELAY_MS * Math.pow(2, attempt), MAX_RETRY_DELAY_MS);
|
|
3599
|
+
await sleep(delay);
|
|
3600
|
+
}
|
|
3601
|
+
}
|
|
3602
|
+
}
|
|
3603
|
+
return {
|
|
3604
|
+
deleted: 0,
|
|
3605
|
+
batches: 0,
|
|
3606
|
+
errors: [
|
|
3607
|
+
{
|
|
3608
|
+
batchIndex: 0,
|
|
3609
|
+
error: lastError ?? new Error("bulk DML failed for unknown reason"),
|
|
3610
|
+
operationCount: 0
|
|
3611
|
+
}
|
|
3612
|
+
]
|
|
3613
|
+
};
|
|
3614
|
+
}
|
|
3615
|
+
};
|
|
3616
|
+
/**
 * Factory for a SQLite-backed graph backend.
 *
 * `options.storageScope` and `options.scopePath` both default to the
 * empty string (nullish values included) before being handed to the
 * backend implementation.
 */
function createSqliteBackend(executor, tableName, options = {}) {
  return new SqliteBackendImpl(
    executor,
    tableName,
    options.storageScope ?? "",
    options.scopePath ?? ""
  );
}
|
|
3621
|
+
// Annotate the CommonJS export names for ESM import in node:
// (Bundler-generated marker: the `0 &&` guard makes this dead code at
// runtime, but Node's static CJS named-export detection reads the object
// literal so these names can be imported by name from ESM.)
0 && (module.exports = {
  META_EDGE_TYPE,
  META_NODE_TYPE,
  createGraphClient,
  createMergedRegistry,
  createRegistry,
  createSqliteBackend,
  generateId
});
|
|
3631
|
+
//# sourceMappingURL=index.cjs.map
|