@typicalday/firegraph 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +93 -90
- package/bin/firegraph.mjs +21 -7
- package/dist/backend-U-MLShlg.d.ts +97 -0
- package/dist/backend-np4gEVhB.d.cts +97 -0
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +7 -6
- package/dist/backend.d.ts +7 -6
- package/dist/backend.js +1 -1
- package/dist/backend.js.map +1 -1
- package/dist/{chunk-EVUM6ORB.js → chunk-6SB34IPQ.js} +76 -8
- package/dist/chunk-6SB34IPQ.js.map +1 -0
- package/dist/{chunk-SU4FNLC3.js → chunk-EEKWRX5E.js} +1 -1
- package/dist/{chunk-SU4FNLC3.js.map → chunk-EEKWRX5E.js.map} +1 -1
- package/dist/{chunk-YLGXLEUE.js → chunk-GJVVRTQT.js} +5 -14
- package/dist/chunk-GJVVRTQT.js.map +1 -0
- package/dist/{chunk-GLOVWKQH.js → chunk-R7CRGYY4.js} +1 -1
- package/dist/{chunk-GLOVWKQH.js.map → chunk-R7CRGYY4.js.map} +1 -1
- package/dist/{do-sqlite.cjs → cloudflare/index.cjs} +1659 -1422
- package/dist/cloudflare/index.cjs.map +1 -0
- package/dist/cloudflare/index.d.cts +529 -0
- package/dist/cloudflare/index.d.ts +529 -0
- package/dist/cloudflare/index.js +934 -0
- package/dist/cloudflare/index.js.map +1 -0
- package/dist/codegen/index.cjs +4 -13
- package/dist/codegen/index.cjs.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/codegen/index.js +1 -1
- package/dist/index.cjs +144 -132
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +116 -27
- package/dist/index.d.ts +116 -27
- package/dist/index.js +77 -123
- package/dist/index.js.map +1 -1
- package/dist/query-client/index.cjs.map +1 -1
- package/dist/query-client/index.js +1 -1
- package/dist/{scope-path-BtajqNK5.d.ts → scope-path-B1G3YiA7.d.cts} +5 -100
- package/dist/{scope-path-D2mNENJ-.d.cts → scope-path-B1G3YiA7.d.ts} +5 -100
- package/dist/{types-DfWVTsMn.d.ts → types-BGWxcpI_.d.cts} +92 -1
- package/dist/{types-DfWVTsMn.d.cts → types-BGWxcpI_.d.ts} +92 -1
- package/package.json +13 -17
- package/dist/chunk-EVUM6ORB.js.map +0 -1
- package/dist/chunk-SZ6W4VAS.js +0 -701
- package/dist/chunk-SZ6W4VAS.js.map +0 -1
- package/dist/chunk-YLGXLEUE.js.map +0 -1
- package/dist/d1.cjs +0 -2421
- package/dist/d1.cjs.map +0 -1
- package/dist/d1.d.cts +0 -54
- package/dist/d1.d.ts +0 -54
- package/dist/d1.js +0 -76
- package/dist/d1.js.map +0 -1
- package/dist/do-sqlite.cjs.map +0 -1
- package/dist/do-sqlite.d.cts +0 -41
- package/dist/do-sqlite.d.ts +0 -41
- package/dist/do-sqlite.js +0 -79
- package/dist/do-sqlite.js.map +0 -1
- package/dist/editor/client/assets/index-Bq2bfzeY.js +0 -411
- package/dist/editor/client/assets/index-CJ4m_EOL.css +0 -1
- package/dist/editor/client/index.html +0 -16
- package/dist/editor/server/index.mjs +0 -51511
|
@@ -0,0 +1,934 @@
|
|
|
1
|
+
import {
|
|
2
|
+
DEFAULT_CORE_INDEXES,
|
|
3
|
+
NODE_RELATION,
|
|
4
|
+
buildEdgeQueryPlan,
|
|
5
|
+
computeEdgeDocId,
|
|
6
|
+
computeNodeDocId,
|
|
7
|
+
createGraphClientFromBackend
|
|
8
|
+
} from "../chunk-6SB34IPQ.js";
|
|
9
|
+
import {
|
|
10
|
+
FiregraphError
|
|
11
|
+
} from "../chunk-R7CRGYY4.js";
|
|
12
|
+
|
|
13
|
+
// src/timestamp.ts
|
|
14
|
+
var GraphTimestampImpl = class _TimestampImpl {
  /**
   * Firestore-compatible timestamp value split into whole seconds since the
   * Unix epoch plus a nanosecond remainder.
   * @param seconds whole seconds since the epoch
   * @param nanoseconds sub-second remainder, in nanoseconds
   */
  constructor(seconds, nanoseconds) {
    this.seconds = seconds;
    this.nanoseconds = nanoseconds;
  }
  /** Build a timestamp from epoch milliseconds. */
  static fromMillis(ms) {
    const seconds = Math.floor(ms / 1e3);
    const nanoseconds = (ms - seconds * 1e3) * 1e6;
    return new _TimestampImpl(seconds, nanoseconds);
  }
  /** Timestamp for the current wall-clock instant. */
  static now() {
    return _TimestampImpl.fromMillis(Date.now());
  }
  /** Epoch milliseconds; sub-millisecond nanoseconds are truncated. */
  toMillis() {
    return Math.floor(this.nanoseconds / 1e6) + this.seconds * 1e3;
  }
  /** JS Date at the same instant (millisecond precision). */
  toDate() {
    return new Date(this.toMillis());
  }
  /** Plain-object form, used when the value is JSON-serialized. */
  toJSON() {
    return { seconds: this.seconds, nanoseconds: this.nanoseconds };
  }
};
|
|
37
|
+
|
|
38
|
+
// src/internal/sqlite-index-ddl.ts
|
|
39
|
+
// Identifier whitelist shared by the DDL builders in this module.
var IDENT_RE = /^[A-Za-z_][A-Za-z0-9_]*$/;
// Keys allowed inside a json_extract('$.…') path (also permits '-').
var JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
/**
 * Double-quote a SQL identifier for use in index DDL, rejecting anything
 * that is not a plain identifier so injection is impossible.
 */
function quoteIdent(name) {
  if (IDENT_RE.test(name)) {
    return `"${name}"`;
  }
  throw new FiregraphError(
    `Invalid SQL identifier in index DDL: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`,
    "INVALID_INDEX"
  );
}
|
|
50
|
+
/**
 * 32-bit FNV-1a hash of a string, rendered as exactly 8 lowercase hex
 * digits. Hashes UTF-16 code units (charCodeAt), matching the fingerprint
 * format used in generated index names.
 */
function fnv1a32(str) {
  let hash = 2166136261;
  let i = 0;
  while (i < str.length) {
    hash = Math.imul(hash ^ str.charCodeAt(i), 16777619);
    i += 1;
  }
  return (hash >>> 0).toString(16).padStart(8, "0");
}
|
|
58
|
+
/**
 * Normalize an IndexSpec field list: bare strings become ascending fields,
 * objects keep their path with `desc` coerced to a boolean. Anything else
 * is rejected.
 */
function normalizeFields(fields) {
  const normalized = [];
  for (const field of fields) {
    if (typeof field === "string") {
      normalized.push({ path: field, desc: false });
      continue;
    }
    if (typeof field.path !== "string" || field.path === "") {
      throw new FiregraphError(
        `IndexSpec field must be a string or { path: string, desc?: boolean }; got ${JSON.stringify(field)}`,
        "INVALID_INDEX"
      );
    }
    normalized.push({ path: field.path, desc: Boolean(field.desc) });
  }
  return normalized;
}
|
|
70
|
+
/**
 * Deterministic 8-hex-digit fingerprint of an index spec plus its leading
 * columns; used to derive a stable index name so re-running DDL is a no-op.
 */
function specFingerprint(spec, leadingColumns) {
  return fnv1a32(
    JSON.stringify({
      lead: leadingColumns,
      fields: normalizeFields(spec.fields),
      where: spec.where ?? ""
    })
  );
}
|
|
78
|
+
/**
 * Compile a firegraph field path into a SQL expression for index DDL:
 * mapped top-level fields become quoted column names; `data` and
 * `data.…` paths become json_extract expressions with validated keys.
 */
function compileFieldExpr(path, fieldToColumn) {
  const mapped = fieldToColumn[path];
  if (mapped) {
    return quoteIdent(mapped);
  }
  if (path === "data") {
    return `json_extract("data", '$')`;
  }
  if (path.startsWith("data.")) {
    const suffix = path.slice("data.".length);
    for (const component of suffix.split(".")) {
      if (JSON_PATH_KEY_RE.test(component)) continue;
      throw new FiregraphError(
        `IndexSpec data path "${path}" has invalid component "${component}". Each component must match /^[A-Za-z_][A-Za-z0-9_-]*$/.`,
        "INVALID_INDEX"
      );
    }
    return `json_extract("data", '$.${suffix}')`;
  }
  throw new FiregraphError(
    `IndexSpec field "${path}" is not a known firegraph field. Use a top-level field (aType, aUid, axbType, bType, bUid, createdAt, updatedAt, v) or a dotted data path like 'data.status'.`,
    "INVALID_INDEX"
  );
}
|
|
102
|
+
/**
 * Build a `CREATE INDEX IF NOT EXISTS` statement for one IndexSpec.
 * Leading columns (if any) come first, then the spec's own fields; the
 * index name embeds the spec fingerprint so identical specs are idempotent.
 */
function buildIndexDDL(spec, options) {
  const { table, fieldToColumn, leadingColumns = [] } = options;
  if (!spec.fields || spec.fields.length === 0) {
    throw new FiregraphError("IndexSpec.fields must be a non-empty array", "INVALID_INDEX");
  }
  const fieldColumns = normalizeFields(spec.fields).map((field) => {
    const expr = compileFieldExpr(field.path, fieldToColumn);
    return field.desc ? `${expr} DESC` : expr;
  });
  const columns = [...leadingColumns.map((col) => quoteIdent(col)), ...fieldColumns];
  const indexName = `${table}_idx_${specFingerprint(spec, leadingColumns)}`;
  const base = `CREATE INDEX IF NOT EXISTS ${quoteIdent(indexName)} ON ${quoteIdent(table)}(${columns.join(", ")})`;
  // NOTE: spec.where is appended verbatim; callers own its safety.
  return spec.where ? `${base} WHERE ${spec.where}` : base;
}
|
|
124
|
+
/**
 * Drop index specs whose fingerprint duplicates an earlier one, keeping
 * first-seen order.
 */
function dedupeIndexSpecs(specs, leadingColumns = []) {
  const byFingerprint = /* @__PURE__ */ new Map();
  for (const spec of specs) {
    const fp = specFingerprint(spec, leadingColumns);
    if (!byFingerprint.has(fp)) {
      byFingerprint.set(fp, spec);
    }
  }
  return [...byFingerprint.values()];
}
|
|
135
|
+
|
|
136
|
+
// src/cloudflare/schema.ts
|
|
137
|
+
// Maps firegraph logical field names to the physical SQLite column names of
// the DO table (see buildDOSchemaStatements). `data` is deliberately absent:
// data paths are compiled to json_extract expressions rather than columns.
var DO_FIELD_TO_COLUMN = {
  aType: "a_type",
  aUid: "a_uid",
  axbType: "axb_type",
  bType: "b_type",
  bUid: "b_uid",
  v: "v",
  createdAt: "created_at",
  updatedAt: "updated_at"
};
|
|
147
|
+
// Identifier whitelist, duplicated from the index-DDL module so this
// schema module stands alone in the bundle.
var IDENT_RE2 = /^[A-Za-z_][A-Za-z0-9_]*$/;
/**
 * Reject table names that are not plain SQL identifiers. Throws a plain
 * Error (not FiregraphError): this fires at DO construction time.
 */
function validateDOTableName(name) {
  if (IDENT_RE2.test(name)) return;
  throw new Error(`Invalid SQL identifier: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`);
}
|
|
153
|
+
/** Validate then double-quote an identifier for DO SQL statements. */
function quoteDOIdent(name) {
  validateDOTableName(name);
  return '"' + name + '"';
}
|
|
157
|
+
/**
 * Build the full DDL for one DO: the base table plus every index.
 *
 * Index sources, in order: `options.coreIndexes` when provided (otherwise
 * a copy of DEFAULT_CORE_INDEXES), then any per-entry indexes contributed
 * by `options.registry`. Duplicates (by spec fingerprint) are dropped.
 * All statements use IF NOT EXISTS, so re-running them is idempotent.
 *
 * @param table   SQLite table name; validated/quoted by quoteDOIdent.
 * @param options { coreIndexes?, registry? }
 * @returns array of DDL statement strings (CREATE TABLE first).
 */
function buildDOSchemaStatements(table, options = {}) {
  const t = quoteDOIdent(table);
  const statements = [
    `CREATE TABLE IF NOT EXISTS ${t} (
      doc_id TEXT NOT NULL PRIMARY KEY,
      a_type TEXT NOT NULL,
      a_uid TEXT NOT NULL,
      axb_type TEXT NOT NULL,
      b_type TEXT NOT NULL,
      b_uid TEXT NOT NULL,
      data TEXT NOT NULL,
      v INTEGER,
      created_at INTEGER NOT NULL,
      updated_at INTEGER NOT NULL
    )`
  ];
  // Spread copies DEFAULT_CORE_INDEXES so callers can't mutate the default.
  const core = options.coreIndexes ?? [...DEFAULT_CORE_INDEXES];
  const fromRegistry = options.registry?.entries().flatMap((e) => e.indexes ?? []) ?? [];
  const deduped = dedupeIndexSpecs([...core, ...fromRegistry]);
  for (const spec of deduped) {
    statements.push(buildIndexDDL(spec, { table, fieldToColumn: DO_FIELD_TO_COLUMN }));
  }
  return statements;
}
|
|
181
|
+
|
|
182
|
+
// src/cloudflare/sql.ts
|
|
183
|
+
/**
 * Resolve a filter field to a SQL expression for the DO backend: mapped
 * top-level fields become quoted columns; `data` / `data.…` paths become
 * json_extract expressions with each key validated.
 */
function compileFieldRef(field) {
  const column = DO_FIELD_TO_COLUMN[field];
  if (column) {
    return { expr: quoteDOIdent(column) };
  }
  if (field.startsWith("data.")) {
    const jsonPath = field.slice(5);
    for (const key of jsonPath.split(".")) {
      validateJsonPathKey(key);
    }
    return { expr: `json_extract("data", '$.${jsonPath}')` };
  }
  if (field === "data") {
    return { expr: `json_extract("data", '$')` };
  }
  throw new FiregraphError(
    `DO SQLite backend cannot resolve filter field: ${field}`,
    "INVALID_QUERY"
  );
}
|
|
203
|
+
// Constructor names of Firestore SDK value classes that must never be
// JSON-serialized into a SQL bind parameter (see bindValue).
var FIRESTORE_TYPE_NAMES = /* @__PURE__ */ new Set([
  "Timestamp",
  "GeoPoint",
  "VectorValue",
  "DocumentReference",
  "FieldValue"
]);
/**
 * Return the Firestore class name when `value`'s constructor matches one of
 * the SDK's special value types, otherwise null. Detection is by
 * constructor name only, so it works without importing the SDK.
 */
function isFirestoreSpecialType(value) {
  const name = value.constructor?.name;
  return name && FIRESTORE_TYPE_NAMES.has(name) ? name : null;
}
|
|
215
|
+
/**
 * Convert a JS value into something safe to bind to a SQLite statement:
 * null/undefined → NULL, primitives pass through, Dates become epoch
 * millis, plain objects are JSON-serialized. Firestore SDK value objects
 * are rejected outright.
 */
function bindValue(value) {
  if (value == null) return null;
  const kind = typeof value;
  if (kind === "string" || kind === "number" || kind === "bigint" || kind === "boolean") {
    return value;
  }
  if (value instanceof Date) {
    return value.getTime();
  }
  if (kind === "object") {
    const firestoreType = isFirestoreSpecialType(value);
    if (firestoreType) {
      throw new FiregraphError(
        `DO SQLite backend cannot bind a Firestore ${firestoreType} value \u2014 JSON serialization would silently drop fields and the resulting bind would never match a stored row. Convert to a primitive (e.g. \`ts.toMillis()\` for Timestamp) before filtering or updating.`,
        "INVALID_QUERY"
      );
    }
    return JSON.stringify(value);
  }
  // Remaining cases (symbol, function) are stringified as a last resort.
  return String(value);
}
|
|
233
|
+
// Keys allowed inside a json_extract/json_set '$.…' path on this backend.
var JSON_PATH_KEY_RE2 = /^[A-Za-z_][A-Za-z0-9_-]*$/;
/**
 * Reject JSON path components that could break out of a quoted '$.…'
 * path expression. Returns nothing on success; throws on any other input.
 */
function validateJsonPathKey(key) {
  if (key.length === 0) {
    throw new FiregraphError(
      "DO SQLite backend: empty JSON path component is not allowed",
      "INVALID_QUERY"
    );
  }
  if (JSON_PATH_KEY_RE2.test(key)) return;
  throw new FiregraphError(
    `DO SQLite backend: data field path component "${key}" is not a safe JSON-path identifier. Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceData (full-data overwrite) for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,
    "INVALID_QUERY"
  );
}
|
|
248
|
+
/**
 * Compile one filter into a SQL condition string, appending its bind
 * values to `params` in placeholder order. Simple comparison operators go
 * through a lookup table; array operators use json_each subqueries.
 */
function compileFilter(filter, params) {
  const { expr } = compileFieldRef(filter.field);
  const comparisonSql = {
    "==": "=",
    "!=": "!=",
    "<": "<",
    "<=": "<=",
    ">": ">",
    ">=": ">="
  }[filter.op];
  if (comparisonSql) {
    params.push(bindValue(filter.value));
    return `${expr} ${comparisonSql} ?`;
  }
  switch (filter.op) {
    case "in":
    case "not-in": {
      const values = asArray(filter.value, filter.op);
      const placeholders = values.map(() => "?").join(", ");
      for (const v of values) params.push(bindValue(v));
      return filter.op === "in" ? `${expr} IN (${placeholders})` : `${expr} NOT IN (${placeholders})`;
    }
    case "array-contains":
      params.push(bindValue(filter.value));
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value = ?)`;
    case "array-contains-any": {
      const values = asArray(filter.value, "array-contains-any");
      const placeholders = values.map(() => "?").join(", ");
      for (const v of values) params.push(bindValue(v));
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value IN (${placeholders}))`;
    }
    default:
      throw new FiregraphError(
        `DO SQLite backend does not support filter operator: ${String(filter.op)}`,
        "INVALID_QUERY"
      );
  }
}
|
|
298
|
+
/** Assert that an operator value is a non-empty array and return it. */
function asArray(value, op) {
  if (Array.isArray(value) && value.length > 0) {
    return value;
  }
  throw new FiregraphError(`Operator "${op}" requires a non-empty array value`, "INVALID_QUERY");
}
|
|
304
|
+
/**
 * Compile the ORDER BY suffix for a query, or "" when no ordering was
 * requested. `_params` is accepted for signature symmetry with
 * compileLimit; ORDER BY binds no parameters.
 */
function compileOrderBy(options, _params) {
  const orderBy = options?.orderBy;
  if (!orderBy) return "";
  const { expr } = compileFieldRef(orderBy.field);
  const dir = orderBy.direction === "desc" ? "DESC" : "ASC";
  return ` ORDER BY ${expr} ${dir}`;
}
|
|
311
|
+
/**
 * Compile the LIMIT suffix, pushing the bound limit onto `params`, or
 * return "" when no limit was requested.
 */
function compileLimit(options, params) {
  const limit = options?.limit;
  if (limit === void 0) return "";
  params.push(limit);
  return ` LIMIT ?`;
}
|
|
316
|
+
/**
 * Compile a full SELECT for the DO table: filters ANDed in a WHERE
 * clause, then ORDER BY, then LIMIT. Bind values accumulate in `params`
 * in the same left-to-right order as their placeholders.
 */
function compileDOSelect(table, filters, options) {
  const params = [];
  const conditions = filters.map((f) => compileFilter(f, params));
  const whereClause = conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "";
  let sql = `SELECT * FROM ${quoteDOIdent(table)}${whereClause}`;
  sql += compileOrderBy(options, params);
  sql += compileLimit(options, params);
  return { sql, params };
}
|
|
328
|
+
/** Compile a single-row lookup by primary key. */
function compileDOSelectByDocId(table, docId) {
  const sql = `SELECT * FROM ${quoteDOIdent(table)} WHERE "doc_id" = ? LIMIT 1`;
  return { sql, params: [docId] };
}
|
|
334
|
+
/**
 * Compile an upsert (INSERT OR REPLACE) of one record. `data` is
 * JSON-serialized (defaulting to {}), `v` defaults to NULL, and both
 * timestamp columns are set to `nowMillis`.
 */
function compileDOSet(table, docId, record, nowMillis) {
  const sql = `INSERT OR REPLACE INTO ${quoteDOIdent(table)} (
    doc_id, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
  ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`;
  return {
    sql,
    params: [
      docId,
      record.aType,
      record.aUid,
      record.axbType,
      record.bType,
      record.bUid,
      JSON.stringify(record.data ?? {}),
      record.v ?? null,
      nowMillis,
      nowMillis
    ]
  };
}
|
|
352
|
+
/**
 * Compile an UPDATE for one document.
 *
 * Data handling is either/or: `replaceData` overwrites the whole `data`
 * column; otherwise non-empty `dataFields` are merged in-place via
 * json_set, with every key validated against the safe JSON-path pattern.
 * `v` is only touched when explicitly provided; `updated_at` is always
 * refreshed. Parameter order mirrors placeholder order: data binds, then
 * v, then nowMillis, then docId for the WHERE clause.
 */
function compileDOUpdate(table, docId, update, nowMillis) {
  const setClauses = [];
  const params = [];
  if (update.replaceData) {
    setClauses.push(`"data" = ?`);
    params.push(JSON.stringify(update.replaceData));
  } else if (update.dataFields && Object.keys(update.dataFields).length > 0) {
    const entries = Object.entries(update.dataFields);
    // One "?, ?" pair per field: json_set path followed by its value.
    const pathArgs = entries.map(() => `?, ?`).join(", ");
    // COALESCE guards against a NULL data column so json_set has a base.
    setClauses.push(`"data" = json_set(COALESCE("data", '{}'), ${pathArgs})`);
    for (const [k, v] of entries) {
      validateJsonPathKey(k);
      params.push(`$.${k}`);
      params.push(bindValue(v));
    }
  }
  if (update.v !== void 0) {
    setClauses.push(`"v" = ?`);
    params.push(update.v);
  }
  setClauses.push(`"updated_at" = ?`);
  params.push(nowMillis);
  // WHERE bind goes last, after every SET bind.
  params.push(docId);
  return {
    sql: `UPDATE ${quoteDOIdent(table)} SET ${setClauses.join(", ")} WHERE "doc_id" = ?`,
    params
  };
}
|
|
380
|
+
/** Compile a single-row delete by primary key. */
function compileDODelete(table, docId) {
  const sql = `DELETE FROM ${quoteDOIdent(table)} WHERE "doc_id" = ?`;
  return { sql, params: [docId] };
}
|
|
386
|
+
/** Compile a full-table wipe (used by the destroy path). */
function compileDODeleteAll(table) {
  const sql = `DELETE FROM ${quoteDOIdent(table)}`;
  return { sql, params: [] };
}
|
|
392
|
+
/**
 * Convert a raw SQLite row into the wire-format record: snake_case
 * columns become camelCase fields, `data` is parsed from JSON (empty
 * object when absent), timestamps are normalized to millis, and `v` is
 * included only when the column is non-null.
 */
function rowToDORecord(row) {
  const record = {
    aType: row.a_type,
    aUid: row.a_uid,
    axbType: row.axb_type,
    bType: row.b_type,
    bUid: row.b_uid,
    data: row.data ? JSON.parse(row.data) : {},
    createdAtMs: toMillis(row.created_at),
    updatedAtMs: toMillis(row.updated_at)
  };
  if (row.v != null) {
    record.v = Number(row.v);
  }
  return record;
}
|
|
412
|
+
/**
 * Rebuild a client-facing record from its wire form: millisecond
 * timestamps become GraphTimestampImpl instances, and `v` is carried over
 * only when present on the wire.
 */
function hydrateDORecord(wire) {
  const base = {
    aType: wire.aType,
    aUid: wire.aUid,
    axbType: wire.axbType,
    bType: wire.bType,
    bUid: wire.bUid,
    data: wire.data,
    createdAt: GraphTimestampImpl.fromMillis(wire.createdAtMs),
    updatedAt: GraphTimestampImpl.fromMillis(wire.updatedAtMs)
  };
  return wire.v === void 0 ? base : { ...base, v: wire.v };
}
|
|
428
|
+
/**
 * Coerce a timestamp column value to a number of milliseconds. Accepts
 * number, bigint, or a numeric string; anything else (including
 * non-numeric strings) is rejected.
 */
function toMillis(value) {
  switch (typeof value) {
    case "number":
      return value;
    case "bigint":
      return Number(value);
    case "string": {
      const parsed = Number(value);
      if (Number.isFinite(parsed)) return parsed;
      break;
    }
  }
  throw new FiregraphError(
    `DO SQLite row has non-numeric timestamp column: ${typeof value} (${String(value)})`,
    "INVALID_QUERY"
  );
}
|
|
440
|
+
|
|
441
|
+
// src/cloudflare/backend.ts
|
|
442
|
+
/**
 * Validate one path segment used to build a subgraph storage key: it must
 * be a non-empty string containing no "/" (the key separator).
 */
function validateSegment(value, label) {
  if (value && !value.includes("/")) return;
  throw new FiregraphError(
    `Invalid ${label} for subgraph: "${value}". Must be non-empty and not contain "/".`,
    "INVALID_SUBGRAPH"
  );
}
|
|
450
|
+
/**
 * Build (not throw) the error used by `runTransaction`: interactive
 * transactions have no equivalent on this backend, so callers are steered
 * toward `batch()` instead. Returned rather than thrown so the caller
 * controls when it surfaces.
 */
function transactionsUnsupported() {
  return new FiregraphError(
    "Interactive transactions are not supported by the Cloudflare DO backend. Use `batch()` for atomic multi-write commits, or restructure the read-then-conditional-write as an explicit read \u2192 decide \u2192 batch sequence.",
    "UNSUPPORTED_OPERATION"
  );
}
|
|
456
|
+
/**
 * Write batch for the DO backend: queues set/update/delete operations
 * locally and flushes them as a single `_fgBatch` RPC on commit, so the
 * DO applies them atomically.
 */
var DORPCBatchBackend = class {
  // Queued operations, drained on commit().
  ops = [];
  constructor(getStub) {
    // Stub is resolved lazily so the batch can be created before first use.
    this.getStub = getStub;
  }
  /** Queue an upsert. */
  setDoc(docId, record) {
    this.ops.push({ kind: "set", docId, record });
  }
  /** Queue a partial update. */
  updateDoc(docId, update) {
    this.ops.push({ kind: "update", docId, update });
  }
  /** Queue a delete. */
  deleteDoc(docId) {
    this.ops.push({ kind: "delete", docId });
  }
  /**
   * Flush all queued operations in one RPC. The queue is cleared before
   * the call, so the batch is reusable afterward; an empty queue is a
   * no-op.
   */
  async commit() {
    if (this.ops.length === 0) return;
    const pending = this.ops.splice(0, this.ops.length);
    await this.getStub()._fgBatch(pending);
  }
};
|
|
477
|
+
/**
 * StorageBackend implementation that proxies every operation to a
 * Durable Object stub over RPC (`_fg*` methods on FiregraphDO). One
 * instance maps to one DO, addressed by `storageKey` via idFromName;
 * subgraphs get their own instance (and thus their own DO).
 */
var DORPCBackend = class _DORPCBackend {
  collectionPath = "firegraph";
  // "/"-joined logical scope; "" for a root backend.
  scopePath;
  /** @internal */
  storageKey;
  /** @internal */
  namespace;
  // Lazily returns the live schema registry; see createDOClient.
  registryAccessor;
  /** @internal — see `DORPCBackendOptions.makeSiblingClient` for the union-type rationale. */
  makeSiblingClient;
  // Memoized DO stub; resolved on first access of `stub`.
  cachedStub = null;
  constructor(namespace, options) {
    this.namespace = namespace;
    this.scopePath = options.scopePath ?? "";
    this.storageKey = options.storageKey;
    this.registryAccessor = options.registryAccessor;
    this.makeSiblingClient = options.makeSiblingClient;
  }
  // Resolve (and cache) the DO stub for this backend's storage key.
  get stub() {
    if (!this.cachedStub) {
      const id = this.namespace.idFromName(this.storageKey);
      this.cachedStub = this.namespace.get(id);
    }
    return this.cachedStub;
  }
  // --- Reads ---
  // Fetch one record by doc id; wire form is rehydrated client-side.
  async getDoc(docId) {
    const wire = await this.stub._fgGetDoc(docId);
    return wire ? hydrateDORecord(wire) : null;
  }
  // Run a filtered query on the DO and rehydrate every row.
  async query(filters, options) {
    const wires = await this.stub._fgQuery(filters, options);
    return wires.map(hydrateDORecord);
  }
  // --- Writes ---
  async setDoc(docId, record) {
    return this.stub._fgSetDoc(docId, record);
  }
  async updateDoc(docId, update) {
    return this.stub._fgUpdateDoc(docId, update);
  }
  async deleteDoc(docId) {
    return this.stub._fgDeleteDoc(docId);
  }
  // --- Transactions / batches ---
  // Always throws: interactive transactions can't span an RPC boundary.
  async runTransaction(_fn) {
    void _fn;
    throw transactionsUnsupported();
  }
  // Batches ARE supported: they collapse into one atomic `_fgBatch` RPC.
  createBatch() {
    return new DORPCBatchBackend(() => this.stub);
  }
  // --- Subgraphs ---
  // Derive a child backend whose storage key (and thus DO) is scoped
  // under this one: `<key>/<parentNodeUid>/<name>`.
  subgraph(parentNodeUid, name) {
    validateSegment(parentNodeUid, "parentNodeUid");
    validateSegment(name, "subgraph name");
    const newStorageKey = `${this.storageKey}/${parentNodeUid}/${name}`;
    const newScopePath = this.scopePath ? `${this.scopePath}/${name}` : name;
    return new _DORPCBackend(this.namespace, {
      scopePath: newScopePath,
      storageKey: newStorageKey,
      // Subgraph backends share the same live registry accessor so a cascade
      // invoked on a subgraph client still fans out correctly. The sibling
      // factory is also carried forward so `createSiblingClient` works from
      // any subgraph client in the chain.
      registryAccessor: this.registryAccessor,
      makeSiblingClient: this.makeSiblingClient
    });
  }
  // --- Cascade & bulk ---
  // Delete a node and, unless disabled, recursively destroy every child
  // subgraph DO declared in the registry topology for the node's type.
  async removeNodeCascade(uid, reader, options) {
    const shouldDeleteSubgraphs = options?.deleteSubcollections !== false;
    const registry = this.registryAccessor?.();
    if (shouldDeleteSubgraphs && registry) {
      const node = await reader.getNode(uid);
      if (node) {
        const topology = registry.getSubgraphTopology(node.aType);
        for (const entry of topology) {
          const target = entry.targetGraph;
          const childBackend = this.subgraph(uid, target);
          await childBackend.destroyRecursively(registry);
        }
      }
    }
    return this.stub._fgRemoveNodeCascade(uid);
  }
  // Bulk edge removal is executed entirely inside the DO.
  async bulkRemoveEdges(params, _reader, options) {
    void _reader;
    return this.stub._fgBulkRemoveEdges(params, options);
  }
  // --- Cross-scope queries ---
  //
  // `findEdgesGlobal` is deliberately NOT defined on this class. The
  // GraphClient checks for its presence before running query planning and
  // throws `UNSUPPORTED_OPERATION` when absent, giving the caller an
  // immediate, accurate error. Defining the method with a throwing body
  // would only surface the same error AFTER `checkQuerySafety` had already
  // fired — and for scan-unsafe calls that results in a misleading
  // `QuerySafetyError` ("add filters like aUid+axbType") when no filter
  // combination would actually make the call work on this backend. See the
  // "What's not supported" section in `createDOClient` for the design
  // rationale (no collection-group index across DOs).
  // --- Destroy helpers ---
  /**
   * Wipe this DO's storage. The DO itself can't be deleted — its ID
   * persists forever — but its rows can be emptied, which is what the
   * cascade walk does on every descendant subgraph DO.
   *
   * Exposed on the concrete class (not `StorageBackend`) so generic
   * backend code doesn't reach for it.
   */
  async destroy() {
    await this.stub._fgDestroy();
  }
  /**
   * Tear down every descendant subgraph DO, then wipe this DO's own rows.
   *
   * Invoked by cross-DO cascade: for each node in this DO we enumerate the
   * subgraph topology and recurse into child DOs depth-first before
   * wiping the current DO. The current DO's own rows are destroyed last so
   * that a partial failure mid-recursion leaves the caller's reader able
   * to discover what's still present.
   *
   * @internal
   */
  async destroyRecursively(registry) {
    const nodes = await this.query([{ field: "axbType", op: "==", value: NODE_RELATION }]);
    for (const node of nodes) {
      const topology = registry.getSubgraphTopology(node.aType);
      for (const entry of topology) {
        const target = entry.targetGraph;
        const childBackend = this.subgraph(node.aUid, target);
        await childBackend.destroyRecursively(registry);
      }
    }
    await this.destroy();
  }
};
|
|
615
|
+
|
|
616
|
+
// src/cloudflare/client.ts
|
|
617
|
+
/**
 * Create a graph client backed by a Durable Object namespace.
 *
 * `rootKey` names the root DO (no "/" allowed — it is the storage-key
 * separator). Construction order matters: the backend is built with a
 * `registryAccessor` closure that reads the not-yet-assigned `client`
 * variable; this is safe only because the accessor is invoked lazily
 * (from `removeNodeCascade`), after `client` has been assigned below.
 *
 * @param namespace DO namespace binding
 * @param rootKey   non-empty, "/"-free name for the root DO
 * @param options   client options; `registryMode.collection` may point the
 *                  registry metadata at a separate DO
 * @returns the constructed graph client
 */
function createDOClient(namespace, rootKey, options = {}) {
  if (!rootKey || typeof rootKey !== "string") {
    throw new FiregraphError(
      `createDOClient: rootKey must be a non-empty string, got ${JSON.stringify(rootKey)}.`,
      "INVALID_ARGUMENT"
    );
  }
  if (rootKey.includes("/")) {
    throw new FiregraphError(
      `createDOClient: rootKey must not contain "/". Got: "${rootKey}".`,
      "INVALID_ARGUMENT"
    );
  }
  let client;
  // Lazy bridge from backend → client registry; must never run before the
  // `client = createGraphClientFromBackend(...)` assignment at the bottom.
  const registryAccessor = () => {
    if (!client) {
      throw new FiregraphError(
        "createDOClient: registryAccessor fired before the client was assigned. This indicates a programming error in the DO backend \u2014 the accessor must only be invoked lazily from `removeNodeCascade`, never synchronously from the `DORPCBackend` constructor.",
        "INTERNAL"
      );
    }
    return client.getRegistrySnapshot();
  };
  // Shallow copy so later mutation of `options` can't leak into siblings.
  const siblingOptions = { ...options };
  const makeSiblingClient = (siblingRootKey) => createDOClient(namespace, siblingRootKey, siblingOptions);
  const backend = new DORPCBackend(namespace, {
    scopePath: "",
    storageKey: rootKey,
    registryAccessor,
    makeSiblingClient
  });
  let metaBackend;
  if (options.registryMode?.collection) {
    const metaKey = options.registryMode.collection;
    if (metaKey.includes("/")) {
      throw new FiregraphError(
        `createDOClient: registryMode.collection must not contain "/". Got: "${metaKey}".`,
        "INVALID_ARGUMENT"
      );
    }
    // Only build a separate meta backend when it targets a DIFFERENT DO;
    // same key means registry metadata lives alongside the data.
    if (metaKey !== rootKey) {
      metaBackend = new DORPCBackend(namespace, {
        scopePath: "",
        storageKey: metaKey,
        // Meta backend shares the accessor so its own `removeNodeCascade`
        // (unlikely, but safe) would also see the live registry. Sibling
        // factory is carried for consistency; there's no user-facing path
        // that creates a sibling from the meta backend, but it costs
        // nothing to keep the two backends in sync.
        registryAccessor,
        makeSiblingClient
      });
    }
  }
  client = createGraphClientFromBackend(backend, options, metaBackend);
  return client;
}
|
|
674
|
+
/**
 * Create a second client over the same DO namespace but rooted at a
 * different key, by reaching into the provided client's backend for the
 * sibling factory installed by `createDOClient`. Only works for
 * DO-backed clients.
 */
function createSiblingClient(client, siblingRootKey) {
  if (!siblingRootKey || typeof siblingRootKey !== "string") {
    throw new FiregraphError(
      `createSiblingClient: siblingRootKey must be a non-empty string, got ${JSON.stringify(siblingRootKey)}.`,
      "INVALID_ARGUMENT"
    );
  }
  if (siblingRootKey.includes("/")) {
    throw new FiregraphError(
      `createSiblingClient: siblingRootKey must not contain "/". Got: "${siblingRootKey}".`,
      "INVALID_ARGUMENT"
    );
  }
  const impl = client;
  let factory;
  if (typeof impl.getBackend === "function") {
    const backend = impl.getBackend();
    factory = backend && backend.makeSiblingClient;
  }
  if (typeof factory !== "function") {
    throw new FiregraphError(
      "createSiblingClient: the provided client is not backed by a DO client produced by `createDOClient`. Sibling construction is only available for DO-backed clients.",
      "UNSUPPORTED_OPERATION"
    );
  }
  return factory(siblingRootKey);
}
|
|
698
|
+
|
|
699
|
+
// src/cloudflare/do.ts
|
|
700
|
+
// src/cloudflare/do.ts — defaults applied by the FiregraphDO constructor
// whenever the corresponding option is omitted:
//   table        — name of the backing SQLite table.
//   autoMigrate  — run the schema DDL inside blockConcurrencyWhile at startup.
var DEFAULT_OPTIONS = { table: "firegraph", autoMigrate: true };
|
|
704
|
+
// Durable Object class that stores one firegraph graph in the DO's private
// SQLite database and serves it over `_fg`-prefixed RPC methods. Instantiated
// by the Workers runtime with (ctx, env); subclasses pass `options` to tune
// the table name, registry, core indexes, and auto-migration.
var FiregraphDO = class {
  /** @internal — exposed for subclass access, not part of the public RPC. */
  ctx;
  /** @internal — exposed for subclass access; opaque to this class. */
  env;
  /** @internal — table name used by every compiled statement. */
  table;
  /** @internal — registry consulted by `runSchema` for per-entry indexes. */
  registry;
  /** @internal — overrides `DEFAULT_CORE_INDEXES` when set. */
  coreIndexes;
  constructor(ctx, env, options = {}) {
    this.ctx = ctx;
    this.env = env;
    // Validate the table name eagerly so a bad configuration fails at
    // construction time instead of on the first query.
    const table = options.table ?? DEFAULT_OPTIONS.table;
    validateDOTableName(table);
    this.table = table;
    this.registry = options.registry;
    this.coreIndexes = options.coreIndexes;
    const autoMigrate = options.autoMigrate ?? DEFAULT_OPTIONS.autoMigrate;
    if (autoMigrate) {
      // Run the schema DDL before any RPC is serviced; blockConcurrencyWhile
      // defers incoming calls until the callback resolves. `void` marks the
      // promise as deliberately not awaited in the constructor.
      void this.ctx.blockConcurrencyWhile(async () => {
        this.runSchema();
      });
    }
  }
  // ---------------------------------------------------------------------------
  // RPC: reads
  //
  // Method names are prefixed `_fg` so user subclasses can add their own RPC
  // methods without name collisions. The client-side backend in
  // `src/cloudflare/backend.ts` calls these directly on the DO stub.
  // ---------------------------------------------------------------------------
  /** Fetch a single record by doc id; resolves to `null` when absent. */
  async _fgGetDoc(docId) {
    const stmt = compileDOSelectByDocId(this.table, docId);
    const rows = this.execAll(stmt);
    return rows.length === 0 ? null : rowToDORecord(rows[0]);
  }
  /** Run a compiled filtered select and map every row to a DO record. */
  async _fgQuery(filters, options) {
    const stmt = compileDOSelect(this.table, filters, options);
    const rows = this.execAll(stmt);
    return rows.map(rowToDORecord);
  }
  // ---------------------------------------------------------------------------
  // RPC: writes
  // ---------------------------------------------------------------------------
  /** Write a full record under `docId`, stamped with the current wall clock. */
  async _fgSetDoc(docId, record) {
    const stmt = compileDOSet(this.table, docId, record, Date.now());
    this.execRun(stmt);
  }
  /**
   * Apply a partial update to an existing document. Appends
   * `RETURNING "doc_id"` so a zero-row result reveals that no document
   * matched, in which case a NOT_FOUND FiregraphError is thrown.
   */
  async _fgUpdateDoc(docId, update) {
    const stmt = compileDOUpdate(this.table, docId, update, Date.now());
    const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
    const rows = this.ctx.storage.sql.exec(sqlWithReturning, ...stmt.params).toArray();
    if (rows.length === 0) {
      throw new FiregraphError(`updateDoc: no document found for doc_id=${docId}`, "NOT_FOUND");
    }
  }
  /** Delete a document by id. Deleting a missing document is a silent no-op. */
  async _fgDeleteDoc(docId) {
    const stmt = compileDODelete(this.table, docId);
    this.execRun(stmt);
  }
  // ---------------------------------------------------------------------------
  // RPC: batch
  // ---------------------------------------------------------------------------
  /**
   * Execute a list of write ops atomically. DO SQLite's `transactionSync`
   * provides real atomicity — either every statement commits or none do.
   * No statement-count cap applies (contrast with D1's ~100-statement batch
   * limit), so the caller can submit as many ops as they like in one call.
   *
   * NOTE: unlike `_fgUpdateDoc`, a batched "update" op that matches no row
   * does not throw — no RETURNING/row-count check is performed here.
   */
  async _fgBatch(ops) {
    if (ops.length === 0) return;
    // Compile every op against a single timestamp so all writes in the
    // batch carry the same modification time.
    const now = Date.now();
    const statements = ops.map((op) => {
      switch (op.kind) {
        case "set":
          return compileDOSet(this.table, op.docId, op.record, now);
        case "update":
          return compileDOUpdate(this.table, op.docId, op.update, now);
        case "delete":
          return compileDODelete(this.table, op.docId);
      }
    });
    this.ctx.storage.transactionSync(() => {
      for (const stmt of statements) {
        this.ctx.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
      }
    });
  }
  // ---------------------------------------------------------------------------
  // RPC: cascade + bulk (local DO only)
  //
  // These cascade *within this DO*. Subgraph DOs (nested under this node) are
  // not reachable from here — the client-side `DORPCBackend.removeNodeCascade`
  // consults the registry topology to discover descendant subgraph DOs and
  // fans out explicit `_fgDestroy` calls to each before invoking this method.
  // Without that topology the DO has no way to enumerate its children.
  // ---------------------------------------------------------------------------
  /**
   * Delete a node document plus every edge touching it, in one transaction.
   * Returns `{ deleted, batches, errors, edgesDeleted, nodeDeleted }`; a
   * failed transaction is reported in-band via `errors` rather than thrown.
   */
  async _fgRemoveNodeCascade(uid) {
    // Collect every row where the uid appears on either side of the relation.
    const outgoingStmt = compileDOSelect(this.table, [{ field: "aUid", op: "==", value: uid }]);
    const incomingStmt = compileDOSelect(this.table, [{ field: "bUid", op: "==", value: uid }]);
    const outgoingRows = this.execAll(outgoingStmt);
    const incomingRows = this.execAll(incomingStmt);
    const seen = /* @__PURE__ */ new Set();
    const edgeDocIds = [];
    let nodeExists = false;
    for (const row of [...outgoingRows, ...incomingRows]) {
      const axbType = row.axb_type;
      const aUid = row.a_uid;
      const bUid = row.b_uid;
      // A self-referencing NODE_RELATION row is the node document itself,
      // not an edge — record its existence and handle it separately below.
      if (axbType === NODE_RELATION && aUid === bUid) {
        nodeExists = true;
        continue;
      }
      // Dedupe: a self-loop edge appears in both the outgoing and incoming
      // result sets but must be deleted only once.
      const docId = computeEdgeDocId(aUid, axbType, bUid);
      if (!seen.has(docId)) {
        seen.add(docId);
        edgeDocIds.push(docId);
      }
    }
    const statements = edgeDocIds.map((id) => compileDODelete(this.table, id));
    if (nodeExists) {
      statements.push(compileDODelete(this.table, computeNodeDocId(uid)));
    }
    if (statements.length === 0) {
      // Nothing matched: neither the node document nor any incident edge.
      return {
        deleted: 0,
        batches: 0,
        errors: [],
        edgesDeleted: 0,
        nodeDeleted: false
      };
    }
    try {
      this.ctx.storage.transactionSync(() => {
        for (const stmt of statements) {
          this.ctx.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
        }
      });
      return {
        deleted: statements.length,
        batches: 1,
        errors: [],
        edgesDeleted: edgeDocIds.length,
        nodeDeleted: nodeExists
      };
    } catch (err) {
      // Failure is reported in-band; transactionSync rolled everything back,
      // so zero deletions are counted.
      const error = err instanceof Error ? err : new Error(String(err));
      return {
        deleted: 0,
        batches: 0,
        errors: [{ batchIndex: 0, error, operationCount: statements.length }],
        edgesDeleted: 0,
        nodeDeleted: false
      };
    }
  }
  /**
   * Delete every edge matched by `params` in a single transaction and return
   * `{ deleted, batches, errors }`. `_options` is accepted but unused here —
   * presumably kept for RPC-signature parity with other backends; confirm
   * against `backend.ts` callers.
   */
  async _fgBulkRemoveEdges(params, _options) {
    const plan = buildEdgeQueryPlan(params);
    let docIds;
    if (plan.strategy === "get") {
      // Fully-specified edge: probe for the single doc id directly.
      const existsStmt = compileDOSelectByDocId(this.table, plan.docId);
      const rows = this.execAll(existsStmt);
      docIds = rows.length > 0 ? [plan.docId] : [];
    } else {
      // Partial match: select candidate rows, then recompute each doc id.
      const selectStmt = compileDOSelect(this.table, plan.filters, plan.options);
      const rows = this.execAll(selectStmt);
      docIds = rows.map(
        (row) => computeEdgeDocId(row.a_uid, row.axb_type, row.b_uid)
      );
    }
    if (docIds.length === 0) {
      return { deleted: 0, batches: 0, errors: [] };
    }
    const deleteStmts = docIds.map((id) => compileDODelete(this.table, id));
    try {
      this.ctx.storage.transactionSync(() => {
        for (const stmt of deleteStmts) {
          this.ctx.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
        }
      });
      return { deleted: deleteStmts.length, batches: 1, errors: [] };
    } catch (err) {
      // Same in-band error convention as `_fgRemoveNodeCascade`.
      const error = err instanceof Error ? err : new Error(String(err));
      return {
        deleted: 0,
        batches: 0,
        errors: [{ batchIndex: 0, error, operationCount: deleteStmts.length }]
      };
    }
  }
  // ---------------------------------------------------------------------------
  // RPC: admin
  // ---------------------------------------------------------------------------
  /**
   * Wipe every row. Called by the client when tearing down a subgraph DO as
   * part of cascade — the DO itself can't be destroyed (DO IDs persist
   * forever), but its storage can be emptied.
   */
  async _fgDestroy() {
    const stmt = compileDODeleteAll(this.table);
    this.execRun(stmt);
  }
  // ---------------------------------------------------------------------------
  // Internals
  // ---------------------------------------------------------------------------
  /**
   * Execute every schema statement produced by `buildDOSchemaStatements`
   * for this table (honoring the configured registry and core indexes).
   */
  runSchema() {
    const statements = buildDOSchemaStatements(this.table, {
      coreIndexes: this.coreIndexes,
      registry: this.registry
    });
    for (const sql of statements) {
      this.ctx.storage.sql.exec(sql).toArray();
    }
  }
  /** Run a compiled statement and return all result rows. */
  execAll(stmt) {
    return this.ctx.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
  }
  /** Run a compiled statement for its side effects, draining the cursor. */
  execRun(stmt) {
    this.ctx.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
  }
};
|
|
927
|
+
export {
|
|
928
|
+
DORPCBackend,
|
|
929
|
+
FiregraphDO,
|
|
930
|
+
buildDOSchemaStatements,
|
|
931
|
+
createDOClient,
|
|
932
|
+
createSiblingClient
|
|
933
|
+
};
|
|
934
|
+
//# sourceMappingURL=index.js.map
|