@typicalday/firegraph 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +317 -73
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +222 -3
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +197 -4
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +930 -3
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +213 -12
- package/dist/cloudflare/index.d.ts +213 -12
- package/dist/cloudflare/index.js +562 -281
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +590 -550
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +9 -37
- package/dist/index.d.ts +9 -37
- package/dist/index.js +178 -555
- package/dist/index.js.map +1 -1
- package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
- package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-BsR0lnFL.d.ts +0 -200
- package/dist/backend-Ct-fLlkG.d.cts +0 -200
- package/dist/chunk-AWW4MUJ5.js.map +0 -1
- package/dist/chunk-HONQY4HF.js.map +0 -1
- package/dist/types-DxYLy8Ol.d.cts +0 -770
- package/dist/types-DxYLy8Ol.d.ts +0 -770
package/dist/cloudflare/index.js
CHANGED
|
@@ -1,5 +1,20 @@
|
|
|
1
1
|
import {
|
|
2
|
-
|
|
2
|
+
GraphTimestampImpl,
|
|
3
|
+
assertJsonSafePayload,
|
|
4
|
+
buildIndexDDL,
|
|
5
|
+
compileDataOpsExpr,
|
|
6
|
+
dedupeIndexSpecs,
|
|
7
|
+
isFirestoreSpecialType,
|
|
8
|
+
validateJsonPathKey
|
|
9
|
+
} from "../chunk-4MMQ5W74.js";
|
|
10
|
+
import {
|
|
11
|
+
DEFAULT_CORE_INDEXES
|
|
12
|
+
} from "../chunk-2DHMNTV6.js";
|
|
13
|
+
import {
|
|
14
|
+
createCapabilities,
|
|
15
|
+
intersectCapabilities
|
|
16
|
+
} from "../chunk-N5HFDWQX.js";
|
|
17
|
+
import {
|
|
3
18
|
META_EDGE_TYPE,
|
|
4
19
|
META_NODE_TYPE,
|
|
5
20
|
NODE_RELATION,
|
|
@@ -10,287 +25,15 @@ import {
|
|
|
10
25
|
createMergedRegistry,
|
|
11
26
|
createRegistry,
|
|
12
27
|
generateId
|
|
13
|
-
} from "../chunk-
|
|
28
|
+
} from "../chunk-WRTFC5NG.js";
|
|
14
29
|
import {
|
|
30
|
+
CapabilityNotSupportedError,
|
|
15
31
|
FiregraphError,
|
|
16
32
|
assertUpdatePayloadExclusive,
|
|
17
33
|
deleteField,
|
|
18
|
-
flattenPatch
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
import {
|
|
22
|
-
SERIALIZATION_TAG
|
|
23
|
-
} from "../chunk-EQJUUVFG.js";
|
|
24
|
-
|
|
25
|
-
// src/internal/sqlite-data-ops.ts
|
|
26
|
-
var FIRESTORE_TYPE_NAMES = /* @__PURE__ */ new Set([
|
|
27
|
-
"Timestamp",
|
|
28
|
-
"GeoPoint",
|
|
29
|
-
"VectorValue",
|
|
30
|
-
"DocumentReference",
|
|
31
|
-
"FieldValue"
|
|
32
|
-
]);
|
|
33
|
-
function isFirestoreSpecialType(value) {
|
|
34
|
-
const ctorName = value.constructor?.name;
|
|
35
|
-
if (ctorName && FIRESTORE_TYPE_NAMES.has(ctorName)) return ctorName;
|
|
36
|
-
return null;
|
|
37
|
-
}
|
|
38
|
-
var JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
|
|
39
|
-
function validateJsonPathKey(key, backendLabel) {
|
|
40
|
-
if (key.length === 0) {
|
|
41
|
-
throw new FiregraphError(
|
|
42
|
-
`${backendLabel}: empty JSON path component is not allowed`,
|
|
43
|
-
"INVALID_QUERY"
|
|
44
|
-
);
|
|
45
|
-
}
|
|
46
|
-
if (!JSON_PATH_KEY_RE.test(key)) {
|
|
47
|
-
throw new FiregraphError(
|
|
48
|
-
`${backendLabel}: data field path component "${key}" is not a safe JSON-path identifier. Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceNode/replaceEdge (full-data overwrite) for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,
|
|
49
|
-
"INVALID_QUERY"
|
|
50
|
-
);
|
|
51
|
-
}
|
|
52
|
-
}
|
|
53
|
-
function jsonBind(value, backendLabel) {
|
|
54
|
-
if (value === void 0) return "null";
|
|
55
|
-
if (value !== null && typeof value === "object") {
|
|
56
|
-
const firestoreType = isFirestoreSpecialType(value);
|
|
57
|
-
if (firestoreType) {
|
|
58
|
-
throw new FiregraphError(
|
|
59
|
-
`${backendLabel} cannot persist a Firestore ${firestoreType} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp).`,
|
|
60
|
-
"INVALID_ARGUMENT"
|
|
61
|
-
);
|
|
62
|
-
}
|
|
63
|
-
}
|
|
64
|
-
return JSON.stringify(value);
|
|
65
|
-
}
|
|
66
|
-
function compileDataOpsExpr(ops, base, params, backendLabel) {
|
|
67
|
-
if (ops.length === 0) return null;
|
|
68
|
-
const deletes = [];
|
|
69
|
-
const sets = [];
|
|
70
|
-
for (const op of ops) (op.delete ? deletes : sets).push(op);
|
|
71
|
-
let expr = base;
|
|
72
|
-
if (deletes.length > 0) {
|
|
73
|
-
const placeholders = deletes.map(() => "?").join(", ");
|
|
74
|
-
expr = `json_remove(${expr}, ${placeholders})`;
|
|
75
|
-
for (const op of deletes) {
|
|
76
|
-
for (const seg of op.path) validateJsonPathKey(seg, backendLabel);
|
|
77
|
-
params.push(`$.${op.path.join(".")}`);
|
|
78
|
-
}
|
|
79
|
-
}
|
|
80
|
-
if (sets.length > 0) {
|
|
81
|
-
const pieces = sets.map(() => "?, json(?)").join(", ");
|
|
82
|
-
expr = `json_set(${expr}, ${pieces})`;
|
|
83
|
-
for (const op of sets) {
|
|
84
|
-
for (const seg of op.path) validateJsonPathKey(seg, backendLabel);
|
|
85
|
-
params.push(`$.${op.path.join(".")}`);
|
|
86
|
-
params.push(jsonBind(op.value, backendLabel));
|
|
87
|
-
}
|
|
88
|
-
}
|
|
89
|
-
return expr;
|
|
90
|
-
}
|
|
91
|
-
|
|
92
|
-
// src/internal/sqlite-payload-guard.ts
|
|
93
|
-
var FIRESTORE_TYPE_NAMES2 = /* @__PURE__ */ new Set([
|
|
94
|
-
"Timestamp",
|
|
95
|
-
"GeoPoint",
|
|
96
|
-
"VectorValue",
|
|
97
|
-
"DocumentReference",
|
|
98
|
-
"FieldValue"
|
|
99
|
-
]);
|
|
100
|
-
function assertJsonSafePayload(data, label) {
|
|
101
|
-
walk(data, [], label);
|
|
102
|
-
}
|
|
103
|
-
function walk(node, path, label) {
|
|
104
|
-
if (node === null || node === void 0) return;
|
|
105
|
-
if (isDeleteSentinel(node)) {
|
|
106
|
-
throw new FiregraphError(
|
|
107
|
-
`${label} backend cannot persist a deleteField() sentinel inside a full-data payload (replaceNode/replaceEdge or first-insert). The sentinel is only valid inside an updateNode/updateEdge dataOps patch. Path: ${formatPath(path)}.`,
|
|
108
|
-
"INVALID_ARGUMENT"
|
|
109
|
-
);
|
|
110
|
-
}
|
|
111
|
-
const t = typeof node;
|
|
112
|
-
if (t === "symbol" || t === "function") {
|
|
113
|
-
throw new FiregraphError(
|
|
114
|
-
`${label} backend cannot persist a value of type ${t}. JSON.stringify drops it silently. Path: ${formatPath(path)}.`,
|
|
115
|
-
"INVALID_ARGUMENT"
|
|
116
|
-
);
|
|
117
|
-
}
|
|
118
|
-
if (t === "bigint") {
|
|
119
|
-
throw new FiregraphError(
|
|
120
|
-
`${label} backend cannot persist a value of type bigint. JSON.stringify cannot serialize this type (throws TypeError). Path: ${formatPath(path)}.`,
|
|
121
|
-
"INVALID_ARGUMENT"
|
|
122
|
-
);
|
|
123
|
-
}
|
|
124
|
-
if (t !== "object") return;
|
|
125
|
-
if (Array.isArray(node)) {
|
|
126
|
-
for (let i = 0; i < node.length; i++) {
|
|
127
|
-
walk(node[i], [...path, String(i)], label);
|
|
128
|
-
}
|
|
129
|
-
return;
|
|
130
|
-
}
|
|
131
|
-
const obj = node;
|
|
132
|
-
if (Object.prototype.hasOwnProperty.call(obj, SERIALIZATION_TAG)) {
|
|
133
|
-
const tagValue = obj[SERIALIZATION_TAG];
|
|
134
|
-
throw new FiregraphError(
|
|
135
|
-
`${label} backend cannot persist an object with a \`${SERIALIZATION_TAG}\` key (value: ${formatTagValue(tagValue)}). Recognised tags are valid only on the Firestore backend (migration-sandbox output); a literal \`${SERIALIZATION_TAG}\` field in user data is reserved and not allowed. Path: ${formatPath(path)}.`,
|
|
136
|
-
"INVALID_ARGUMENT"
|
|
137
|
-
);
|
|
138
|
-
}
|
|
139
|
-
const proto = Object.getPrototypeOf(node);
|
|
140
|
-
if (proto !== null && proto !== Object.prototype) {
|
|
141
|
-
const ctor = node.constructor;
|
|
142
|
-
const ctorName = ctor && typeof ctor.name === "string" ? ctor.name : "<anonymous>";
|
|
143
|
-
if (FIRESTORE_TYPE_NAMES2.has(ctorName)) {
|
|
144
|
-
throw new FiregraphError(
|
|
145
|
-
`${label} backend cannot persist a Firestore ${ctorName} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp, \`{lat,lng}\` for GeoPoint). Path: ${formatPath(path)}.`,
|
|
146
|
-
"INVALID_ARGUMENT"
|
|
147
|
-
);
|
|
148
|
-
}
|
|
149
|
-
if (node instanceof Date) return;
|
|
150
|
-
throw new FiregraphError(
|
|
151
|
-
`${label} backend cannot persist a class instance of type ${ctorName}. Only plain objects, arrays, and primitives round-trip safely through JSON storage. Path: ${formatPath(path)}.`,
|
|
152
|
-
"INVALID_ARGUMENT"
|
|
153
|
-
);
|
|
154
|
-
}
|
|
155
|
-
for (const key of Object.keys(obj)) {
|
|
156
|
-
walk(obj[key], [...path, key], label);
|
|
157
|
-
}
|
|
158
|
-
}
|
|
159
|
-
function formatPath(path) {
|
|
160
|
-
return path.length === 0 ? "<root>" : path.map((p) => JSON.stringify(p)).join(" > ");
|
|
161
|
-
}
|
|
162
|
-
function formatTagValue(value) {
|
|
163
|
-
if (value === null) return "null";
|
|
164
|
-
if (value === void 0) return "undefined";
|
|
165
|
-
if (typeof value === "string") return JSON.stringify(value);
|
|
166
|
-
if (typeof value === "number" || typeof value === "boolean" || typeof value === "bigint") {
|
|
167
|
-
return String(value);
|
|
168
|
-
}
|
|
169
|
-
return typeof value;
|
|
170
|
-
}
|
|
171
|
-
|
|
172
|
-
// src/timestamp.ts
|
|
173
|
-
var GraphTimestampImpl = class _GraphTimestampImpl {
|
|
174
|
-
constructor(seconds, nanoseconds) {
|
|
175
|
-
this.seconds = seconds;
|
|
176
|
-
this.nanoseconds = nanoseconds;
|
|
177
|
-
}
|
|
178
|
-
toDate() {
|
|
179
|
-
return new Date(this.toMillis());
|
|
180
|
-
}
|
|
181
|
-
toMillis() {
|
|
182
|
-
return this.seconds * 1e3 + Math.floor(this.nanoseconds / 1e6);
|
|
183
|
-
}
|
|
184
|
-
toJSON() {
|
|
185
|
-
return { seconds: this.seconds, nanoseconds: this.nanoseconds };
|
|
186
|
-
}
|
|
187
|
-
static fromMillis(ms) {
|
|
188
|
-
const seconds = Math.floor(ms / 1e3);
|
|
189
|
-
const nanoseconds = (ms - seconds * 1e3) * 1e6;
|
|
190
|
-
return new _GraphTimestampImpl(seconds, nanoseconds);
|
|
191
|
-
}
|
|
192
|
-
static now() {
|
|
193
|
-
return _GraphTimestampImpl.fromMillis(Date.now());
|
|
194
|
-
}
|
|
195
|
-
};
|
|
196
|
-
|
|
197
|
-
// src/internal/sqlite-index-ddl.ts
|
|
198
|
-
var IDENT_RE = /^[A-Za-z_][A-Za-z0-9_]*$/;
|
|
199
|
-
var JSON_PATH_KEY_RE2 = /^[A-Za-z_][A-Za-z0-9_-]*$/;
|
|
200
|
-
function quoteIdent(name) {
|
|
201
|
-
if (!IDENT_RE.test(name)) {
|
|
202
|
-
throw new FiregraphError(
|
|
203
|
-
`Invalid SQL identifier in index DDL: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`,
|
|
204
|
-
"INVALID_INDEX"
|
|
205
|
-
);
|
|
206
|
-
}
|
|
207
|
-
return `"${name}"`;
|
|
208
|
-
}
|
|
209
|
-
function fnv1a32(str) {
|
|
210
|
-
let h = 2166136261;
|
|
211
|
-
for (let i = 0; i < str.length; i++) {
|
|
212
|
-
h ^= str.charCodeAt(i);
|
|
213
|
-
h = Math.imul(h, 16777619);
|
|
214
|
-
}
|
|
215
|
-
return (h >>> 0).toString(16).padStart(8, "0");
|
|
216
|
-
}
|
|
217
|
-
function normalizeFields(fields) {
|
|
218
|
-
return fields.map((f) => {
|
|
219
|
-
if (typeof f === "string") return { path: f, desc: false };
|
|
220
|
-
if (!f.path || typeof f.path !== "string") {
|
|
221
|
-
throw new FiregraphError(
|
|
222
|
-
`IndexSpec field must be a string or { path: string, desc?: boolean }; got ${JSON.stringify(f)}`,
|
|
223
|
-
"INVALID_INDEX"
|
|
224
|
-
);
|
|
225
|
-
}
|
|
226
|
-
return { path: f.path, desc: !!f.desc };
|
|
227
|
-
});
|
|
228
|
-
}
|
|
229
|
-
function specFingerprint(spec, leadingColumns) {
|
|
230
|
-
const normalized = {
|
|
231
|
-
lead: leadingColumns,
|
|
232
|
-
fields: normalizeFields(spec.fields),
|
|
233
|
-
where: spec.where ?? ""
|
|
234
|
-
};
|
|
235
|
-
return fnv1a32(JSON.stringify(normalized));
|
|
236
|
-
}
|
|
237
|
-
function compileFieldExpr(path, fieldToColumn) {
|
|
238
|
-
const col = fieldToColumn[path];
|
|
239
|
-
if (col) return quoteIdent(col);
|
|
240
|
-
if (path === "data") {
|
|
241
|
-
return `json_extract("data", '$')`;
|
|
242
|
-
}
|
|
243
|
-
if (path.startsWith("data.")) {
|
|
244
|
-
const suffix = path.slice(5);
|
|
245
|
-
const parts = suffix.split(".");
|
|
246
|
-
for (const part of parts) {
|
|
247
|
-
if (!JSON_PATH_KEY_RE2.test(part)) {
|
|
248
|
-
throw new FiregraphError(
|
|
249
|
-
`IndexSpec data path "${path}" has invalid component "${part}". Each component must match /^[A-Za-z_][A-Za-z0-9_-]*$/.`,
|
|
250
|
-
"INVALID_INDEX"
|
|
251
|
-
);
|
|
252
|
-
}
|
|
253
|
-
}
|
|
254
|
-
return `json_extract("data", '$.${suffix}')`;
|
|
255
|
-
}
|
|
256
|
-
throw new FiregraphError(
|
|
257
|
-
`IndexSpec field "${path}" is not a known firegraph field. Use a top-level field (aType, aUid, axbType, bType, bUid, createdAt, updatedAt, v) or a dotted data path like 'data.status'.`,
|
|
258
|
-
"INVALID_INDEX"
|
|
259
|
-
);
|
|
260
|
-
}
|
|
261
|
-
function buildIndexDDL(spec, options) {
|
|
262
|
-
const { table, fieldToColumn, leadingColumns = [] } = options;
|
|
263
|
-
if (!spec.fields || spec.fields.length === 0) {
|
|
264
|
-
throw new FiregraphError("IndexSpec.fields must be a non-empty array", "INVALID_INDEX");
|
|
265
|
-
}
|
|
266
|
-
const normalized = normalizeFields(spec.fields);
|
|
267
|
-
const hash = specFingerprint(spec, leadingColumns);
|
|
268
|
-
const indexName = `${table}_idx_${hash}`;
|
|
269
|
-
const cols = [];
|
|
270
|
-
for (const col of leadingColumns) {
|
|
271
|
-
cols.push(quoteIdent(col));
|
|
272
|
-
}
|
|
273
|
-
for (const f of normalized) {
|
|
274
|
-
const expr = compileFieldExpr(f.path, fieldToColumn);
|
|
275
|
-
cols.push(f.desc ? `${expr} DESC` : expr);
|
|
276
|
-
}
|
|
277
|
-
let ddl = `CREATE INDEX IF NOT EXISTS ${quoteIdent(indexName)} ON ${quoteIdent(table)}(${cols.join(", ")})`;
|
|
278
|
-
if (spec.where) {
|
|
279
|
-
ddl += ` WHERE ${spec.where}`;
|
|
280
|
-
}
|
|
281
|
-
return ddl;
|
|
282
|
-
}
|
|
283
|
-
function dedupeIndexSpecs(specs, leadingColumns = []) {
|
|
284
|
-
const seen = /* @__PURE__ */ new Set();
|
|
285
|
-
const out = [];
|
|
286
|
-
for (const spec of specs) {
|
|
287
|
-
const fp = specFingerprint(spec, leadingColumns);
|
|
288
|
-
if (seen.has(fp)) continue;
|
|
289
|
-
seen.add(fp);
|
|
290
|
-
out.push(spec);
|
|
291
|
-
}
|
|
292
|
-
return out;
|
|
293
|
-
}
|
|
34
|
+
flattenPatch
|
|
35
|
+
} from "../chunk-TK64DNVK.js";
|
|
36
|
+
import "../chunk-EQJUUVFG.js";
|
|
294
37
|
|
|
295
38
|
// src/cloudflare/schema.ts
|
|
296
39
|
var DO_FIELD_TO_COLUMN = {
|
|
@@ -303,9 +46,9 @@ var DO_FIELD_TO_COLUMN = {
|
|
|
303
46
|
createdAt: "created_at",
|
|
304
47
|
updatedAt: "updated_at"
|
|
305
48
|
};
|
|
306
|
-
var
|
|
49
|
+
var IDENT_RE = /^[A-Za-z_][A-Za-z0-9_]*$/;
|
|
307
50
|
function validateDOTableName(name) {
|
|
308
|
-
if (!
|
|
51
|
+
if (!IDENT_RE.test(name)) {
|
|
309
52
|
throw new Error(`Invalid SQL identifier: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`);
|
|
310
53
|
}
|
|
311
54
|
}
|
|
@@ -313,6 +56,9 @@ function quoteDOIdent(name) {
|
|
|
313
56
|
validateDOTableName(name);
|
|
314
57
|
return `"${name}"`;
|
|
315
58
|
}
|
|
59
|
+
function quoteDOColumnAlias(label) {
|
|
60
|
+
return `"${label.replace(/"/g, '""')}"`;
|
|
61
|
+
}
|
|
316
62
|
function buildDOSchemaStatements(table, options = {}) {
|
|
317
63
|
const t = quoteDOIdent(table);
|
|
318
64
|
const statements = [
|
|
@@ -459,12 +205,223 @@ function compileDOSelect(table, filters, options) {
|
|
|
459
205
|
sql += compileLimit(options, params);
|
|
460
206
|
return { sql, params };
|
|
461
207
|
}
|
|
208
|
+
function compileDOExpand(table, params) {
|
|
209
|
+
if (params.sources.length === 0) {
|
|
210
|
+
throw new FiregraphError(
|
|
211
|
+
"compileDOExpand requires a non-empty sources list \u2014 empty IN () is invalid SQL.",
|
|
212
|
+
"INVALID_QUERY"
|
|
213
|
+
);
|
|
214
|
+
}
|
|
215
|
+
const direction = params.direction ?? "forward";
|
|
216
|
+
const aUidCol = compileFieldRef("aUid").expr;
|
|
217
|
+
const bUidCol = compileFieldRef("bUid").expr;
|
|
218
|
+
const aTypeCol = compileFieldRef("aType").expr;
|
|
219
|
+
const bTypeCol = compileFieldRef("bType").expr;
|
|
220
|
+
const axbTypeCol = compileFieldRef("axbType").expr;
|
|
221
|
+
const sourceColumn = direction === "forward" ? aUidCol : bUidCol;
|
|
222
|
+
const sqlParams = [params.axbType];
|
|
223
|
+
const conditions = [`${axbTypeCol} = ?`];
|
|
224
|
+
const placeholders = params.sources.map(() => "?").join(", ");
|
|
225
|
+
conditions.push(`${sourceColumn} IN (${placeholders})`);
|
|
226
|
+
for (const uid of params.sources) sqlParams.push(uid);
|
|
227
|
+
if (params.aType !== void 0) {
|
|
228
|
+
conditions.push(`${aTypeCol} = ?`);
|
|
229
|
+
sqlParams.push(params.aType);
|
|
230
|
+
}
|
|
231
|
+
if (params.bType !== void 0) {
|
|
232
|
+
conditions.push(`${bTypeCol} = ?`);
|
|
233
|
+
sqlParams.push(params.bType);
|
|
234
|
+
}
|
|
235
|
+
if (params.axbType === NODE_RELATION) {
|
|
236
|
+
conditions.push(`${aUidCol} != ${bUidCol}`);
|
|
237
|
+
}
|
|
238
|
+
let sql = `SELECT * FROM ${quoteDOIdent(table)} WHERE ${conditions.join(" AND ")}`;
|
|
239
|
+
if (params.orderBy) {
|
|
240
|
+
sql += compileOrderBy({ orderBy: params.orderBy }, sqlParams);
|
|
241
|
+
}
|
|
242
|
+
if (params.limitPerSource !== void 0) {
|
|
243
|
+
const totalLimit = params.sources.length * params.limitPerSource;
|
|
244
|
+
sql += ` LIMIT ?`;
|
|
245
|
+
sqlParams.push(totalLimit);
|
|
246
|
+
}
|
|
247
|
+
return { sql, params: sqlParams };
|
|
248
|
+
}
|
|
249
|
+
function compileDOExpandHydrate(table, targetUids) {
|
|
250
|
+
if (targetUids.length === 0) {
|
|
251
|
+
throw new FiregraphError(
|
|
252
|
+
"compileDOExpandHydrate requires a non-empty target list \u2014 empty IN () is invalid SQL.",
|
|
253
|
+
"INVALID_QUERY"
|
|
254
|
+
);
|
|
255
|
+
}
|
|
256
|
+
const placeholders = targetUids.map(() => "?").join(", ");
|
|
257
|
+
const sqlParams = [NODE_RELATION];
|
|
258
|
+
for (const uid of targetUids) sqlParams.push(uid);
|
|
259
|
+
const aUidCol = compileFieldRef("aUid").expr;
|
|
260
|
+
const bUidCol = compileFieldRef("bUid").expr;
|
|
261
|
+
const axbTypeCol = compileFieldRef("axbType").expr;
|
|
262
|
+
return {
|
|
263
|
+
sql: `SELECT * FROM ${quoteDOIdent(table)} WHERE ${axbTypeCol} = ? AND ${aUidCol} = ${bUidCol} AND ${bUidCol} IN (${placeholders})`,
|
|
264
|
+
params: sqlParams
|
|
265
|
+
};
|
|
266
|
+
}
|
|
462
267
|
function compileDOSelectByDocId(table, docId) {
|
|
463
268
|
return {
|
|
464
269
|
sql: `SELECT * FROM ${quoteDOIdent(table)} WHERE "doc_id" = ? LIMIT 1`,
|
|
465
270
|
params: [docId]
|
|
466
271
|
};
|
|
467
272
|
}
|
|
273
|
+
function normalizeDOProjectionField(field) {
|
|
274
|
+
if (field in DO_FIELD_TO_COLUMN) return field;
|
|
275
|
+
if (field === "data" || field.startsWith("data.")) return field;
|
|
276
|
+
return `data.${field}`;
|
|
277
|
+
}
|
|
278
|
+
function compileDOFindEdgesProjected(table, select, filters, options) {
|
|
279
|
+
if (select.length === 0) {
|
|
280
|
+
throw new FiregraphError(
|
|
281
|
+
"compileDOFindEdgesProjected requires a non-empty select list \u2014 an empty projection has no SQL representation distinct from `findEdges`.",
|
|
282
|
+
"INVALID_QUERY"
|
|
283
|
+
);
|
|
284
|
+
}
|
|
285
|
+
const seen = /* @__PURE__ */ new Set();
|
|
286
|
+
const uniqueFields = [];
|
|
287
|
+
for (const f of select) {
|
|
288
|
+
if (!seen.has(f)) {
|
|
289
|
+
seen.add(f);
|
|
290
|
+
uniqueFields.push(f);
|
|
291
|
+
}
|
|
292
|
+
}
|
|
293
|
+
const projections = [];
|
|
294
|
+
const columns = [];
|
|
295
|
+
for (let idx = 0; idx < uniqueFields.length; idx++) {
|
|
296
|
+
const field = uniqueFields[idx];
|
|
297
|
+
const canonical = normalizeDOProjectionField(field);
|
|
298
|
+
const { expr } = compileFieldRef(canonical);
|
|
299
|
+
const alias = quoteDOColumnAlias(field);
|
|
300
|
+
projections.push(`${expr} AS ${alias}`);
|
|
301
|
+
let kind;
|
|
302
|
+
let typeAliasName;
|
|
303
|
+
if (canonical === "data") {
|
|
304
|
+
kind = "data";
|
|
305
|
+
} else if (canonical.startsWith("data.")) {
|
|
306
|
+
kind = "json";
|
|
307
|
+
typeAliasName = `__fg_t_${idx}`;
|
|
308
|
+
const typeAlias = quoteDOColumnAlias(typeAliasName);
|
|
309
|
+
projections.push(`json_type("data", '$.${canonical.slice(5)}') AS ${typeAlias}`);
|
|
310
|
+
} else {
|
|
311
|
+
if (canonical === "v") kind = "builtin-int";
|
|
312
|
+
else if (canonical === "createdAt" || canonical === "updatedAt") kind = "builtin-timestamp";
|
|
313
|
+
else kind = "builtin-text";
|
|
314
|
+
}
|
|
315
|
+
columns.push({ field, kind, typeAlias: typeAliasName });
|
|
316
|
+
}
|
|
317
|
+
const params = [];
|
|
318
|
+
const conditions = [];
|
|
319
|
+
for (const f of filters) {
|
|
320
|
+
conditions.push(compileFilter(f, params));
|
|
321
|
+
}
|
|
322
|
+
const where = conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "";
|
|
323
|
+
let sql = `SELECT ${projections.join(", ")} FROM ${quoteDOIdent(table)}${where}`;
|
|
324
|
+
sql += compileOrderBy(options, params);
|
|
325
|
+
sql += compileLimit(options, params);
|
|
326
|
+
return { stmt: { sql, params }, columns };
|
|
327
|
+
}
|
|
328
|
+
function decodeDOProjectedRow(row, columns) {
|
|
329
|
+
const out = {};
|
|
330
|
+
for (const c of columns) {
|
|
331
|
+
const raw = row[c.field];
|
|
332
|
+
switch (c.kind) {
|
|
333
|
+
case "builtin-text":
|
|
334
|
+
out[c.field] = raw === null || raw === void 0 ? null : String(raw);
|
|
335
|
+
break;
|
|
336
|
+
case "builtin-int":
|
|
337
|
+
if (raw === null || raw === void 0) {
|
|
338
|
+
out[c.field] = null;
|
|
339
|
+
} else if (typeof raw === "bigint") {
|
|
340
|
+
out[c.field] = Number(raw);
|
|
341
|
+
} else if (typeof raw === "number") {
|
|
342
|
+
out[c.field] = raw;
|
|
343
|
+
} else {
|
|
344
|
+
out[c.field] = Number(raw);
|
|
345
|
+
}
|
|
346
|
+
break;
|
|
347
|
+
case "builtin-timestamp": {
|
|
348
|
+
const ms = toMillis(raw);
|
|
349
|
+
out[c.field] = GraphTimestampImpl.fromMillis(ms);
|
|
350
|
+
break;
|
|
351
|
+
}
|
|
352
|
+
case "data":
|
|
353
|
+
if (raw === null || raw === void 0 || raw === "") {
|
|
354
|
+
out[c.field] = {};
|
|
355
|
+
} else {
|
|
356
|
+
out[c.field] = JSON.parse(raw);
|
|
357
|
+
}
|
|
358
|
+
break;
|
|
359
|
+
case "json": {
|
|
360
|
+
const t = row[c.typeAlias];
|
|
361
|
+
if (raw === null || raw === void 0) {
|
|
362
|
+
out[c.field] = null;
|
|
363
|
+
} else if (t === "object" || t === "array") {
|
|
364
|
+
out[c.field] = typeof raw === "string" ? JSON.parse(raw) : raw;
|
|
365
|
+
} else if (t === "integer" && typeof raw === "bigint") {
|
|
366
|
+
out[c.field] = Number(raw);
|
|
367
|
+
} else {
|
|
368
|
+
out[c.field] = raw;
|
|
369
|
+
}
|
|
370
|
+
break;
|
|
371
|
+
}
|
|
372
|
+
}
|
|
373
|
+
}
|
|
374
|
+
return out;
|
|
375
|
+
}
|
|
376
|
+
function compileDOAggregate(table, spec, filters) {
|
|
377
|
+
const aliases = Object.keys(spec);
|
|
378
|
+
if (aliases.length === 0) {
|
|
379
|
+
throw new FiregraphError(
|
|
380
|
+
"aggregate() requires at least one aggregation in the `aggregates` map.",
|
|
381
|
+
"INVALID_QUERY"
|
|
382
|
+
);
|
|
383
|
+
}
|
|
384
|
+
const projections = [];
|
|
385
|
+
for (const alias of aliases) {
|
|
386
|
+
const { op, field } = spec[alias];
|
|
387
|
+
validateJsonPathKey(alias, DO_BACKEND_ERR_LABEL);
|
|
388
|
+
if (op === "count") {
|
|
389
|
+
if (field !== void 0) {
|
|
390
|
+
throw new FiregraphError(
|
|
391
|
+
`Aggregate '${alias}' op 'count' must not specify a field \u2014 count operates on rows, not a column expression.`,
|
|
392
|
+
"INVALID_QUERY"
|
|
393
|
+
);
|
|
394
|
+
}
|
|
395
|
+
projections.push(`COUNT(*) AS ${quoteDOIdent(alias)}`);
|
|
396
|
+
continue;
|
|
397
|
+
}
|
|
398
|
+
if (!field) {
|
|
399
|
+
throw new FiregraphError(
|
|
400
|
+
`Aggregate '${alias}' op '${op}' requires a field.`,
|
|
401
|
+
"INVALID_QUERY"
|
|
402
|
+
);
|
|
403
|
+
}
|
|
404
|
+
const { expr } = compileFieldRef(field);
|
|
405
|
+
const numeric = `CAST(${expr} AS REAL)`;
|
|
406
|
+
if (op === "sum") projections.push(`SUM(${numeric}) AS ${quoteDOIdent(alias)}`);
|
|
407
|
+
else if (op === "avg") projections.push(`AVG(${numeric}) AS ${quoteDOIdent(alias)}`);
|
|
408
|
+
else if (op === "min") projections.push(`MIN(${numeric}) AS ${quoteDOIdent(alias)}`);
|
|
409
|
+
else if (op === "max") projections.push(`MAX(${numeric}) AS ${quoteDOIdent(alias)}`);
|
|
410
|
+
else
|
|
411
|
+
throw new FiregraphError(
|
|
412
|
+
`DO SQLite backend does not support aggregate op: ${String(op)}`,
|
|
413
|
+
"INVALID_QUERY"
|
|
414
|
+
);
|
|
415
|
+
}
|
|
416
|
+
const params = [];
|
|
417
|
+
const conditions = [];
|
|
418
|
+
for (const f of filters) {
|
|
419
|
+
conditions.push(compileFilter(f, params));
|
|
420
|
+
}
|
|
421
|
+
const where = conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "";
|
|
422
|
+
const sql = `SELECT ${projections.join(", ")} FROM ${quoteDOIdent(table)}${where}`;
|
|
423
|
+
return { stmt: { sql, params }, aliases };
|
|
424
|
+
}
|
|
468
425
|
function compileDOSet(table, docId, record, nowMillis, mode) {
|
|
469
426
|
assertJsonSafePayload(record.data, DO_BACKEND_LABEL);
|
|
470
427
|
if (mode === "replace") {
|
|
@@ -555,6 +512,55 @@ function compileDODelete(table, docId) {
|
|
|
555
512
|
params: [docId]
|
|
556
513
|
};
|
|
557
514
|
}
|
|
515
|
+
function compileDOBulkDelete(table, filters) {
|
|
516
|
+
const params = [];
|
|
517
|
+
const conditions = [];
|
|
518
|
+
for (const f of filters) {
|
|
519
|
+
conditions.push(compileFilter(f, params));
|
|
520
|
+
}
|
|
521
|
+
const where = conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "";
|
|
522
|
+
return {
|
|
523
|
+
sql: `DELETE FROM ${quoteDOIdent(table)}${where}`,
|
|
524
|
+
params
|
|
525
|
+
};
|
|
526
|
+
}
|
|
527
|
+
function compileDOBulkUpdate(table, filters, patchData, nowMillis) {
|
|
528
|
+
const dataOps = flattenPatch(patchData);
|
|
529
|
+
if (dataOps.length === 0) {
|
|
530
|
+
throw new FiregraphError(
|
|
531
|
+
"bulkUpdate() patch.data must contain at least one leaf \u2014 an empty patch would only rewrite `updated_at`, which is almost certainly a bug. Use `setDoc` with merge mode if you want to stamp without editing data.",
|
|
532
|
+
"INVALID_QUERY"
|
|
533
|
+
);
|
|
534
|
+
}
|
|
535
|
+
for (const op of dataOps) {
|
|
536
|
+
if (!op.delete) assertJsonSafePayload(op.value, DO_BACKEND_LABEL);
|
|
537
|
+
}
|
|
538
|
+
const setParams = [];
|
|
539
|
+
const expr = compileDataOpsExpr(
|
|
540
|
+
dataOps,
|
|
541
|
+
`COALESCE("data", '{}')`,
|
|
542
|
+
setParams,
|
|
543
|
+
DO_BACKEND_ERR_LABEL
|
|
544
|
+
);
|
|
545
|
+
if (expr === null) {
|
|
546
|
+
throw new FiregraphError(
|
|
547
|
+
"bulkUpdate() patch produced no SQL operations \u2014 internal invariant violated.",
|
|
548
|
+
"INVALID_ARGUMENT"
|
|
549
|
+
);
|
|
550
|
+
}
|
|
551
|
+
const setClauses = [`"data" = ${expr}`, `"updated_at" = ?`];
|
|
552
|
+
setParams.push(nowMillis);
|
|
553
|
+
const whereParams = [];
|
|
554
|
+
const conditions = [];
|
|
555
|
+
for (const f of filters) {
|
|
556
|
+
conditions.push(compileFilter(f, whereParams));
|
|
557
|
+
}
|
|
558
|
+
const where = conditions.length > 0 ? ` WHERE ${conditions.join(" AND ")}` : "";
|
|
559
|
+
return {
|
|
560
|
+
sql: `UPDATE ${quoteDOIdent(table)} SET ${setClauses.join(", ")}${where}`,
|
|
561
|
+
params: [...setParams, ...whereParams]
|
|
562
|
+
};
|
|
563
|
+
}
|
|
558
564
|
function compileDODeleteAll(table) {
|
|
559
565
|
return {
|
|
560
566
|
sql: `DELETE FROM ${quoteDOIdent(table)}`,
|
|
@@ -646,7 +652,18 @@ var DORPCBatchBackend = class {
|
|
|
646
652
|
await this.getStub()._fgBatch(ops);
|
|
647
653
|
}
|
|
648
654
|
};
|
|
655
|
+
var DO_CAPS = /* @__PURE__ */ new Set([
|
|
656
|
+
"core.read",
|
|
657
|
+
"core.write",
|
|
658
|
+
"core.batch",
|
|
659
|
+
"core.subgraph",
|
|
660
|
+
"query.aggregate",
|
|
661
|
+
"query.dml",
|
|
662
|
+
"query.join",
|
|
663
|
+
"query.select"
|
|
664
|
+
]);
|
|
649
665
|
var DORPCBackend = class _DORPCBackend {
|
|
666
|
+
capabilities = createCapabilities(DO_CAPS);
|
|
650
667
|
collectionPath = "firegraph";
|
|
651
668
|
scopePath;
|
|
652
669
|
/** @internal */
|
|
@@ -680,6 +697,34 @@ var DORPCBackend = class _DORPCBackend {
|
|
|
680
697
|
const wires = await this.stub._fgQuery(filters, options);
|
|
681
698
|
return wires.map(hydrateDORecord);
|
|
682
699
|
}
|
|
700
|
+
// --- Aggregate ---
|
|
701
|
+
/**
|
|
702
|
+
* Run an aggregate query inside the backing DO. The DO returns a row of
|
|
703
|
+
* `{ alias: number | null }` (null = SQLite NULL for SUM/MIN/MAX over an
|
|
704
|
+
* empty set, or the count being literally 0); this method resolves NULL
|
|
705
|
+
* to 0 for SUM/MIN/MAX and to NaN for AVG, matching the SQLite backend
|
|
706
|
+
* and the Firestore Standard helper.
|
|
707
|
+
*/
|
|
708
|
+
async aggregate(spec, filters) {
|
|
709
|
+
const stub = this.stub;
|
|
710
|
+
if (!stub._fgAggregate) {
|
|
711
|
+
throw new FiregraphError(
|
|
712
|
+
"aggregate() not supported by this Durable Object stub. The wrapped stub does not implement `_fgAggregate`. If you control the stub wrapper, forward `_fgAggregate` to the underlying DO.",
|
|
713
|
+
"UNSUPPORTED_OPERATION"
|
|
714
|
+
);
|
|
715
|
+
}
|
|
716
|
+
const wire = await stub._fgAggregate(spec, filters);
|
|
717
|
+
const out = {};
|
|
718
|
+
for (const [alias, { op }] of Object.entries(spec)) {
|
|
719
|
+
const v = wire[alias];
|
|
720
|
+
if (v === null || v === void 0) {
|
|
721
|
+
out[alias] = op === "avg" ? Number.NaN : 0;
|
|
722
|
+
} else {
|
|
723
|
+
out[alias] = v;
|
|
724
|
+
}
|
|
725
|
+
}
|
|
726
|
+
return out;
|
|
727
|
+
}
|
|
683
728
|
// --- Writes ---
|
|
684
729
|
async setDoc(docId, record, mode) {
|
|
685
730
|
return this.stub._fgSetDoc(docId, record, mode);
|
|
@@ -736,6 +781,105 @@ var DORPCBackend = class _DORPCBackend {
|
|
|
736
781
|
void _reader;
|
|
737
782
|
return this.stub._fgBulkRemoveEdges(params, options);
|
|
738
783
|
}
|
|
784
|
+
// --- Server-side DML (capability: query.dml) ---
|
|
785
|
+
/**
|
|
786
|
+
* Single-statement bulk DELETE inside the backing DO. The DO compiles
|
|
787
|
+
* the filter list to one `DELETE … WHERE …` statement and returns a
|
|
788
|
+
* `BulkResult` whose `deleted` is the affected-row count.
|
|
789
|
+
*
|
|
790
|
+
* Defensive `_fgBulkDelete` presence check mirrors `aggregate()`: the
|
|
791
|
+
* RPC method is optional on `FiregraphStub` so external worker code with
|
|
792
|
+
* a hand-rolled stub wrapper still type-checks. Surface a clear
|
|
793
|
+
* `UNSUPPORTED_OPERATION` rather than `TypeError: stub._fgBulkDelete is
|
|
794
|
+
* not a function` when the wrapper hasn't forwarded the method.
|
|
795
|
+
*/
|
|
796
|
+
async bulkDelete(filters, options) {
|
|
797
|
+
const stub = this.stub;
|
|
798
|
+
if (!stub._fgBulkDelete) {
|
|
799
|
+
throw new FiregraphError(
|
|
800
|
+
"bulkDelete() not supported by this Durable Object stub. The wrapped stub does not implement `_fgBulkDelete`. If you control the stub wrapper, forward `_fgBulkDelete` to the underlying DO.",
|
|
801
|
+
"UNSUPPORTED_OPERATION"
|
|
802
|
+
);
|
|
803
|
+
}
|
|
804
|
+
return stub._fgBulkDelete(filters, options);
|
|
805
|
+
}
|
|
806
|
+
/**
|
|
807
|
+
* Single-statement bulk UPDATE inside the backing DO. Same contract as
|
|
808
|
+
* `bulkDelete` for the missing-method case; the DO compiles the patch to
|
|
809
|
+
* one `UPDATE … SET data = json_patch(...) WHERE …` statement.
|
|
810
|
+
*/
|
|
811
|
+
async bulkUpdate(filters, patch, options) {
|
|
812
|
+
const stub = this.stub;
|
|
813
|
+
if (!stub._fgBulkUpdate) {
|
|
814
|
+
throw new FiregraphError(
|
|
815
|
+
"bulkUpdate() not supported by this Durable Object stub. The wrapped stub does not implement `_fgBulkUpdate`. If you control the stub wrapper, forward `_fgBulkUpdate` to the underlying DO.",
|
|
816
|
+
"UNSUPPORTED_OPERATION"
|
|
817
|
+
);
|
|
818
|
+
}
|
|
819
|
+
return stub._fgBulkUpdate(filters, patch, options);
|
|
820
|
+
}
|
|
821
|
+
/**
|
|
822
|
+
* Multi-source fan-out — `query.join` capability. Routes the call through
|
|
823
|
+
* the DO's `_fgExpand` RPC, which compiles to one `SELECT … WHERE
|
|
824
|
+
* "aUid" IN (?, …)` statement (plus, when `params.hydrate === true`, a
|
|
825
|
+
* second IN-clause statement against the node rows).
|
|
826
|
+
*
|
|
827
|
+
* Defensive `_fgExpand` presence check matches the bulk-DML pattern: the
|
|
828
|
+
* RPC method is optional on `FiregraphStub` so external worker code with
|
|
829
|
+
* a hand-rolled stub wrapper still type-checks. We surface a clear
|
|
830
|
+
* `UNSUPPORTED_OPERATION` rather than `TypeError: stub._fgExpand is not a
|
|
831
|
+
* function` if the wrapper hasn't forwarded the method.
|
|
832
|
+
*/
|
|
833
|
+
async expand(params) {
|
|
834
|
+
const stub = this.stub;
|
|
835
|
+
if (!stub._fgExpand) {
|
|
836
|
+
throw new FiregraphError(
|
|
837
|
+
"expand() not supported by this Durable Object stub. The wrapped stub does not implement `_fgExpand`. If you control the stub wrapper, forward `_fgExpand` to the underlying DO.",
|
|
838
|
+
"UNSUPPORTED_OPERATION"
|
|
839
|
+
);
|
|
840
|
+
}
|
|
841
|
+
if (params.sources.length === 0) {
|
|
842
|
+
return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
|
|
843
|
+
}
|
|
844
|
+
const wire = await stub._fgExpand(params);
|
|
845
|
+
const edges = wire.edges.map(hydrateDORecord);
|
|
846
|
+
if (!params.hydrate) {
|
|
847
|
+
return { edges };
|
|
848
|
+
}
|
|
849
|
+
const targets = (wire.targets ?? []).map((row) => row ? hydrateDORecord(row) : null);
|
|
850
|
+
return { edges, targets };
|
|
851
|
+
}
|
|
852
|
+
// --- Server-side projection (capability: query.select) ---
|
|
853
|
+
/**
|
|
854
|
+
* Server-side projection — `query.select` capability. Forwards the call to
|
|
855
|
+
* the DO's `_fgFindEdgesProjected` RPC, which compiles to a single
|
|
856
|
+
* `SELECT json_extract(...) AS …, json_type(...) AS …__t FROM <table>
|
|
857
|
+
* WHERE …` statement. The DO returns raw rows + per-column metadata; this
|
|
858
|
+
* method decodes each row locally via `decodeDOProjectedRow`.
|
|
859
|
+
*
|
|
860
|
+
* Decoding lives on this side (not inside the DO) because
|
|
861
|
+
* `GraphTimestampImpl` is a class — its prototype does not survive
|
|
862
|
+
* workerd's structured-clone boundary — so timestamp rehydration must
|
|
863
|
+
* happen wherever the rows are consumed by the GraphClient.
|
|
864
|
+
*
|
|
865
|
+
* Defensive `_fgFindEdgesProjected` presence check matches the `expand` /
|
|
866
|
+
* bulk-DML / aggregate pattern: the RPC method is optional on
|
|
867
|
+
* `FiregraphStub` so external worker code with a hand-rolled stub wrapper
|
|
868
|
+
* still type-checks. Surface a clear `UNSUPPORTED_OPERATION` rather than
|
|
869
|
+
* `TypeError: stub._fgFindEdgesProjected is not a function` if the
|
|
870
|
+
* wrapper hasn't forwarded the method.
|
|
871
|
+
*/
|
|
872
|
+
async findEdgesProjected(select, filters, options) {
|
|
873
|
+
const stub = this.stub;
|
|
874
|
+
if (!stub._fgFindEdgesProjected) {
|
|
875
|
+
throw new FiregraphError(
|
|
876
|
+
"findEdgesProjected() not supported by this Durable Object stub. The wrapped stub does not implement `_fgFindEdgesProjected`. If you control the stub wrapper, forward `_fgFindEdgesProjected` to the underlying DO.",
|
|
877
|
+
"UNSUPPORTED_OPERATION"
|
|
878
|
+
);
|
|
879
|
+
}
|
|
880
|
+
const { rows, columns } = await stub._fgFindEdgesProjected(select, filters, options);
|
|
881
|
+
return rows.map((row) => decodeDOProjectedRow(row, columns));
|
|
882
|
+
}
|
|
739
883
|
// --- Cross-scope queries ---
|
|
740
884
|
//
|
|
741
885
|
// `findEdgesGlobal` is deliberately NOT defined on this class. The
|
|
@@ -929,6 +1073,27 @@ var FiregraphDO = class extends DurableObject {
|
|
|
929
1073
|
const rows = this.execAll(stmt);
|
|
930
1074
|
return rows.map(rowToDORecord);
|
|
931
1075
|
}
|
|
1076
|
+
/**
|
|
1077
|
+
* Aggregate query (capability `query.aggregate`). Compiles a single
|
|
1078
|
+
* `SELECT` projecting one column per alias; SQLite handles count, sum,
|
|
1079
|
+
* avg, min, max natively. Empty-set fix-ups (NULL → 0 for sum/min/max,
|
|
1080
|
+
* NaN for avg) happen on the client side in `DORPCBackend.aggregate` so
|
|
1081
|
+
* the wire payload stays a plain row of (alias → number | null).
|
|
1082
|
+
*/
|
|
1083
|
+
async _fgAggregate(spec, filters) {
|
|
1084
|
+
const { stmt, aliases } = compileDOAggregate(this.table, spec, filters);
|
|
1085
|
+
const rows = this.execAll(stmt);
|
|
1086
|
+
const row = rows[0] ?? {};
|
|
1087
|
+
const out = {};
|
|
1088
|
+
for (const alias of aliases) {
|
|
1089
|
+
const v = row[alias];
|
|
1090
|
+
if (v === null || v === void 0) out[alias] = null;
|
|
1091
|
+
else if (typeof v === "bigint") out[alias] = Number(v);
|
|
1092
|
+
else if (typeof v === "number") out[alias] = v;
|
|
1093
|
+
else out[alias] = Number(v);
|
|
1094
|
+
}
|
|
1095
|
+
return out;
|
|
1096
|
+
}
|
|
932
1097
|
// ---------------------------------------------------------------------------
|
|
933
1098
|
// RPC: writes
|
|
934
1099
|
// ---------------------------------------------------------------------------
|
|
@@ -1079,6 +1244,119 @@ var FiregraphDO = class extends DurableObject {
|
|
|
1079
1244
|
}
|
|
1080
1245
|
}
|
|
1081
1246
|
// ---------------------------------------------------------------------------
|
|
1247
|
+
// RPC: server-side DML (capability `query.dml`)
|
|
1248
|
+
//
|
|
1249
|
+
// Single-statement DELETE/UPDATE WHERE that the SQLite engine handles in
|
|
1250
|
+
// one shot — the cap-less alternative is `_fgBulkRemoveEdges` which fetches
|
|
1251
|
+
// doc IDs first, then deletes them one-by-one inside a transaction. The
|
|
1252
|
+
// DML path skips the round-trip and lets SQLite optimize the WHERE.
|
|
1253
|
+
//
|
|
1254
|
+
// RETURNING "doc_id" gives us an authoritative affected-row count; SQLite
|
|
1255
|
+
// ≥3.35 supports it for both DELETE and UPDATE and DO SQLite is always
|
|
1256
|
+
// recent enough.
|
|
1257
|
+
//
|
|
1258
|
+
// Retry policy: unlike `SqliteBackendImpl.bulkDelete` / `bulkUpdate`, which
|
|
1259
|
+
// wrap a chunked retry/backoff loop around each batch (D1's 1000-statement
|
|
1260
|
+
// cap forces chunking, so a single transient failure shouldn't kill the
|
|
1261
|
+
// whole job), the DO path runs a single un-chunked statement against
|
|
1262
|
+
// `state.storage.sql` synchronously. There's nothing to retry inside the
|
|
1263
|
+
// DO — the engine commits or it doesn't. If a caller wants retry semantics
|
|
1264
|
+
// on the wire, they wrap the `bulkDelete` / `bulkUpdate` call themselves.
|
|
1265
|
+
// ---------------------------------------------------------------------------
|
|
1266
|
+
async _fgBulkDelete(filters, _options) {
|
|
1267
|
+
void _options;
|
|
1268
|
+
if (filters.length === 0) {
|
|
1269
|
+
throw new FiregraphError(
|
|
1270
|
+
"bulkDelete() requires at least one filter when targeting a Durable Object backend. An empty filter list would wipe every row in the DO. To wipe a routed subgraph DO, use `removeNodeCascade` on the parent node or `_fgDestroy` directly on the stub.",
|
|
1271
|
+
"INVALID_ARGUMENT"
|
|
1272
|
+
);
|
|
1273
|
+
}
|
|
1274
|
+
const stmt = compileDOBulkDelete(this.table, filters);
|
|
1275
|
+
return this.execDmlWithReturning(stmt);
|
|
1276
|
+
}
|
|
1277
|
+
async _fgBulkUpdate(filters, patch, _options) {
|
|
1278
|
+
void _options;
|
|
1279
|
+
const stmt = compileDOBulkUpdate(this.table, filters, patch.data, Date.now());
|
|
1280
|
+
return this.execDmlWithReturning(stmt);
|
|
1281
|
+
}
|
|
1282
|
+
// ---------------------------------------------------------------------------
// RPC: multi-source fan-out (`query.join`)
//
// One `SELECT … WHERE "aUid" IN (?, ?, …)` (or `"bUid"` for reverse)
// collapses N per-source `findEdges` round trips into one. When the
// caller asks for hydration, a second IN-clause statement fetches the
// target node rows; the DO does the alignment in JS so the wire payload
// is two `DORecordWire[]` arrays instead of a JOIN-shaped row that
// would force a custom client-side decoder.
// ---------------------------------------------------------------------------
async _fgExpand(params) {
  // Nothing to fan out from — answer with an empty, shape-correct payload
  // without touching storage.
  if (params.sources.length === 0) {
    return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
  }
  const stmt = compileDOExpand(this.table, params);
  const rows = this.state.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
  const edges = rows.map((row) => rowToDORecord(row));
  if (!params.hydrate) {
    return { edges };
  }
  // Forward edges point at their `bUid`; reverse traversal targets `aUid`.
  const direction = params.direction ?? "forward";
  const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
  // Dedupe before the hydrate query so each node row is fetched once.
  const uniqueTargets = [...new Set(targetUids)];
  if (uniqueTargets.length === 0) {
    return { edges, targets: [] };
  }
  // Second statement: fetch all target node rows in one IN-clause query.
  const hydrateStmt = compileDOExpandHydrate(this.table, uniqueTargets);
  const hydrateRows = this.state.storage.sql.exec(hydrateStmt.sql, ...hydrateStmt.params).toArray();
  const byUid = /* @__PURE__ */ new Map();
  for (const row of hydrateRows) {
    const node = rowToDORecord(row);
    // NOTE(review): node rows are keyed by their `bUid` column. This
    // presumes nodes are stored with aUid === bUid (self-edge convention)
    // so the reverse-direction lookup below also resolves — confirm
    // against the schema produced by `buildDOSchemaStatements`.
    byUid.set(node.bUid, node);
  }
  // Re-align one target (or null when the node is missing) per edge,
  // preserving edge order; duplicate target uids intentionally reference
  // the same hydrated node object.
  const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
  return { edges, targets };
}
|
|
1318
|
+
// ---------------------------------------------------------------------------
|
|
1319
|
+
// RPC: server-side projection (`query.select`)
|
|
1320
|
+
//
|
|
1321
|
+
// One `SELECT json_extract(data, '$.f1'), …` returns the projected fields.
|
|
1322
|
+
// The DO leaves decoding to the client because timestamp values need to
|
|
1323
|
+
// rewrap as `GraphTimestampImpl` (a class instance, lost by structured
|
|
1324
|
+
// clone) — instead of inventing per-field timestamp sentinels, we send the
|
|
1325
|
+
// raw rows and the column spec, and let `DORPCBackend.findEdgesProjected`
|
|
1326
|
+
// call `decodeDOProjectedRow` once. The spec is small (≤ ~100 bytes for
|
|
1327
|
+
// a typical projection); structured clone copes happily.
|
|
1328
|
+
// ---------------------------------------------------------------------------
|
|
1329
|
+
async _fgFindEdgesProjected(select, filters, options) {
|
|
1330
|
+
const { stmt, columns } = compileDOFindEdgesProjected(this.table, select, filters, options);
|
|
1331
|
+
const rows = this.state.storage.sql.exec(stmt.sql, ...stmt.params).toArray();
|
|
1332
|
+
return { rows, columns };
|
|
1333
|
+
}
|
|
1334
|
+
/**
|
|
1335
|
+
* Run a DML statement with `RETURNING "doc_id"` so the affected-row count
|
|
1336
|
+
* comes back authoritatively. Errors are caught and surfaced via the
|
|
1337
|
+
* `BulkResult.errors` array (single batch, batchIndex 0) so the wire
|
|
1338
|
+
* payload stays a regular `BulkResult` and the client doesn't have to
|
|
1339
|
+
* differentiate "RPC threw" from "single-statement failure."
|
|
1340
|
+
*/
|
|
1341
|
+
execDmlWithReturning(stmt) {
|
|
1342
|
+
const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
|
|
1343
|
+
try {
|
|
1344
|
+
const rows = this.state.storage.sql.exec(sqlWithReturning, ...stmt.params).toArray();
|
|
1345
|
+
return { deleted: rows.length, batches: 1, errors: [] };
|
|
1346
|
+
} catch (err) {
|
|
1347
|
+
const error = err instanceof Error ? err : new Error(String(err));
|
|
1348
|
+
return {
|
|
1349
|
+
deleted: 0,
|
|
1350
|
+
batches: 0,
|
|
1351
|
+
// Like `_fgBulkRemoveEdges`'s catch arm: a single failed statement
|
|
1352
|
+
// is one batch, and the operationCount is "unknown" for a server-
|
|
1353
|
+
// side DML — we report 0 as the lower bound. Callers that care
|
|
1354
|
+
// about partial state should re-query and reconcile.
|
|
1355
|
+
errors: [{ batchIndex: 0, error, operationCount: 0 }]
|
|
1356
|
+
};
|
|
1357
|
+
}
|
|
1358
|
+
}
|
|
1359
|
+
// ---------------------------------------------------------------------------
|
|
1082
1360
|
// RPC: admin
|
|
1083
1361
|
// ---------------------------------------------------------------------------
|
|
1084
1362
|
/**
|
|
@@ -1110,16 +1388,19 @@ var FiregraphDO = class extends DurableObject {
|
|
|
1110
1388
|
}
|
|
1111
1389
|
};
|
|
1112
1390
|
// Public surface of this bundle (generated dist output — see the
// sourceMappingURL trailer). Entries are kept alphabetical within case groups.
export {
  CapabilityNotSupportedError,
  DORPCBackend,
  FiregraphDO,
  META_EDGE_TYPE,
  META_NODE_TYPE,
  buildDOSchemaStatements,
  createCapabilities,
  createDOClient,
  createMergedRegistry,
  createRegistry,
  createSiblingClient,
  deleteField,
  generateId,
  intersectCapabilities
};
|
|
1125
1406
|
//# sourceMappingURL=index.js.map
|