@typicalday/firegraph 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +317 -73
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +222 -3
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +197 -4
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +930 -3
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +213 -12
- package/dist/cloudflare/index.d.ts +213 -12
- package/dist/cloudflare/index.js +562 -281
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +590 -550
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +9 -37
- package/dist/index.d.ts +9 -37
- package/dist/index.js +178 -555
- package/dist/index.js.map +1 -1
- package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
- package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-BsR0lnFL.d.ts +0 -200
- package/dist/backend-Ct-fLlkG.d.cts +0 -200
- package/dist/chunk-AWW4MUJ5.js.map +0 -1
- package/dist/chunk-HONQY4HF.js.map +0 -1
- package/dist/types-DxYLy8Ol.d.cts +0 -770
- package/dist/types-DxYLy8Ol.d.ts +0 -770
|
@@ -0,0 +1,288 @@
|
|
|
1
|
+
import {
|
|
2
|
+
FiregraphError,
|
|
3
|
+
isDeleteSentinel
|
|
4
|
+
} from "./chunk-TK64DNVK.js";
|
|
5
|
+
import {
|
|
6
|
+
SERIALIZATION_TAG
|
|
7
|
+
} from "./chunk-EQJUUVFG.js";
|
|
8
|
+
|
|
9
|
+
// src/internal/sqlite-index-ddl.ts
|
|
10
|
+
// Valid SQLite identifier shape for table/column names used in index DDL.
var IDENT_RE = /^[A-Za-z_][A-Za-z0-9_]*$/;
// Safe JSON path component; must stay in sync with the query compilers so
// the emitted json_extract expression matches the index expression.
var JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
// Double-quote an identifier after validating it; throws FiregraphError
// ("INVALID_INDEX") on anything outside the safe identifier set.
function quoteIdent(name) {
  if (IDENT_RE.test(name)) {
    return `"${name}"`;
  }
  throw new FiregraphError(
    `Invalid SQL identifier in index DDL: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`,
    "INVALID_INDEX"
  );
}
|
|
21
|
+
// FNV-1a 32-bit hash rendered as fixed-width 8-char lowercase hex.
// Non-cryptographic; used only to derive short, stable index names.
function fnv1a32(str) {
  let hash = 0x811c9dc5; // FNV offset basis
  for (let idx = 0, n = str.length; idx < n; idx++) {
    // XOR in each UTF-16 code unit, then multiply by the FNV prime
    // with 32-bit wraparound semantics.
    hash = Math.imul(hash ^ str.charCodeAt(idx), 0x01000193);
  }
  // >>> 0 forces an unsigned interpretation before hex formatting.
  return (hash >>> 0).toString(16).padStart(8, "0");
}
|
|
29
|
+
// Coerce each IndexSpec field entry to a canonical { path, desc } pair.
// Bare strings mean ascending order; objects must carry a string `path`.
// Throws FiregraphError ("INVALID_INDEX") on malformed entries.
function normalizeFields(fields) {
  const out = [];
  for (const field of fields) {
    if (typeof field === "string") {
      out.push({ path: field, desc: false });
      continue;
    }
    if (!field.path || typeof field.path !== "string") {
      throw new FiregraphError(
        `IndexSpec field must be a string or { path: string, desc?: boolean }; got ${JSON.stringify(field)}`,
        "INVALID_INDEX"
      );
    }
    out.push({ path: field.path, desc: Boolean(field.desc) });
  }
  return out;
}
|
|
41
|
+
// Deterministic fingerprint of an index spec. Leading columns participate
// so the same spec under different backends gets distinct names.
// NOTE: the {lead, fields, where} key order is part of the canonical form —
// JSON.stringify preserves insertion order and the hash depends on it.
function specFingerprint(spec, leadingColumns) {
  const canonical = JSON.stringify({
    lead: leadingColumns,
    fields: normalizeFields(spec.fields),
    where: spec.where ?? ""
  });
  return fnv1a32(canonical);
}
|
|
49
|
+
// Compile one IndexSpec field path to its SQLite column expression:
// - mapped top-level firegraph fields -> quoted physical column
// - "data"                           -> json_extract("data", '$')
// - "data.foo.bar"                   -> json_extract("data", '$.foo.bar')
// Throws FiregraphError ("INVALID_INDEX") on unknown fields or unsafe
// JSON path components.
function compileFieldExpr(path, fieldToColumn) {
  const mapped = fieldToColumn[path];
  if (mapped) return quoteIdent(mapped);
  if (path === "data") {
    return `json_extract("data", '$')`;
  }
  if (!path.startsWith("data.")) {
    throw new FiregraphError(
      `IndexSpec field "${path}" is not a known firegraph field. Use a top-level field (aType, aUid, axbType, bType, bUid, createdAt, updatedAt, v) or a dotted data path like 'data.status'.`,
      "INVALID_INDEX"
    );
  }
  const suffix = path.slice("data.".length);
  for (const part of suffix.split(".")) {
    if (JSON_PATH_KEY_RE.test(part)) continue;
    throw new FiregraphError(
      `IndexSpec data path "${path}" has invalid component "${part}". Each component must match /^[A-Za-z_][A-Za-z0-9_-]*$/.`,
      "INVALID_INDEX"
    );
  }
  // Components are validated above, so inlining the path literal (rather
  // than binding a parameter) is safe and lets the planner match the index.
  return `json_extract("data", '$.${suffix}')`;
}
|
|
73
|
+
// Emit the CREATE INDEX IF NOT EXISTS statement for one IndexSpec.
// The index name embeds a deterministic fingerprint, so re-running the
// bootstrap is idempotent. Throws FiregraphError ("INVALID_INDEX") when
// the spec has no fields or contains invalid paths/identifiers.
function buildIndexDDL(spec, options) {
  const { table, fieldToColumn, leadingColumns = [] } = options;
  if (!spec.fields || spec.fields.length === 0) {
    throw new FiregraphError("IndexSpec.fields must be a non-empty array", "INVALID_INDEX");
  }
  const indexName = `${table}_idx_${specFingerprint(spec, leadingColumns)}`;
  // Leading columns (e.g. `scope`) come first, ascending, as bare columns.
  const columnExprs = leadingColumns.map((col) => quoteIdent(col));
  for (const field of normalizeFields(spec.fields)) {
    const expr = compileFieldExpr(field.path, fieldToColumn);
    columnExprs.push(field.desc ? `${expr} DESC` : expr);
  }
  const head = `CREATE INDEX IF NOT EXISTS ${quoteIdent(indexName)} ON ${quoteIdent(table)}(${columnExprs.join(", ")})`;
  // Partial-index predicate comes from trusted configuration and is
  // inlined verbatim — callers are responsible for valid SQLite syntax.
  return spec.where ? `${head} WHERE ${spec.where}` : head;
}
|
|
95
|
+
// Collapse index specs that share a canonical fingerprint; the first
// occurrence wins and input order is otherwise preserved.
function dedupeIndexSpecs(specs, leadingColumns = []) {
  const byFingerprint = /* @__PURE__ */ new Map();
  for (const spec of specs) {
    const fp = specFingerprint(spec, leadingColumns);
    if (!byFingerprint.has(fp)) byFingerprint.set(fp, spec);
  }
  // Map preserves insertion order, so this matches first-seen ordering.
  return [...byFingerprint.values()];
}
|
|
106
|
+
|
|
107
|
+
// src/internal/sqlite-data-ops.ts
|
|
108
|
+
// Firestore value classes that do not survive a plain JSON.stringify
// round-trip. Matched by constructor name so this bundle stays free of
// any Firestore dependency.
var FIRESTORE_TYPE_NAMES = /* @__PURE__ */ new Set(
  ["Timestamp", "GeoPoint", "VectorValue", "DocumentReference", "FieldValue"]
);
// Return the offending Firestore constructor name, or null when the
// object is safe to serialize.
function isFirestoreSpecialType(value) {
  const name = value.constructor?.name;
  return name && FIRESTORE_TYPE_NAMES.has(name) ? name : null;
}
|
|
120
|
+
// Accepted dataOps path-segment shape (camel/snake/kebab identifiers).
var JSON_PATH_KEY_RE2 = /^[A-Za-z_][A-Za-z0-9_-]*$/;
// Validate one JSON path segment; throws FiregraphError ("INVALID_QUERY")
// for empty segments or anything outside the safe identifier set. Exotic
// keys must use the full-data replace path instead of dataOps.
function validateJsonPathKey(key, backendLabel) {
  if (key.length === 0) {
    throw new FiregraphError(
      `${backendLabel}: empty JSON path component is not allowed`,
      "INVALID_QUERY"
    );
  }
  if (JSON_PATH_KEY_RE2.test(key)) return;
  throw new FiregraphError(
    `${backendLabel}: data field path component "${key}" is not a safe JSON-path identifier. Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceNode/replaceEdge (full-data overwrite) for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,
    "INVALID_QUERY"
  );
}
|
|
135
|
+
// Serialize a value for a json(?) placeholder in a compiled json_set
// expression. `undefined` has no JSON form and binds as JSON null.
// The Firestore-type rejection is defense-in-depth — the payload guard
// already rejects these at the write boundary.
function jsonBind(value, backendLabel) {
  if (value === void 0) return "null";
  const firestoreType =
    value !== null && typeof value === "object" ? isFirestoreSpecialType(value) : null;
  if (firestoreType) {
    throw new FiregraphError(
      `${backendLabel} cannot persist a Firestore ${firestoreType} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp).`,
      "INVALID_ARGUMENT"
    );
  }
  return JSON.stringify(value);
}
|
|
148
|
+
// Build the SQL expression applying DataPathOps onto an existing JSON
// column reference (`base`). Pushes bound parameters onto `params` in
// left-to-right order: all delete paths first, then (path, value) pairs
// for each set op. Returns null when there are no ops at all so the
// caller can pick its own fallback expression.
function compileDataOpsExpr(ops, base, params, backendLabel) {
  if (ops.length === 0) return null;
  const deletes = ops.filter((op) => op.delete);
  const sets = ops.filter((op) => !op.delete);
  let expr = base;
  // 1) json_remove strips the delete-ops' paths.
  if (deletes.length > 0) {
    expr = `json_remove(${expr}, ${deletes.map(() => "?").join(", ")})`;
    for (const op of deletes) {
      op.path.forEach((seg) => validateJsonPathKey(seg, backendLabel));
      params.push(`$.${op.path.join(".")}`);
    }
  }
  // 2) json_set writes the value-ops; json(?) makes non-string values
  // bind as real JSON (objects, arrays, numbers, booleans, null).
  if (sets.length > 0) {
    expr = `json_set(${expr}, ${sets.map(() => "?, json(?)").join(", ")})`;
    for (const op of sets) {
      op.path.forEach((seg) => validateJsonPathKey(seg, backendLabel));
      params.push(`$.${op.path.join(".")}`, jsonBind(op.value, backendLabel));
    }
  }
  return expr;
}
|
|
173
|
+
|
|
174
|
+
// src/internal/sqlite-payload-guard.ts
|
|
175
|
+
// Mirror of the data-ops list: Firestore value classes that raw-JSON
// storage cannot round-trip (matched by constructor name).
var FIRESTORE_TYPE_NAMES2 = /* @__PURE__ */ new Set(
  ["Timestamp", "GeoPoint", "VectorValue", "DocumentReference", "FieldValue"]
);
// Eagerly validate a full-data payload before it is JSON-serialized.
// Recursively throws FiregraphError on any value the SQLite-style
// persistence path cannot faithfully store; `label` names the backend
// in error messages.
function assertJsonSafePayload(data, label) {
  walk(data, [], label);
}
|
|
185
|
+
// Depth-first validation of a payload tree. Throws FiregraphError
// ("INVALID_ARGUMENT") on the first value that would not survive the
// raw-JSON persistence path; `path` is the breadcrumb of keys/indices
// used in error messages, `label` names the calling backend.
function walk(node, path, label) {
  // null and undefined are accepted as-is.
  if (node === null || node === void 0) return;
  // deleteField() sentinels are only meaningful in dataOps patches —
  // inside a full payload JSON.stringify would drop them silently.
  if (isDeleteSentinel(node)) {
    throw new FiregraphError(
      `${label} backend cannot persist a deleteField() sentinel inside a full-data payload (replaceNode/replaceEdge or first-insert). The sentinel is only valid inside an updateNode/updateEdge dataOps patch. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  const t = typeof node;
  // JSON.stringify silently drops symbols and functions...
  if (t === "symbol" || t === "function") {
    throw new FiregraphError(
      `${label} backend cannot persist a value of type ${t}. JSON.stringify drops it silently. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // ...and throws a TypeError on bigint; reject both loudly up front.
  if (t === "bigint") {
    throw new FiregraphError(
      `${label} backend cannot persist a value of type bigint. JSON.stringify cannot serialize this type (throws TypeError). Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // Remaining primitives (string/number/boolean) are always safe.
  if (t !== "object") return;
  // Arrays recurse element-wise, recording the index in the path.
  if (Array.isArray(node)) {
    for (let i = 0; i < node.length; i++) {
      walk(node[i], [...path, String(i)], label);
    }
    return;
  }
  // Reject any object carrying the firegraph serialization tag. Tagged
  // payloads are only re-hydrated by the Firestore backend; here they
  // would persist as opaque envelopes, and a literal tag key in user
  // data is reserved.
  const obj = node;
  if (Object.prototype.hasOwnProperty.call(obj, SERIALIZATION_TAG)) {
    const tagValue = obj[SERIALIZATION_TAG];
    throw new FiregraphError(
      `${label} backend cannot persist an object with a \`${SERIALIZATION_TAG}\` key (value: ${formatTagValue(tagValue)}). Recognised tags are valid only on the Firestore backend (migration-sandbox output); a literal \`${SERIALIZATION_TAG}\` field in user data is reserved and not allowed. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // Non-plain objects: reject Firestore special types with a targeted
  // message; allow Date (serializes as an ISO string, a deliberate
  // caller choice); reject every other class instance generically.
  const proto = Object.getPrototypeOf(node);
  if (proto !== null && proto !== Object.prototype) {
    const ctor = node.constructor;
    const ctorName = ctor && typeof ctor.name === "string" ? ctor.name : "<anonymous>";
    if (FIRESTORE_TYPE_NAMES2.has(ctorName)) {
      throw new FiregraphError(
        `${label} backend cannot persist a Firestore ${ctorName} value. Convert to a primitive before writing (e.g. \`ts.toMillis()\` for Timestamp, \`{lat,lng}\` for GeoPoint). Path: ${formatPath(path)}.`,
        "INVALID_ARGUMENT"
      );
    }
    if (node instanceof Date) return;
    throw new FiregraphError(
      `${label} backend cannot persist a class instance of type ${ctorName}. Only plain objects, arrays, and primitives round-trip safely through JSON storage. Path: ${formatPath(path)}.`,
      "INVALID_ARGUMENT"
    );
  }
  // Plain object: recurse into own enumerable string-keyed properties.
  for (const key of Object.keys(obj)) {
    walk(obj[key], [...path, key], label);
  }
}
|
|
241
|
+
// Human-readable breadcrumb for error messages, e.g. `"a" > "0" > "b"`.
function formatPath(path) {
  if (path.length === 0) return "<root>";
  return path.map((segment) => JSON.stringify(segment)).join(" > ");
}
|
|
244
|
+
// Render a serialization-tag value for diagnostics without relying on
// JSON.stringify for types it cannot handle (e.g. bigint).
function formatTagValue(value) {
  if (value === null) return "null";
  if (value === void 0) return "undefined";
  switch (typeof value) {
    case "string":
      return JSON.stringify(value);
    case "number":
    case "boolean":
    case "bigint":
      return String(value);
    default:
      // Objects, symbols, functions: just name the type.
      return typeof value;
  }
}
|
|
253
|
+
|
|
254
|
+
// src/timestamp.ts
|
|
255
|
+
// Seconds + nanoseconds timestamp pair (same shape as Firestore's
// Timestamp) with millisecond conversion helpers.
var GraphTimestampImpl = class _GraphTimestampImpl {
  constructor(seconds, nanoseconds) {
    this.seconds = seconds;
    this.nanoseconds = nanoseconds;
  }
  // Epoch milliseconds; sub-millisecond nanoseconds are floored away.
  toMillis() {
    return this.seconds * 1e3 + Math.floor(this.nanoseconds / 1e6);
  }
  toDate() {
    return new Date(this.toMillis());
  }
  // Plain-object form, picked up by JSON.stringify.
  toJSON() {
    return { seconds: this.seconds, nanoseconds: this.nanoseconds };
  }
  // Split epoch milliseconds into whole seconds + nanosecond remainder.
  static fromMillis(ms) {
    const seconds = Math.floor(ms / 1e3);
    return new _GraphTimestampImpl(seconds, (ms - seconds * 1e3) * 1e6);
  }
  static now() {
    return _GraphTimestampImpl.fromMillis(Date.now());
  }
};
|
|
278
|
+
|
|
279
|
+
export {
|
|
280
|
+
isFirestoreSpecialType,
|
|
281
|
+
validateJsonPathKey,
|
|
282
|
+
compileDataOpsExpr,
|
|
283
|
+
assertJsonSafePayload,
|
|
284
|
+
GraphTimestampImpl,
|
|
285
|
+
buildIndexDDL,
|
|
286
|
+
dedupeIndexSpecs
|
|
287
|
+
};
|
|
288
|
+
//# sourceMappingURL=chunk-4MMQ5W74.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/internal/sqlite-index-ddl.ts","../src/internal/sqlite-data-ops.ts","../src/internal/sqlite-payload-guard.ts","../src/timestamp.ts"],"sourcesContent":["/**\n * Translator from `IndexSpec` to SQLite `CREATE INDEX` DDL.\n *\n * Shared between the DO SQLite backend (`src/cloudflare/schema.ts`) and the\n * legacy single-table SQLite backend (`src/internal/sqlite-schema.ts`). The\n * two backends differ only in:\n *\n * 1. Their field→column mapping (no `scope` column in the DO schema).\n * 2. Whether a fixed `scope` leading column is prepended to every index\n * (legacy backend only — DO rows are scoped by DO-instance identity).\n *\n * Both differences are handled via the `fieldToColumn` and `leadingColumns`\n * options; the rest of the emission logic is identical.\n *\n * ## JSON path expression indexes\n *\n * Data-field specs (`data.foo`, `data.nested.bar`) compile to\n * `json_extract(\"data\", '$.foo')` expression indexes. The JSON path\n * literal is inlined — not parametrized — so the SQLite query planner can\n * match the index against the expression emitted by the query compiler\n * (which also inlines the literal after this PR). Path components are\n * validated against a safe identifier pattern so inlining is not an\n * injection risk.\n *\n * ## Index naming\n *\n * Names are `{table}_idx_{hash}` where `hash` is a short FNV-1a of a\n * canonicalized spec. This keeps names stable across runs (so\n * `CREATE INDEX IF NOT EXISTS` is idempotent) and prevents collisions\n * between similar specs. 
The hash includes the field list, per-field\n * direction, and the `where` predicate.\n */\n\nimport { FiregraphError } from '../errors.js';\nimport type { IndexFieldSpec, IndexSpec } from '../types.js';\n\n/**\n * Valid SQLite identifier pattern — used for table and column names.\n * Mirrors the validation in `sqlite-schema.ts` / `cloudflare/schema.ts` so\n * this module doesn't need to import one over the other.\n */\nconst IDENT_RE = /^[A-Za-z_][A-Za-z0-9_]*$/;\n\n/**\n * Safe JSON path component. Must match `JSON_PATH_KEY_RE` in the SQLite\n * query compilers — an index is only useful if the query emits an\n * identical `json_extract` expression.\n */\nconst JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;\n\nfunction quoteIdent(name: string): string {\n if (!IDENT_RE.test(name)) {\n throw new FiregraphError(\n `Invalid SQL identifier in index DDL: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`,\n 'INVALID_INDEX',\n );\n }\n return `\"${name}\"`;\n}\n\n/**\n * FNV-1a 32-bit hash, returned as 8-char hex. 
Non-cryptographic;\n * used only to produce short, stable index names.\n */\nfunction fnv1a32(str: string): string {\n let h = 0x811c9dc5;\n for (let i = 0; i < str.length; i++) {\n h ^= str.charCodeAt(i);\n h = Math.imul(h, 0x01000193);\n }\n return (h >>> 0).toString(16).padStart(8, '0');\n}\n\nfunction normalizeFields(\n fields: Array<string | IndexFieldSpec>,\n): Array<{ path: string; desc: boolean }> {\n return fields.map((f) => {\n if (typeof f === 'string') return { path: f, desc: false };\n if (!f.path || typeof f.path !== 'string') {\n throw new FiregraphError(\n `IndexSpec field must be a string or { path: string, desc?: boolean }; got ${JSON.stringify(f)}`,\n 'INVALID_INDEX',\n );\n }\n return { path: f.path, desc: !!f.desc };\n });\n}\n\nfunction specFingerprint(spec: IndexSpec, leadingColumns: string[]): string {\n // Canonical form: JSON of normalized fields + leading cols + where.\n // Leading columns are part of the fingerprint so the same spec under\n // two different backends gets distinct names (though in practice only\n // one backend compiles a given spec).\n const normalized = {\n lead: leadingColumns,\n fields: normalizeFields(spec.fields),\n where: spec.where ?? 
'',\n };\n return fnv1a32(JSON.stringify(normalized));\n}\n\n/**\n * Compile one field path to its SQLite column expression.\n *\n * - Firegraph top-level fields (`aType`, `createdAt`, …) → mapped column.\n * - `data.foo` / `data.foo.bar` → `json_extract(\"data\", '$.foo.bar')`.\n * - `data` alone → `json_extract(\"data\", '$')`.\n */\nfunction compileFieldExpr(path: string, fieldToColumn: Record<string, string>): string {\n const col = fieldToColumn[path];\n if (col) return quoteIdent(col);\n\n if (path === 'data') {\n return `json_extract(\"data\", '$')`;\n }\n if (path.startsWith('data.')) {\n const suffix = path.slice(5);\n const parts = suffix.split('.');\n for (const part of parts) {\n if (!JSON_PATH_KEY_RE.test(part)) {\n throw new FiregraphError(\n `IndexSpec data path \"${path}\" has invalid component \"${part}\". ` +\n `Each component must match /^[A-Za-z_][A-Za-z0-9_-]*$/.`,\n 'INVALID_INDEX',\n );\n }\n }\n // Inline the path literal (no parameter). Validated components above\n // are safe to embed — no quote or escape characters.\n return `json_extract(\"data\", '$.${suffix}')`;\n }\n\n throw new FiregraphError(\n `IndexSpec field \"${path}\" is not a known firegraph field. ` +\n `Use a top-level field (aType, aUid, axbType, bType, bUid, createdAt, updatedAt, v) ` +\n `or a dotted data path like 'data.status'.`,\n 'INVALID_INDEX',\n );\n}\n\nexport interface SqliteIndexDDLOptions {\n /** Target table. */\n table: string;\n /** Map from firegraph field name to SQLite column name. */\n fieldToColumn: Record<string, string>;\n /**\n * Columns prepended to every index's field list (leading ASC). Used by\n * the legacy shared-table SQLite backend to lead every index with\n * `scope`, matching the predicate its query compiler emits.\n *\n * Identifier names only — no JSON paths or expressions.\n */\n leadingColumns?: string[];\n}\n\n/**\n * Emit the `CREATE INDEX IF NOT EXISTS` DDL for one `IndexSpec`.\n *\n * Returns a single SQL string. 
Name is deterministic (same spec → same\n * name across runs), so re-running the bootstrap is idempotent.\n */\nexport function buildIndexDDL(spec: IndexSpec, options: SqliteIndexDDLOptions): string {\n const { table, fieldToColumn, leadingColumns = [] } = options;\n\n if (!spec.fields || spec.fields.length === 0) {\n throw new FiregraphError('IndexSpec.fields must be a non-empty array', 'INVALID_INDEX');\n }\n\n const normalized = normalizeFields(spec.fields);\n const hash = specFingerprint(spec, leadingColumns);\n const indexName = `${table}_idx_${hash}`;\n\n const cols: string[] = [];\n for (const col of leadingColumns) {\n cols.push(quoteIdent(col));\n }\n for (const f of normalized) {\n const expr = compileFieldExpr(f.path, fieldToColumn);\n cols.push(f.desc ? `${expr} DESC` : expr);\n }\n\n let ddl = `CREATE INDEX IF NOT EXISTS ${quoteIdent(indexName)} ON ${quoteIdent(table)}(${cols.join(', ')})`;\n\n if (spec.where) {\n // The predicate is inlined verbatim. It comes from library/app\n // configuration — never from user data — so we don't attempt to\n // parse, rewrite, or validate it. Callers authoring partial indexes\n // are responsible for writing a valid SQLite WHERE clause.\n ddl += ` WHERE ${spec.where}`;\n }\n\n return ddl;\n}\n\n/**\n * Deduplicate index specs by their deterministic fingerprint. 
Same spec\n * declared twice (e.g., by core preset + registry entry) collapses to a\n * single DDL statement.\n */\nexport function dedupeIndexSpecs(\n specs: ReadonlyArray<IndexSpec>,\n leadingColumns: string[] = [],\n): IndexSpec[] {\n const seen = new Set<string>();\n const out: IndexSpec[] = [];\n for (const spec of specs) {\n const fp = specFingerprint(spec, leadingColumns);\n if (seen.has(fp)) continue;\n seen.add(fp);\n out.push(spec);\n }\n return out;\n}\n","/**\n * Shared `dataOps` SQL compilation helpers used by both SQLite-style backends\n * (`internal/sqlite-sql.ts` for the shared-table backend and `cloudflare/sql.ts`\n * for the per-DO backend).\n *\n * The two backends differ in identifier quoting and scope handling, but the\n * `data` column lives in JSON in both, the deep-merge / replace contract is\n * identical, and the `json_set` / `json_remove` expression they emit for a\n * `DataPathOp[]` is byte-for-byte the same. Lifting the helpers here keeps\n * that shape in one place — the comment in `cloudflare/sql.ts` used to read\n * \"keep them in sync\"; this module is what they keep in sync against.\n *\n * The helpers take a `backendLabel` parameter so error messages still\n * distinguish `\"SQLite backend\"` (shared-table) from `\"DO SQLite backend\"`\n * (per-Durable-Object). Identifier quoting is the caller's job — the helpers\n * here only emit JSON-path expressions against an opaque `base` argument,\n * never bare column names.\n */\n\nimport { FiregraphError } from '../errors.js';\nimport type { DataPathOp } from './write-plan.js';\n\n/**\n * Constructor names of Firestore special types that don't survive a plain\n * `JSON.stringify` round-trip — they have non-enumerable accessors (e.g.\n * `Timestamp.seconds`) or class identity that JSON loses. 
Detection is by\n * `constructor.name` to keep this module dependency-free (importing\n * `@google-cloud/firestore` here would pollute the Cloudflare Workers bundle —\n * see tests/unit/bundle-pollution.test.ts).\n */\nexport const FIRESTORE_TYPE_NAMES = new Set([\n 'Timestamp',\n 'GeoPoint',\n 'VectorValue',\n 'DocumentReference',\n 'FieldValue',\n]);\n\nexport function isFirestoreSpecialType(value: object): string | null {\n const ctorName = (value as { constructor?: { name?: string } }).constructor?.name;\n if (ctorName && FIRESTORE_TYPE_NAMES.has(ctorName)) return ctorName;\n return null;\n}\n\n/**\n * Identifiers accepted in `data.<key>` paths and `dataOps` path segments.\n * The pattern (`/^[A-Za-z_][A-Za-z0-9_-]*$/`) covers code-style identifiers\n * (camel, snake, kebab). Silently quoting exotic keys would require symmetric\n * quoting at every read/write call site; any drift produces silent data\n * corruption. Failing loudly at compile time is safer — users with exotic\n * keys can use `replaceNode` / `replaceEdge` (full-data overwrite) instead.\n */\nexport const JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;\n\nexport function validateJsonPathKey(key: string, backendLabel: string): void {\n if (key.length === 0) {\n throw new FiregraphError(\n `${backendLabel}: empty JSON path component is not allowed`,\n 'INVALID_QUERY',\n );\n }\n if (!JSON_PATH_KEY_RE.test(key)) {\n throw new FiregraphError(\n `${backendLabel}: data field path component \"${key}\" is not a safe JSON-path identifier. ` +\n `Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceNode/replaceEdge (full-data overwrite) ` +\n `for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,\n 'INVALID_QUERY',\n );\n }\n}\n\n/**\n * Bind a value as a JSON-serializable string for `json(?)` placeholders in\n * the compiled `json_set` expression. 
`assertJsonSafePayload` already runs\n * eagerly at the write boundary, so the Firestore-special-type rejection\n * here is defense-in-depth — left in place per the team's preference for\n * symmetric guards across the SQLite compilers.\n */\nexport function jsonBind(value: unknown, backendLabel: string): string {\n if (value === undefined) return 'null';\n if (value !== null && typeof value === 'object') {\n const firestoreType = isFirestoreSpecialType(value);\n if (firestoreType) {\n throw new FiregraphError(\n `${backendLabel} cannot persist a Firestore ${firestoreType} value. ` +\n `Convert to a primitive before writing (e.g. \\`ts.toMillis()\\` for Timestamp).`,\n 'INVALID_ARGUMENT',\n );\n }\n }\n return JSON.stringify(value);\n}\n\n/**\n * Build the SQL expression that applies a list of `DataPathOp`s onto an\n * existing JSON column reference (e.g. `\"data\"` or `COALESCE(\"data\", '{}')`).\n *\n * Returns the full expression (already parenthesised where needed) and pushes\n * the bound parameters onto `params` in left-to-right order. Returns `null`\n * when there are no ops at all — the caller picks a fallback expression.\n *\n * Strategy:\n * 1. `json_remove(<base>, '$.a.b', '$.c', …)` strips delete-ops.\n * 2. `json_set(<#1>, '$.x.y', json(?), '$.z', json(?), …)` writes value-ops.\n * `json(?)` ensures non-string values bind as JSON (objects, arrays,\n * numbers, booleans, null).\n */\nexport function compileDataOpsExpr(\n ops: readonly DataPathOp[],\n base: string,\n params: unknown[],\n backendLabel: string,\n): string | null {\n if (ops.length === 0) return null;\n\n const deletes: DataPathOp[] = [];\n const sets: DataPathOp[] = [];\n for (const op of ops) (op.delete ? 
deletes : sets).push(op);\n\n let expr = base;\n\n if (deletes.length > 0) {\n const placeholders = deletes.map(() => '?').join(', ');\n expr = `json_remove(${expr}, ${placeholders})`;\n for (const op of deletes) {\n for (const seg of op.path) validateJsonPathKey(seg, backendLabel);\n params.push(`$.${op.path.join('.')}`);\n }\n }\n\n if (sets.length > 0) {\n const pieces = sets.map(() => '?, json(?)').join(', ');\n expr = `json_set(${expr}, ${pieces})`;\n for (const op of sets) {\n for (const seg of op.path) validateJsonPathKey(seg, backendLabel);\n params.push(`$.${op.path.join('.')}`);\n params.push(jsonBind(op.value, backendLabel));\n }\n }\n\n return expr;\n}\n","/**\n * Shared eager-validation helper for SQLite-style backends\n * (`internal/sqlite-sql.ts` and `cloudflare/sql.ts`).\n *\n * Both backends serialise `record.data` (and `update.replaceData`) as a raw\n * JSON blob via `JSON.stringify`. Two classes of value silently corrupt that\n * representation and the cross-backend contract:\n *\n * 1. **Firestore special types** (`Timestamp`, `GeoPoint`, `VectorValue`,\n * `DocumentReference`, `FieldValue`). They have non-enumerable accessors\n * or rely on class identity that JSON drops, so they round-trip as `{}`\n * or garbage. Callers must convert to primitives before writing.\n * 2. **`DELETE_FIELD` sentinel.** A `Symbol` is invisible to\n * `JSON.stringify`. If a caller embeds the sentinel in a `replaceNode`\n * payload or in a fresh-insert (no existing row), the field would\n * silently disappear instead of erroring loudly the way it does for the\n * `dataOps` path — so we reject it eagerly here.\n * 3. **Tagged serialization payloads** (`__firegraph_ser__`). These are the\n * sandbox migration boundary marshalling form. 
They are valid inside\n * Firestore (the Firestore backend re-hydrates them via\n * `deserializeFirestoreTypes`), but on SQLite they would persist as\n * opaque tagged objects that no downstream reader knows how to interpret.\n * Reject them at the boundary so the failure is loud.\n *\n * The Firestore backend does NOT call this — it accepts those types natively\n * and `deserializeFirestoreTypes` rebuilds tagged values into real Firestore\n * objects on its own write path.\n *\n * Detection avoids `instanceof` so this module stays free of\n * `@google-cloud/firestore`. Constructor-name + duck-type matches the\n * approach used by `bindValue`/`jsonBind` elsewhere in the SQLite compilers.\n */\n\nimport { FiregraphError } from '../errors.js';\nimport { SERIALIZATION_TAG } from './serialization-tag.js';\nimport { isDeleteSentinel } from './write-plan.js';\n\nconst FIRESTORE_TYPE_NAMES = new Set([\n 'Timestamp',\n 'GeoPoint',\n 'VectorValue',\n 'DocumentReference',\n 'FieldValue',\n]);\n\n/**\n * Walk `data` and throw on any value that the SQLite-style raw-JSON\n * persistence path can't faithfully serialise. `label` distinguishes the\n * caller in error messages (e.g. `'shared-table SQLite'` vs `'DO SQLite'`).\n *\n * Plain objects recurse. Arrays recurse element-wise. Primitives, `null`,\n * and `undefined` are accepted (mirroring how `flattenPatch` treats them\n * during the merge path).\n */\nexport function assertJsonSafePayload(data: unknown, label: string): void {\n walk(data, [], label);\n}\n\nfunction walk(node: unknown, path: readonly string[], label: string): void {\n if (node === null || node === undefined) return;\n if (isDeleteSentinel(node)) {\n throw new FiregraphError(\n `${label} backend cannot persist a deleteField() sentinel inside a ` +\n `full-data payload (replaceNode/replaceEdge or first-insert). The ` +\n `sentinel is only valid inside an updateNode/updateEdge dataOps patch. 
` +\n `Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n const t = typeof node;\n if (t === 'symbol' || t === 'function') {\n throw new FiregraphError(\n `${label} backend cannot persist a value of type ${t}. ` +\n `JSON.stringify drops it silently. Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n if (t === 'bigint') {\n throw new FiregraphError(\n `${label} backend cannot persist a value of type bigint. ` +\n `JSON.stringify cannot serialize this type (throws TypeError). ` +\n `Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n if (t !== 'object') return;\n if (Array.isArray(node)) {\n for (let i = 0; i < node.length; i++) {\n walk(node[i], [...path, String(i)], label);\n }\n return;\n }\n // Reject any object carrying the firegraph serialization tag — both legit\n // tagged Firestore-type payloads (the migration-sandbox output that round-\n // trips through Firestore) and bogus user data that happens to put a\n // literal `__firegraph_ser__` key on a plain object. SQLite has no\n // Timestamp class to rebuild the tag into, and silently writing the\n // envelope would produce an unreadable column.\n const obj = node as Record<string, unknown>;\n if (Object.prototype.hasOwnProperty.call(obj, SERIALIZATION_TAG)) {\n const tagValue = obj[SERIALIZATION_TAG];\n throw new FiregraphError(\n `${label} backend cannot persist an object with a \\`${SERIALIZATION_TAG}\\` ` +\n `key (value: ${formatTagValue(tagValue)}). Recognised tags are valid only on ` +\n `the Firestore backend (migration-sandbox output); a literal ` +\n `\\`${SERIALIZATION_TAG}\\` field in user data is reserved and not allowed. 
` +\n `Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n // Class instances: reject Firestore special types loudly; reject every\n // other class instance generically (Map, Set, Date are caller's\n // responsibility to convert — Date is allowed in filter binds via\n // `bindValue` but not as a stored payload value because JSON.stringify\n // produces a string, not a real Date).\n const proto = Object.getPrototypeOf(node);\n if (proto !== null && proto !== Object.prototype) {\n const ctor = (node as { constructor?: { name?: string } }).constructor;\n const ctorName = ctor && typeof ctor.name === 'string' ? ctor.name : '<anonymous>';\n if (FIRESTORE_TYPE_NAMES.has(ctorName)) {\n throw new FiregraphError(\n `${label} backend cannot persist a Firestore ${ctorName} value. ` +\n `Convert to a primitive before writing (e.g. \\`ts.toMillis()\\` for ` +\n `Timestamp, \\`{lat,lng}\\` for GeoPoint). Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n // Accept Date as an alias for its epoch-ms — it round-trips as an ISO\n // string via JSON.stringify, which the caller chose; not our place to\n // reject. Same for Buffer / typed arrays — they'll JSON-serialize as\n // best they can. Reject only opaque exotic instances that JSON drops.\n if (node instanceof Date) return;\n throw new FiregraphError(\n `${label} backend cannot persist a class instance of type ${ctorName}. ` +\n `Only plain objects, arrays, and primitives round-trip safely through ` +\n `JSON storage. Path: ${formatPath(path)}.`,\n 'INVALID_ARGUMENT',\n );\n }\n for (const key of Object.keys(obj)) {\n walk(obj[key], [...path, key], label);\n }\n}\n\nfunction formatPath(path: readonly string[]): string {\n return path.length === 0 ? 
'<root>' : path.map((p) => JSON.stringify(p)).join(' > ');\n}\n\nfunction formatTagValue(value: unknown): string {\n if (value === null) return 'null';\n if (value === undefined) return 'undefined';\n if (typeof value === 'string') return JSON.stringify(value);\n if (typeof value === 'number' || typeof value === 'boolean' || typeof value === 'bigint') {\n return String(value);\n }\n return typeof value;\n}\n","/**\n * Backend-agnostic timestamp.\n *\n * Structurally compatible with `@google-cloud/firestore`'s `Timestamp` so\n * that records returned by either the Firestore or SQLite backend can be\n * consumed through the same `StoredGraphRecord` shape.\n *\n * Firestore's native `Timestamp` already satisfies this interface, so\n * existing Firestore consumers see no behavior change. The SQLite backend\n * returns instances of `GraphTimestampImpl` which also satisfies it.\n */\n\nexport interface GraphTimestamp {\n readonly seconds: number;\n readonly nanoseconds: number;\n toDate(): Date;\n toMillis(): number;\n}\n\n/**\n * Concrete `GraphTimestamp` implementation used by non-Firestore backends.\n * Mirrors the surface of Firestore's `Timestamp` enough for typical use.\n */\nexport class GraphTimestampImpl implements GraphTimestamp {\n constructor(\n public readonly seconds: number,\n public readonly nanoseconds: number,\n ) {}\n\n toDate(): Date {\n return new Date(this.toMillis());\n }\n\n toMillis(): number {\n return this.seconds * 1000 + Math.floor(this.nanoseconds / 1e6);\n }\n\n toJSON(): { seconds: number; nanoseconds: number } {\n return { seconds: this.seconds, nanoseconds: this.nanoseconds };\n }\n\n static fromMillis(ms: number): GraphTimestampImpl {\n const seconds = Math.floor(ms / 1000);\n const nanoseconds = (ms - seconds * 1000) * 1e6;\n return new GraphTimestampImpl(seconds, nanoseconds);\n }\n\n static now(): GraphTimestampImpl {\n return GraphTimestampImpl.fromMillis(Date.now());\n }\n}\n\n/**\n * Sentinel returned by 
`StorageBackend.serverTimestamp()` when the backend\n * has no native server-time concept and just wants a placeholder that the\n * adapter resolves to a concrete time at write commit. SQLite backends\n * substitute the wall-clock millis at the moment of `setDoc`/`updateDoc`.\n */\nexport const SERVER_TIMESTAMP_SENTINEL = Symbol.for('firegraph.serverTimestamp');\nexport type ServerTimestampSentinel = typeof SERVER_TIMESTAMP_SENTINEL;\n\nexport function isServerTimestampSentinel(value: unknown): value is ServerTimestampSentinel {\n return value === SERVER_TIMESTAMP_SENTINEL;\n}\n"],"mappings":";;;;;;;;;AAyCA,IAAM,WAAW;AAOjB,IAAM,mBAAmB;AAEzB,SAAS,WAAW,MAAsB;AACxC,MAAI,CAAC,SAAS,KAAK,IAAI,GAAG;AACxB,UAAM,IAAI;AAAA,MACR,wCAAwC,IAAI;AAAA,MAC5C;AAAA,IACF;AAAA,EACF;AACA,SAAO,IAAI,IAAI;AACjB;AAMA,SAAS,QAAQ,KAAqB;AACpC,MAAI,IAAI;AACR,WAAS,IAAI,GAAG,IAAI,IAAI,QAAQ,KAAK;AACnC,SAAK,IAAI,WAAW,CAAC;AACrB,QAAI,KAAK,KAAK,GAAG,QAAU;AAAA,EAC7B;AACA,UAAQ,MAAM,GAAG,SAAS,EAAE,EAAE,SAAS,GAAG,GAAG;AAC/C;AAEA,SAAS,gBACP,QACwC;AACxC,SAAO,OAAO,IAAI,CAAC,MAAM;AACvB,QAAI,OAAO,MAAM,SAAU,QAAO,EAAE,MAAM,GAAG,MAAM,MAAM;AACzD,QAAI,CAAC,EAAE,QAAQ,OAAO,EAAE,SAAS,UAAU;AACzC,YAAM,IAAI;AAAA,QACR,6EAA6E,KAAK,UAAU,CAAC,CAAC;AAAA,QAC9F;AAAA,MACF;AAAA,IACF;AACA,WAAO,EAAE,MAAM,EAAE,MAAM,MAAM,CAAC,CAAC,EAAE,KAAK;AAAA,EACxC,CAAC;AACH;AAEA,SAAS,gBAAgB,MAAiB,gBAAkC;AAK1E,QAAM,aAAa;AAAA,IACjB,MAAM;AAAA,IACN,QAAQ,gBAAgB,KAAK,MAAM;AAAA,IACnC,OAAO,KAAK,SAAS;AAAA,EACvB;AACA,SAAO,QAAQ,KAAK,UAAU,UAAU,CAAC;AAC3C;AASA,SAAS,iBAAiB,MAAc,eAA+C;AACrF,QAAM,MAAM,cAAc,IAAI;AAC9B,MAAI,IAAK,QAAO,WAAW,GAAG;AAE9B,MAAI,SAAS,QAAQ;AACnB,WAAO;AAAA,EACT;AACA,MAAI,KAAK,WAAW,OAAO,GAAG;AAC5B,UAAM,SAAS,KAAK,MAAM,CAAC;AAC3B,UAAM,QAAQ,OAAO,MAAM,GAAG;AAC9B,eAAW,QAAQ,OAAO;AACxB,UAAI,CAAC,iBAAiB,KAAK,IAAI,GAAG;AAChC,cAAM,IAAI;AAAA,UACR,wBAAwB,IAAI,4BAA4B,IAAI;AAAA,UAE5D;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAGA,WAAO,2BAA2B,MAAM;AAAA,EAC1C;AAEA,QAAM,IAAI;AAAA,IACR,oBAAoB,IAAI;AAAA,IAGxB;AAAA,EACF;AACF;AAuBO,SAAS,cAAc,MAAiB,SAAwC;AACrF,QAAM,EAAE,OAAO,eA
Ae,iBAAiB,CAAC,EAAE,IAAI;AAEtD,MAAI,CAAC,KAAK,UAAU,KAAK,OAAO,WAAW,GAAG;AAC5C,UAAM,IAAI,eAAe,8CAA8C,eAAe;AAAA,EACxF;AAEA,QAAM,aAAa,gBAAgB,KAAK,MAAM;AAC9C,QAAM,OAAO,gBAAgB,MAAM,cAAc;AACjD,QAAM,YAAY,GAAG,KAAK,QAAQ,IAAI;AAEtC,QAAM,OAAiB,CAAC;AACxB,aAAW,OAAO,gBAAgB;AAChC,SAAK,KAAK,WAAW,GAAG,CAAC;AAAA,EAC3B;AACA,aAAW,KAAK,YAAY;AAC1B,UAAM,OAAO,iBAAiB,EAAE,MAAM,aAAa;AACnD,SAAK,KAAK,EAAE,OAAO,GAAG,IAAI,UAAU,IAAI;AAAA,EAC1C;AAEA,MAAI,MAAM,8BAA8B,WAAW,SAAS,CAAC,OAAO,WAAW,KAAK,CAAC,IAAI,KAAK,KAAK,IAAI,CAAC;AAExG,MAAI,KAAK,OAAO;AAKd,WAAO,UAAU,KAAK,KAAK;AAAA,EAC7B;AAEA,SAAO;AACT;AAOO,SAAS,iBACd,OACA,iBAA2B,CAAC,GACf;AACb,QAAM,OAAO,oBAAI,IAAY;AAC7B,QAAM,MAAmB,CAAC;AAC1B,aAAW,QAAQ,OAAO;AACxB,UAAM,KAAK,gBAAgB,MAAM,cAAc;AAC/C,QAAI,KAAK,IAAI,EAAE,EAAG;AAClB,SAAK,IAAI,EAAE;AACX,QAAI,KAAK,IAAI;AAAA,EACf;AACA,SAAO;AACT;;;ACtLO,IAAM,uBAAuB,oBAAI,IAAI;AAAA,EAC1C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAEM,SAAS,uBAAuB,OAA8B;AACnE,QAAM,WAAY,MAA8C,aAAa;AAC7E,MAAI,YAAY,qBAAqB,IAAI,QAAQ,EAAG,QAAO;AAC3D,SAAO;AACT;AAUO,IAAMA,oBAAmB;AAEzB,SAAS,oBAAoB,KAAa,cAA4B;AAC3E,MAAI,IAAI,WAAW,GAAG;AACpB,UAAM,IAAI;AAAA,MACR,GAAG,YAAY;AAAA,MACf;AAAA,IACF;AAAA,EACF;AACA,MAAI,CAACA,kBAAiB,KAAK,GAAG,GAAG;AAC/B,UAAM,IAAI;AAAA,MACR,GAAG,YAAY,gCAAgC,GAAG;AAAA,MAGlD;AAAA,IACF;AAAA,EACF;AACF;AASO,SAAS,SAAS,OAAgB,cAA8B;AACrE,MAAI,UAAU,OAAW,QAAO;AAChC,MAAI,UAAU,QAAQ,OAAO,UAAU,UAAU;AAC/C,UAAM,gBAAgB,uBAAuB,KAAK;AAClD,QAAI,eAAe;AACjB,YAAM,IAAI;AAAA,QACR,GAAG,YAAY,+BAA+B,aAAa;AAAA,QAE3D;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACA,SAAO,KAAK,UAAU,KAAK;AAC7B;AAgBO,SAAS,mBACd,KACA,MACA,QACA,cACe;AACf,MAAI,IAAI,WAAW,EAAG,QAAO;AAE7B,QAAM,UAAwB,CAAC;AAC/B,QAAM,OAAqB,CAAC;AAC5B,aAAW,MAAM,IAAK,EAAC,GAAG,SAAS,UAAU,MAAM,KAAK,EAAE;AAE1D,MAAI,OAAO;AAEX,MAAI,QAAQ,SAAS,GAAG;AACtB,UAAM,eAAe,QAAQ,IAAI,MAAM,GAAG,EAAE,KAAK,IAAI;AACrD,WAAO,eAAe,IAAI,KAAK,YAAY;AAC3C,eAAW,MAAM,SAAS;AACxB,iBAAW,OAAO,GAAG,KAAM,qBAAoB,KAAK,YAAY;AAChE,aAAO,KAAK,KAAK,GAAG,KAAK,KAAK,GAAG,CAAC,EAAE;AAAA,IACtC;AAAA,EACF;AAEA,MAAI,KAAK,SAAS,GAAG;AACnB,UAAM,SAAS,KAAK,I
AAI,MAAM,YAAY,EAAE,KAAK,IAAI;AACrD,WAAO,YAAY,IAAI,KAAK,MAAM;AAClC,eAAW,MAAM,MAAM;AACrB,iBAAW,OAAO,GAAG,KAAM,qBAAoB,KAAK,YAAY;AAChE,aAAO,KAAK,KAAK,GAAG,KAAK,KAAK,GAAG,CAAC,EAAE;AACpC,aAAO,KAAK,SAAS,GAAG,OAAO,YAAY,CAAC;AAAA,IAC9C;AAAA,EACF;AAEA,SAAO;AACT;;;ACxGA,IAAMC,wBAAuB,oBAAI,IAAI;AAAA,EACnC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAWM,SAAS,sBAAsB,MAAe,OAAqB;AACxE,OAAK,MAAM,CAAC,GAAG,KAAK;AACtB;AAEA,SAAS,KAAK,MAAe,MAAyB,OAAqB;AACzE,MAAI,SAAS,QAAQ,SAAS,OAAW;AACzC,MAAI,iBAAiB,IAAI,GAAG;AAC1B,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,0MAGG,WAAW,IAAI,CAAC;AAAA,MAC3B;AAAA,IACF;AAAA,EACF;AACA,QAAM,IAAI,OAAO;AACjB,MAAI,MAAM,YAAY,MAAM,YAAY;AACtC,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,2CAA2C,CAAC,6CACP,WAAW,IAAI,CAAC;AAAA,MAC7D;AAAA,IACF;AAAA,EACF;AACA,MAAI,MAAM,UAAU;AAClB,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,uHAEG,WAAW,IAAI,CAAC;AAAA,MAC3B;AAAA,IACF;AAAA,EACF;AACA,MAAI,MAAM,SAAU;AACpB,MAAI,MAAM,QAAQ,IAAI,GAAG;AACvB,aAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,WAAK,KAAK,CAAC,GAAG,CAAC,GAAG,MAAM,OAAO,CAAC,CAAC,GAAG,KAAK;AAAA,IAC3C;AACA;AAAA,EACF;AAOA,QAAM,MAAM;AACZ,MAAI,OAAO,UAAU,eAAe,KAAK,KAAK,iBAAiB,GAAG;AAChE,UAAM,WAAW,IAAI,iBAAiB;AACtC,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,8CAA8C,iBAAiB,kBACtD,eAAe,QAAQ,CAAC,sGAElC,iBAAiB,4DACb,WAAW,IAAI,CAAC;AAAA,MAC3B;AAAA,IACF;AAAA,EACF;AAMA,QAAM,QAAQ,OAAO,eAAe,IAAI;AACxC,MAAI,UAAU,QAAQ,UAAU,OAAO,WAAW;AAChD,UAAM,OAAQ,KAA6C;AAC3D,UAAM,WAAW,QAAQ,OAAO,KAAK,SAAS,WAAW,KAAK,OAAO;AACrE,QAAIA,sBAAqB,IAAI,QAAQ,GAAG;AACtC,YAAM,IAAI;AAAA,QACR,GAAG,KAAK,uCAAuC,QAAQ,2HAEJ,WAAW,IAAI,CAAC;AAAA,QACnE;AAAA,MACF;AAAA,IACF;AAKA,QAAI,gBAAgB,KAAM;AAC1B,UAAM,IAAI;AAAA,MACR,GAAG,KAAK,oDAAoD,QAAQ,8FAE3C,WAAW,IAAI,CAAC;AAAA,MACzC;AAAA,IACF;AAAA,EACF;AACA,aAAW,OAAO,OAAO,KAAK,GAAG,GAAG;AAClC,SAAK,IAAI,GAAG,GAAG,CAAC,GAAG,MAAM,GAAG,GAAG,KAAK;AAAA,EACtC;AACF;AAEA,SAAS,WAAW,MAAiC;AACnD,SAAO,KAAK,WAAW,IAAI,WAAW,KAAK,IAAI,CAAC,MAAM,KAAK,UAAU,CAAC,CAAC,EAAE,KAAK,KAAK;AACrF;AAEA,SAAS,eAAe,OAAwB;AAC9C,MAAI,UAAU,KAAM,QAAO;AAC3B,MAAI,UAAU,OAAW,QAAO;AAChC,MAAI,OAAO,UAAU,SAAU,QAAO,KAAK,UAAU,KAAK;AAC1
D,MAAI,OAAO,UAAU,YAAY,OAAO,UAAU,aAAa,OAAO,UAAU,UAAU;AACxF,WAAO,OAAO,KAAK;AAAA,EACrB;AACA,SAAO,OAAO;AAChB;;;ACrIO,IAAM,qBAAN,MAAM,oBAA6C;AAAA,EACxD,YACkB,SACA,aAChB;AAFgB;AACA;AAAA,EACf;AAAA,EAEH,SAAe;AACb,WAAO,IAAI,KAAK,KAAK,SAAS,CAAC;AAAA,EACjC;AAAA,EAEA,WAAmB;AACjB,WAAO,KAAK,UAAU,MAAO,KAAK,MAAM,KAAK,cAAc,GAAG;AAAA,EAChE;AAAA,EAEA,SAAmD;AACjD,WAAO,EAAE,SAAS,KAAK,SAAS,aAAa,KAAK,YAAY;AAAA,EAChE;AAAA,EAEA,OAAO,WAAW,IAAgC;AAChD,UAAM,UAAU,KAAK,MAAM,KAAK,GAAI;AACpC,UAAM,eAAe,KAAK,UAAU,OAAQ;AAC5C,WAAO,IAAI,oBAAmB,SAAS,WAAW;AAAA,EACpD;AAAA,EAEA,OAAO,MAA0B;AAC/B,WAAO,oBAAmB,WAAW,KAAK,IAAI,CAAC;AAAA,EACjD;AACF;","names":["JSON_PATH_KEY_RE","FIRESTORE_TYPE_NAMES"]}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
// src/internal/firestore-traverse-compiler.ts
|
|
2
|
+
var MAX_PIPELINE_DEPTH = 5;
|
|
3
|
+
function compileEngineTraversal(params, opts) {
|
|
4
|
+
const maxDepth = opts?.maxDepth ?? MAX_PIPELINE_DEPTH;
|
|
5
|
+
const maxReads = opts?.maxReads ?? params.maxReads;
|
|
6
|
+
if (!Array.isArray(params.hops) || params.hops.length === 0) {
|
|
7
|
+
return { eligible: false, reason: "engine traversal requires at least one hop" };
|
|
8
|
+
}
|
|
9
|
+
if (params.hops.length > maxDepth) {
|
|
10
|
+
return {
|
|
11
|
+
eligible: false,
|
|
12
|
+
reason: `engine traversal depth ${params.hops.length} exceeds MAX_PIPELINE_DEPTH (${maxDepth})`
|
|
13
|
+
};
|
|
14
|
+
}
|
|
15
|
+
if (!Array.isArray(params.sources)) {
|
|
16
|
+
return { eligible: false, reason: "engine traversal requires a sources array" };
|
|
17
|
+
}
|
|
18
|
+
const normalizedHops = [];
|
|
19
|
+
for (let i = 0; i < params.hops.length; i++) {
|
|
20
|
+
const hop = params.hops[i];
|
|
21
|
+
if (!hop.axbType || hop.axbType.length === 0) {
|
|
22
|
+
return {
|
|
23
|
+
eligible: false,
|
|
24
|
+
reason: `engine traversal hop ${i} is missing axbType`
|
|
25
|
+
};
|
|
26
|
+
}
|
|
27
|
+
if (typeof hop.limitPerSource !== "number" || hop.limitPerSource <= 0 || !Number.isFinite(hop.limitPerSource)) {
|
|
28
|
+
return {
|
|
29
|
+
eligible: false,
|
|
30
|
+
reason: `engine traversal hop ${i} (${hop.axbType}) requires a positive limitPerSource`
|
|
31
|
+
};
|
|
32
|
+
}
|
|
33
|
+
normalizedHops.push({
|
|
34
|
+
...hop,
|
|
35
|
+
axbType: hop.axbType,
|
|
36
|
+
direction: hop.direction ?? "forward",
|
|
37
|
+
limitPerSource: hop.limitPerSource
|
|
38
|
+
});
|
|
39
|
+
}
|
|
40
|
+
let estimatedReads = Math.max(1, params.sources.length);
|
|
41
|
+
for (const hop of normalizedHops) {
|
|
42
|
+
estimatedReads *= hop.limitPerSource;
|
|
43
|
+
if (estimatedReads > Number.MAX_SAFE_INTEGER) {
|
|
44
|
+
estimatedReads = Number.MAX_SAFE_INTEGER;
|
|
45
|
+
break;
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
if (maxReads !== void 0 && estimatedReads > maxReads) {
|
|
49
|
+
return {
|
|
50
|
+
eligible: false,
|
|
51
|
+
reason: `engine traversal worst-case response size ${estimatedReads} exceeds maxReads budget ${maxReads}`
|
|
52
|
+
};
|
|
53
|
+
}
|
|
54
|
+
return {
|
|
55
|
+
eligible: true,
|
|
56
|
+
normalized: {
|
|
57
|
+
sources: params.sources,
|
|
58
|
+
hops: normalizedHops,
|
|
59
|
+
estimatedReads
|
|
60
|
+
}
|
|
61
|
+
};
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
export {
|
|
65
|
+
compileEngineTraversal
|
|
66
|
+
};
|
|
67
|
+
//# sourceMappingURL=chunk-D4J7Z4FE.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/internal/firestore-traverse-compiler.ts"],"sourcesContent":["/**\n * Pure compiler for engine-level multi-hop traversal.\n *\n * Takes an `EngineTraversalParams` spec and decides whether it can be\n * compiled into one nested-Pipeline round trip. Returns a discriminated\n * union — `{ eligible: true; normalized }` carries the validated spec\n * with defaults filled in; `{ eligible: false; reason }` carries a\n * human-readable explanation that the caller (traversal layer or\n * `engineTraversal: 'force'` test path) can either log, throw, or\n * silently fall back on.\n *\n * The compiler is split out so the validation surface is unit-testable\n * without spinning up a Firestore SDK or a real Pipeline. The actual\n * pipeline construction and result decoding live in\n * `firestore-traverse.ts` and depend on `@google-cloud/firestore`.\n *\n * Eligibility checks (in order):\n *\n * 1. `hops.length` ≥ 1 → otherwise no traversal to run\n * 2. `hops.length` ≤ `maxDepth` (default 5) → pipeline-depth cap\n * 3. Every hop has `limitPerSource` set → required to bound response size\n * 4. Every hop's `axbType` is non-empty → query needs a relation predicate\n * 5. Worst-case response size ≤ `maxReads` budget → prevent runaway tree responses\n *\n * The maxDepth bound is conservative — Firestore Pipelines don't\n * publish a hard limit on `addFields` / `define` nesting depth, but\n * empirically deep nesting starts to slow down planning. Five hops\n * covers the vast majority of real-world traversal specs; specs that\n * exceed it fall back to the per-hop loop with a debug-level signal.\n *\n * The response-size estimate is the conservative top-line:\n * `sources.length × Π(limitPerSource_i)`. This is the worst-case edge\n * count at the deepest hop, which dominates the total tree size for\n * branching factors > 1. 
We deliberately don't sum over hops — the\n * deepest-hop bound already triggers fallback well before any realistic\n * total response size matters.\n */\n\nimport type { EngineHopSpec, EngineTraversalParams } from '../types.js';\n\n/**\n * Default cap on `addFields` / `define` nesting depth. Traversal specs\n * deeper than this are rejected by the compiler and fall back to the\n * per-hop loop. Configurable per call via `compileEngineTraversal`'s\n * `opts.maxDepth`.\n */\nexport const MAX_PIPELINE_DEPTH = 5;\n\n/**\n * A normalized, validated engine-traversal spec ready for the executor\n * to translate into a nested Pipeline. Mirrors `EngineTraversalParams`\n * but with `direction` defaulted to `'forward'` on every hop and the\n * estimated worst-case response size attached for budget bookkeeping.\n */\nexport interface NormalizedEngineTraversal {\n sources: string[];\n hops: Array<\n Required<Pick<EngineHopSpec, 'axbType' | 'limitPerSource' | 'direction'>> & EngineHopSpec\n >;\n /** Worst-case edge count at the deepest hop — `sources.length × Π(limitPerSource_i)`. */\n estimatedReads: number;\n}\n\nexport type CompilerResult =\n | { eligible: true; normalized: NormalizedEngineTraversal }\n | { eligible: false; reason: string };\n\nexport interface CompilerOptions {\n /** Override the depth cap. Default `MAX_PIPELINE_DEPTH` (5). */\n maxDepth?: number;\n /**\n * Worst-case response-size budget. The compiler refuses to emit when\n * `sources.length × Π(limitPerSource_i)` exceeds this.\n */\n maxReads?: number;\n}\n\n/**\n * Validate an engine-traversal spec. 
Pure; no SDK interaction.\n *\n * Returns `{ eligible: true; normalized }` with `direction` defaulted\n * and `estimatedReads` attached, or `{ eligible: false; reason }` with\n * a one-line description suitable for logging or for an\n * `UNSUPPORTED_OPERATION` error message.\n */\nexport function compileEngineTraversal(\n params: EngineTraversalParams,\n opts?: CompilerOptions,\n): CompilerResult {\n const maxDepth = opts?.maxDepth ?? MAX_PIPELINE_DEPTH;\n const maxReads = opts?.maxReads ?? params.maxReads;\n\n if (!Array.isArray(params.hops) || params.hops.length === 0) {\n return { eligible: false, reason: 'engine traversal requires at least one hop' };\n }\n if (params.hops.length > maxDepth) {\n return {\n eligible: false,\n reason: `engine traversal depth ${params.hops.length} exceeds MAX_PIPELINE_DEPTH (${maxDepth})`,\n };\n }\n if (!Array.isArray(params.sources)) {\n return { eligible: false, reason: 'engine traversal requires a sources array' };\n }\n\n const normalizedHops: NormalizedEngineTraversal['hops'] = [];\n for (let i = 0; i < params.hops.length; i++) {\n const hop = params.hops[i];\n if (!hop.axbType || hop.axbType.length === 0) {\n return {\n eligible: false,\n reason: `engine traversal hop ${i} is missing axbType`,\n };\n }\n if (\n typeof hop.limitPerSource !== 'number' ||\n hop.limitPerSource <= 0 ||\n !Number.isFinite(hop.limitPerSource)\n ) {\n return {\n eligible: false,\n reason: `engine traversal hop ${i} (${hop.axbType}) requires a positive limitPerSource`,\n };\n }\n normalizedHops.push({\n ...hop,\n axbType: hop.axbType,\n direction: hop.direction ?? 'forward',\n limitPerSource: hop.limitPerSource,\n });\n }\n\n // Worst-case at deepest hop. We multiply iteratively so that an\n // overflowing product short-circuits before bumping into JS's float\n // precision. 
`Number.MAX_SAFE_INTEGER` is well past any reasonable\n // `maxReads` value, so that's the early-exit threshold even when the\n // caller didn't supply a budget.\n let estimatedReads = Math.max(1, params.sources.length);\n for (const hop of normalizedHops) {\n estimatedReads *= hop.limitPerSource;\n if (estimatedReads > Number.MAX_SAFE_INTEGER) {\n estimatedReads = Number.MAX_SAFE_INTEGER;\n break;\n }\n }\n\n if (maxReads !== undefined && estimatedReads > maxReads) {\n return {\n eligible: false,\n reason: `engine traversal worst-case response size ${estimatedReads} exceeds maxReads budget ${maxReads}`,\n };\n }\n\n return {\n eligible: true,\n normalized: {\n sources: params.sources,\n hops: normalizedHops,\n estimatedReads,\n },\n };\n}\n"],"mappings":";AA8CO,IAAM,qBAAqB;AAuC3B,SAAS,uBACd,QACA,MACgB;AAChB,QAAM,WAAW,MAAM,YAAY;AACnC,QAAM,WAAW,MAAM,YAAY,OAAO;AAE1C,MAAI,CAAC,MAAM,QAAQ,OAAO,IAAI,KAAK,OAAO,KAAK,WAAW,GAAG;AAC3D,WAAO,EAAE,UAAU,OAAO,QAAQ,6CAA6C;AAAA,EACjF;AACA,MAAI,OAAO,KAAK,SAAS,UAAU;AACjC,WAAO;AAAA,MACL,UAAU;AAAA,MACV,QAAQ,0BAA0B,OAAO,KAAK,MAAM,gCAAgC,QAAQ;AAAA,IAC9F;AAAA,EACF;AACA,MAAI,CAAC,MAAM,QAAQ,OAAO,OAAO,GAAG;AAClC,WAAO,EAAE,UAAU,OAAO,QAAQ,4CAA4C;AAAA,EAChF;AAEA,QAAM,iBAAoD,CAAC;AAC3D,WAAS,IAAI,GAAG,IAAI,OAAO,KAAK,QAAQ,KAAK;AAC3C,UAAM,MAAM,OAAO,KAAK,CAAC;AACzB,QAAI,CAAC,IAAI,WAAW,IAAI,QAAQ,WAAW,GAAG;AAC5C,aAAO;AAAA,QACL,UAAU;AAAA,QACV,QAAQ,wBAAwB,CAAC;AAAA,MACnC;AAAA,IACF;AACA,QACE,OAAO,IAAI,mBAAmB,YAC9B,IAAI,kBAAkB,KACtB,CAAC,OAAO,SAAS,IAAI,cAAc,GACnC;AACA,aAAO;AAAA,QACL,UAAU;AAAA,QACV,QAAQ,wBAAwB,CAAC,KAAK,IAAI,OAAO;AAAA,MACnD;AAAA,IACF;AACA,mBAAe,KAAK;AAAA,MAClB,GAAG;AAAA,MACH,SAAS,IAAI;AAAA,MACb,WAAW,IAAI,aAAa;AAAA,MAC5B,gBAAgB,IAAI;AAAA,IACtB,CAAC;AAAA,EACH;AAOA,MAAI,iBAAiB,KAAK,IAAI,GAAG,OAAO,QAAQ,MAAM;AACtD,aAAW,OAAO,gBAAgB;AAChC,sBAAkB,IAAI;AACtB,QAAI,iBAAiB,OAAO,kBAAkB;AAC5C,uBAAiB,OAAO;AACxB;AAAA,IACF;AAAA,EACF;AAEA,MAAI,aAAa,UAAa,iBAAiB,UAAU;AACvD,WAAO;AAAA,MACL,UAAU;AAAA,MACV,QAAQ,6CAA6C,cAAc,4BAA4B,QAAQ;AAAA,IACzG;AAAA,EACF;AAEA,SAAO
;AAAA,IACL,UAAU;AAAA,IACV,YAAY;AAAA,MACV,SAAS,OAAO;AAAA,MAChB,MAAM;AAAA,MACN;AAAA,IACF;AAAA,EACF;AACF;","names":[]}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
// src/internal/backend.ts
|
|
2
|
+
function createCapabilities(caps) {
|
|
3
|
+
return {
|
|
4
|
+
has: (capability) => caps.has(capability),
|
|
5
|
+
values: () => caps.values()
|
|
6
|
+
};
|
|
7
|
+
}
|
|
8
|
+
function intersectCapabilities(parts) {
|
|
9
|
+
if (parts.length === 0) return createCapabilities(/* @__PURE__ */ new Set());
|
|
10
|
+
const sets = parts.map((p) => new Set(p.values()));
|
|
11
|
+
const [first, ...rest] = sets;
|
|
12
|
+
const intersection = /* @__PURE__ */ new Set();
|
|
13
|
+
for (const c of first) {
|
|
14
|
+
if (rest.every((s) => s.has(c))) intersection.add(c);
|
|
15
|
+
}
|
|
16
|
+
return createCapabilities(intersection);
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
export {
|
|
20
|
+
createCapabilities,
|
|
21
|
+
intersectCapabilities
|
|
22
|
+
};
|
|
23
|
+
//# sourceMappingURL=chunk-N5HFDWQX.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/internal/backend.ts"],"sourcesContent":["/**\n * Backend abstraction for firegraph.\n *\n * `StorageBackend` is the single interface every storage driver implements.\n * The Firestore backend wraps `@google-cloud/firestore`; the SQLite backend\n * (shared by D1 and Durable Object SQLite) uses a parameterized SQL executor.\n *\n * `GraphClientImpl` and friends depend only on this interface — they have\n * no direct knowledge of Firestore or SQLite.\n */\n\nimport type {\n AggregateSpec,\n BulkOptions,\n BulkResult,\n BulkUpdatePatch,\n Capability,\n CascadeResult,\n EngineTraversalParams,\n EngineTraversalResult,\n ExpandParams,\n ExpandResult,\n FindEdgesParams,\n FindNearestParams,\n FullTextSearchParams,\n GeoSearchParams,\n GraphReader,\n QueryFilter,\n QueryOptions,\n StoredGraphRecord,\n} from '../types.js';\nimport type { DataPathOp } from './write-plan.js';\n\n/**\n * Runtime descriptor of which `Capability`s a `StorageBackend` actually\n * implements. Static for the lifetime of a backend instance; declared at\n * construction. The phantom `_phantom` field is a type-level marker\n * (never read at runtime) that lets the type parameter `C` flow through\n * the descriptor for use by `GraphClient<C>` conditional gating.\n *\n * Use `createCapabilities` to construct one. Use `.has(c)` to check\n * membership at runtime; the type system gates extension methods on the\n * client level (see `.claude/backend-capabilities.md`).\n */\nexport interface BackendCapabilities<C extends Capability = Capability> {\n /** Runtime membership check. */\n has(capability: Capability): boolean;\n /** Iterate declared capabilities (diagnostics, error messages). */\n values(): IterableIterator<Capability>;\n /** Type-level marker. Never read at runtime. */\n readonly _phantom?: C;\n}\n\n/**\n * Construct a `BackendCapabilities<C>` from an explicit set. The set is\n * captured by reference; callers should treat it as readonly after passing\n * it in. 
The runtime cost of `has()` is one Set lookup.\n */\nexport function createCapabilities<C extends Capability>(\n caps: ReadonlySet<C>,\n): BackendCapabilities<C> {\n return {\n has: (capability: Capability): boolean => caps.has(capability as C),\n values: () => caps.values() as IterableIterator<Capability>,\n };\n}\n\n/**\n * Intersect multiple capability sets. Used by `RoutingStorageBackend` to\n * derive the capability set of a composite backend: a routed graph can\n * only honour a capability if every wrapped backend honours it.\n */\nexport function intersectCapabilities(\n parts: ReadonlyArray<BackendCapabilities>,\n): BackendCapabilities {\n if (parts.length === 0) return createCapabilities(new Set<Capability>());\n const sets = parts.map((p) => new Set<Capability>(p.values()));\n const [first, ...rest] = sets;\n const intersection = new Set<Capability>();\n for (const c of first) {\n if (rest.every((s) => s.has(c))) intersection.add(c);\n }\n return createCapabilities(intersection);\n}\n\n/**\n * Per-record write payload — backend-agnostic. Timestamps are not present;\n * the backend supplies them via `serverTimestamp()` placeholders that it\n * itself resolves at commit time.\n */\nexport interface WritableRecord {\n aType: string;\n aUid: string;\n axbType: string;\n bType: string;\n bUid: string;\n data: Record<string, unknown>;\n /** Schema version (set by the writer when registry has migrations). */\n v?: number;\n}\n\n/**\n * Write semantics for `setDoc`.\n *\n * - `'merge'` — the new contract (0.12+). Existing fields not mentioned\n * in the new data survive; nested objects are recursively merged;\n * arrays are replaced as a unit. This is the default for\n * `putNode` / `putEdge`.\n * - `'replace'` — the document is replaced wholesale, dropping any\n * fields not present in the payload. 
This is the explicit escape\n * hatch surfaced as `replaceNode` / `replaceEdge` and used by\n * migration write-back.\n */\nexport type WriteMode = 'merge' | 'replace';\n\n/**\n * Patch shape for `updateDoc`.\n *\n * - `dataOps`: list of deep-path terminal ops produced by\n * `flattenPatch()` (one op per leaf — arrays / primitives / Firestore\n * special types are terminal). Used by `updateNode` / `updateEdge`.\n * Sibling keys at every depth are preserved.\n * - `replaceData`: full `data` replacement. Used only by the migration\n * write-back path, which has already produced a complete migrated\n * document.\n * - `v`: optional schema-version stamp.\n *\n * `updatedAt` is always set by the backend.\n */\nexport interface UpdatePayload {\n dataOps?: DataPathOp[];\n replaceData?: Record<string, unknown>;\n v?: number;\n}\n\n/**\n * Read/write transaction adapter. Mirrors Firestore's transaction semantics:\n * reads are snapshot-consistent; writes are issued inside the transaction\n * and a rejection from any write aborts the surrounding `runTransaction`.\n *\n * Writes return `Promise<void>` so SQL drivers can surface row-level errors\n * (constraint violations, malformed JSON paths) rather than swallowing them.\n * Firestore implementations can resolve synchronously since the underlying\n * `Transaction.set/update/delete` calls are themselves synchronous buffers.\n */\nexport interface TransactionBackend {\n getDoc(docId: string): Promise<StoredGraphRecord | null>;\n query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;\n updateDoc(docId: string, update: UpdatePayload): Promise<void>;\n deleteDoc(docId: string): Promise<void>;\n}\n\n/**\n * Atomic multi-write batch.\n */\nexport interface BatchBackend {\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): void;\n updateDoc(docId: string, update: UpdatePayload): void;\n deleteDoc(docId: 
string): void;\n commit(): Promise<void>;\n}\n\n/**\n * The single storage abstraction.\n *\n * Each backend instance is scoped to a \"graph location\" — for Firestore\n * that's a collection path; for SQLite it's a (table, scopePath) pair.\n * `subgraph()` returns a child backend bound to a nested location.\n */\nexport interface StorageBackend<C extends Capability = Capability> {\n /** Capabilities this backend instance declares. Static for the lifetime of the backend. */\n readonly capabilities: BackendCapabilities<C>;\n /** Backend-internal location identifier (collection path or table name). */\n readonly collectionPath: string;\n /** Subgraph scope (empty string for root). */\n readonly scopePath: string;\n\n // --- Reads ---\n getDoc(docId: string): Promise<StoredGraphRecord | null>;\n query(filters: QueryFilter[], options?: QueryOptions): Promise<StoredGraphRecord[]>;\n\n // --- Writes ---\n setDoc(docId: string, record: WritableRecord, mode: WriteMode): Promise<void>;\n updateDoc(docId: string, update: UpdatePayload): Promise<void>;\n deleteDoc(docId: string): Promise<void>;\n\n // --- Transactions & batches ---\n runTransaction<T>(fn: (tx: TransactionBackend) => Promise<T>): Promise<T>;\n createBatch(): BatchBackend;\n\n // --- Subgraphs ---\n subgraph(parentNodeUid: string, name: string): StorageBackend;\n\n // --- Cascade & bulk ---\n removeNodeCascade(\n uid: string,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<CascadeResult>;\n bulkRemoveEdges(\n params: FindEdgesParams,\n reader: GraphReader,\n options?: BulkOptions,\n ): Promise<BulkResult>;\n\n // --- Cross-collection queries ---\n /**\n * Find edges across all subgraphs sharing a given collection name.\n * Optional — backends that can't support this should throw a clear error.\n */\n findEdgesGlobal?(params: FindEdgesParams, collectionName?: string): Promise<StoredGraphRecord[]>;\n\n // --- Aggregations ---\n /**\n * Run an aggregate query (count/sum/avg/min/max). 
Present only on backends\n * that declare `query.aggregate`. The map's keys are caller-defined aliases\n * matching `AggregateSpec`; values are the resolved numeric results.\n *\n * Backends that can't satisfy a particular op throw `FiregraphError` with\n * code `UNSUPPORTED_AGGREGATE` (e.g. Firestore Standard rejects min/max).\n */\n aggregate?(spec: AggregateSpec, filters: QueryFilter[]): Promise<Record<string, number>>;\n\n // --- Server-side DML ---\n /**\n * Delete every row matching `filters` in one server-side statement.\n * Present only on backends that declare `query.dml`. The default cascade\n * implementation in `bulk.ts` uses this when available; backends without\n * the cap (e.g. Firestore Standard) fall back to a fetch-then-delete\n * loop driven by `findEdges` + per-row `deleteDoc`.\n *\n * The contract matches `findEdges`: scope predicates are honoured\n * automatically by the backend's own internal scope tracking. Callers\n * supply only the filter list — the same shape produced by\n * `buildEdgeQueryPlan`.\n */\n bulkDelete?(filters: QueryFilter[], options?: BulkOptions): Promise<BulkResult>;\n /**\n * Update every row matching `filters` with `patch` in one server-side\n * statement. The patch is deep-merged into each row's `data` field, the\n * same flatten-then-merge pipeline `updateDoc` uses. Identifying columns\n * (`aType`, `axbType`, `aUid`, `bType`, `bUid`, `v`) are not writable\n * through this path.\n */\n bulkUpdate?(\n filters: QueryFilter[],\n patch: BulkUpdatePatch,\n options?: BulkOptions,\n ): Promise<BulkResult>;\n\n // --- Server-side multi-source fan-out ---\n /**\n * Fan out from `params.sources` over a single edge type in one server-side\n * round trip. Present only on backends that declare `query.join`. 
The\n * traversal layer (`traverse.ts`) calls `expand` once per hop when the\n * backend declares the cap; otherwise it falls back to the per-source\n * `findEdges` loop.\n *\n * Cross-graph hops are never dispatched through `expand` — each source\n * UID resolves to a distinct subgraph location, which can't be fanned\n * out as a single statement. The traversal layer enforces that\n * boundary; `expand` itself does not need to inspect `targetGraph`.\n */\n expand?(params: ExpandParams): Promise<ExpandResult>;\n\n // --- Engine-level multi-hop traversal ---\n /**\n * Compile a multi-hop traversal spec into one server-side query and\n * dispatch a single round trip. Present only on backends that declare\n * `traversal.serverSide` (Firestore Enterprise today, via nested\n * Pipelines that combine `define`, `addFields`, and\n * `toArrayExpression`).\n *\n * The traversal layer (`traverse.ts`) compiles a `TraversalBuilder`\n * spec into `EngineTraversalParams` only when the spec is eligible\n * (no cross-graph hops, no JS filters, depth ≤ `MAX_PIPELINE_DEPTH`,\n * `Π(limitPerSource_i × N_i) ≤ maxReads`, `limitPerSource` set on\n * every hop). Ineligible specs fall back to the per-hop `expand()`\n * loop without invoking this method.\n *\n * The result collapses the nested-pipeline tree into per-hop edge\n * arrays so the traversal layer can fold the result into the same\n * `HopResult[]` shape it produces from the per-hop loop.\n */\n runEngineTraversal?(params: EngineTraversalParams): Promise<EngineTraversalResult>;\n\n // --- Server-side projection ---\n /**\n * Run a projecting query — return only the listed fields per row. Present\n * only on backends that declare `query.select`. 
The cap-less fallback is\n * `findEdges` followed by a JS-side projection in user code; firegraph\n * does not auto-fall-back because the wire-payload reduction is the only\n * reason to call this method.\n *\n * `select` is the explicit field list; `filters` and `options` mirror the\n * `query()` shape. The returned rows have one slot per unique entry in\n * `select`. Field-name interpretation is the backend's responsibility:\n * built-in fields resolve to columns / Firestore field names, bare names\n * resolve to `data.<name>`, and dotted paths resolve verbatim. See\n * `FindEdgesProjectedParams` for the user-facing contract.\n *\n * Migrations are not applied to the result — the caller asked for a\n * specific projection shape, and rehydrating a partial record into the\n * migration pipeline would require synthesising every absent field.\n */\n findEdgesProjected?(\n select: ReadonlyArray<string>,\n filters: QueryFilter[],\n options?: QueryOptions,\n ): Promise<Array<Record<string, unknown>>>;\n\n // --- Native vector / nearest-neighbour search ---\n /**\n * Run a vector / nearest-neighbour query. Present only on backends that\n * declare `search.vector`. There is no client-side fallback — the\n * SQLite-shaped backends (shared SQLite, Cloudflare DO) genuinely have\n * no native ANN index, and a JS-side k-NN sweep over `findEdges()` would\n * scale catastrophically. Backends without the cap throw\n * `UNSUPPORTED_OPERATION` from the client wrapper.\n *\n * `params` carries the user-facing shape (vector field path, query\n * vector, distance metric, optional threshold and result-field). The\n * client wrapper has already run scan-protection on the identifying\n * / `where` filter list before dispatching.\n *\n * Path normalisation is the backend's responsibility: rewriting bare\n * `vectorField` / `distanceResultField` names to `data.<name>` and\n * rejecting envelope fields (`aType`, `axbType`, `bType`, `aUid`,\n * `bUid`, `v`, etc.) 
with `INVALID_QUERY` happens inside the\n * backend, not the client wrapper. The two in-tree Firestore-edition\n * backends share `runFirestoreFindNearest` (see\n * `src/internal/firestore-vector.ts`) for this; third-party backends\n * declaring `search.vector` must apply equivalent normalisation\n * before calling their underlying SDK.\n *\n * The backend is also responsible for translating to the underlying\n * SDK call (`Query.findNearest` on Firestore today) and decoding the\n * result snapshot into `StoredGraphRecord[]`.\n *\n * Migrations are not applied to the result. The vector index walks the\n * raw stored shape; rehydrating into the migration pipeline before\n * returning would change the candidate set the index already chose.\n */\n findNearest?(params: FindNearestParams): Promise<StoredGraphRecord[]>;\n\n // --- Native full-text search ---\n /**\n * Run a full-text search query. Present only on backends that declare\n * `search.fullText`. There is no client-side fallback — the only\n * in-tree backend that supports it is Firestore Enterprise (via\n * Pipeline `search({ query: documentMatches(...) })`); Standard and\n * the SQLite-shaped backends throw `UNSUPPORTED_OPERATION` from the\n * client wrapper.\n *\n * The backend is responsible for path normalisation (rewriting\n * bare `fields` entries to `data.<name>`, rejecting envelope fields\n * with `INVALID_QUERY`), translating to the underlying SDK call,\n * and decoding the result into `StoredGraphRecord[]`.\n *\n * Migrations are not applied to the result. The search index walked\n * the raw stored shape; rehydrating into the migration pipeline\n * would change the candidate set the index already scored.\n */\n fullTextSearch?(params: FullTextSearchParams): Promise<StoredGraphRecord[]>;\n\n // --- Native geospatial distance search ---\n /**\n * Run a geospatial distance search. Present only on backends that\n * declare `search.geo`. 
There is no client-side fallback — only\n * Firestore Enterprise has a native geo index (translated via\n * Pipeline `search({ query: geoDistance(...).lessThanOrEqual(...) })`).\n * Backends without the cap throw `UNSUPPORTED_OPERATION` from the\n * client wrapper.\n *\n * The backend is responsible for `geoField` path normalisation,\n * translating `point` to a Firestore `GeoPoint`, applying the\n * radius cap inside the search query, and (when\n * `orderByDistance` is true / unset) emitting the\n * `geoDistance(...).ascending()` ordering inside the search stage.\n *\n * Migrations are not applied to the result.\n */\n geoSearch?(params: GeoSearchParams): Promise<StoredGraphRecord[]>;\n}\n"],"mappings":";AA0DO,SAAS,mBACd,MACwB;AACxB,SAAO;AAAA,IACL,KAAK,CAAC,eAAoC,KAAK,IAAI,UAAe;AAAA,IAClE,QAAQ,MAAM,KAAK,OAAO;AAAA,EAC5B;AACF;AAOO,SAAS,sBACd,OACqB;AACrB,MAAI,MAAM,WAAW,EAAG,QAAO,mBAAmB,oBAAI,IAAgB,CAAC;AACvE,QAAM,OAAO,MAAM,IAAI,CAAC,MAAM,IAAI,IAAgB,EAAE,OAAO,CAAC,CAAC;AAC7D,QAAM,CAAC,OAAO,GAAG,IAAI,IAAI;AACzB,QAAM,eAAe,oBAAI,IAAgB;AACzC,aAAW,KAAK,OAAO;AACrB,QAAI,KAAK,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC,EAAG,cAAa,IAAI,CAAC;AAAA,EACrD;AACA,SAAO,mBAAmB,YAAY;AACxC;","names":[]}
|