@typicalday/firegraph 0.11.2 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +355 -78
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +365 -5
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +209 -7
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/{chunk-5753Y42M.js → chunk-C2QMD7RY.js} +6 -10
- package/dist/chunk-C2QMD7RY.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-EQJUUVFG.js +14 -0
- package/dist/chunk-EQJUUVFG.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/chunk-TK64DNVK.js +256 -0
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-NJSOD64C.js → chunk-WRTFC5NG.js} +438 -30
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +1386 -74
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +217 -13
- package/dist/cloudflare/index.d.ts +217 -13
- package/dist/cloudflare/index.js +639 -180
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +809 -534
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +24 -100
- package/dist/index.d.ts +24 -100
- package/dist/index.js +184 -531
- package/dist/index.js.map +1 -1
- package/dist/registry-Bc7h6WTM.d.cts +64 -0
- package/dist/registry-C2KUPVZj.d.ts +64 -0
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/{serialization-ZZ7RSDRX.js → serialization-OE2PFZMY.js} +6 -4
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-U-MLShlg.d.ts +0 -97
- package/dist/backend-np4gEVhB.d.cts +0 -97
- package/dist/chunk-5753Y42M.js.map +0 -1
- package/dist/chunk-NJSOD64C.js.map +0 -1
- package/dist/chunk-R7CRGYY4.js +0 -94
- package/dist/chunk-R7CRGYY4.js.map +0 -1
- package/dist/types-BGWxcpI_.d.cts +0 -736
- package/dist/types-BGWxcpI_.d.ts +0 -736
- /package/dist/{serialization-ZZ7RSDRX.js.map → serialization-OE2PFZMY.js.map} +0 -0
|
@@ -0,0 +1,1164 @@
|
|
|
1
|
+
import {
|
|
2
|
+
GraphTimestampImpl,
|
|
3
|
+
assertJsonSafePayload,
|
|
4
|
+
compileDataOpsExpr,
|
|
5
|
+
isFirestoreSpecialType,
|
|
6
|
+
validateJsonPathKey
|
|
7
|
+
} from "../chunk-4MMQ5W74.js";
|
|
8
|
+
import "../chunk-2DHMNTV6.js";
|
|
9
|
+
import {
|
|
10
|
+
createCapabilities
|
|
11
|
+
} from "../chunk-N5HFDWQX.js";
|
|
12
|
+
import {
|
|
13
|
+
META_EDGE_TYPE,
|
|
14
|
+
META_NODE_TYPE,
|
|
15
|
+
NODE_RELATION,
|
|
16
|
+
buildEdgeQueryPlan,
|
|
17
|
+
computeEdgeDocId,
|
|
18
|
+
computeNodeDocId,
|
|
19
|
+
createGraphClient,
|
|
20
|
+
createMergedRegistry,
|
|
21
|
+
createRegistry,
|
|
22
|
+
generateId
|
|
23
|
+
} from "../chunk-WRTFC5NG.js";
|
|
24
|
+
import {
|
|
25
|
+
FiregraphError,
|
|
26
|
+
assertUpdatePayloadExclusive,
|
|
27
|
+
flattenPatch
|
|
28
|
+
} from "../chunk-TK64DNVK.js";
|
|
29
|
+
import "../chunk-EQJUUVFG.js";
|
|
30
|
+
|
|
31
|
+
// src/internal/sqlite-schema.ts
|
|
32
|
+
// Maps logical record field names to their physical column names in the
// shared SQLite table. Anything absent here is resolved as a JSON path
// inside the "data" column (see compileFieldRef / normalizeProjectionField).
var FIELD_TO_COLUMN = {
  aType: "a_type",
  aUid: "a_uid",
  axbType: "axb_type",
  bType: "b_type",
  bUid: "b_uid",
  v: "v",
  createdAt: "created_at",
  updatedAt: "updated_at"
};
|
|
42
|
+
// Quote a SQL identifier for interpolation into a statement. The whitelist
// check in validateTableName guarantees no internal escaping is needed.
function quoteIdent(name) {
  validateTableName(name);
  return `"${name}"`;
}
|
|
46
|
+
// Reject any identifier outside a conservative whitelist: a leading letter
// or underscore followed by letters, digits, or underscores. Throws on
// anything else so callers can safely interpolate the name into SQL.
function validateTableName(name) {
  const IDENT = /^[A-Za-z_][A-Za-z0-9_]*$/;
  if (IDENT.test(name)) return;
  throw new Error(`Invalid SQL identifier: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`);
}
|
|
51
|
+
// Quote a result-column alias. Unlike quoteIdent, aliases may contain
// arbitrary text, so embedded double quotes are doubled per SQL rules.
function quoteColumnAlias(label) {
  return `"${label.replaceAll('"', '""')}"`;
}
|
|
54
|
+
|
|
55
|
+
// src/sqlite/sql.ts
|
|
56
|
+
// Human-readable labels interpolated into error messages emitted by the
// payload/JSON-path validators for this backend.
var SQLITE_BACKEND_LABEL = "shared-table SQLite";
var SQLITE_BACKEND_ERR_LABEL = "SQLite backend";
|
|
58
|
+
// Resolve a logical filter field to a SQL expression. Fixed columns take
// priority; "data" and "data.*" resolve through json_extract; anything
// else is an error rather than a silent miss.
function compileFieldRef(field) {
  const column = FIELD_TO_COLUMN[field];
  if (column) return { expr: quoteIdent(column) };
  if (field === "data") {
    return { expr: `json_extract("data", '$')` };
  }
  if (field.startsWith("data.")) {
    const path = field.slice("data.".length);
    // Each key segment is validated so it cannot break out of the quoted
    // JSON path literal below.
    for (const segment of path.split(".")) {
      validateJsonPathKey(segment, SQLITE_BACKEND_ERR_LABEL);
    }
    return { expr: `json_extract("data", '$.${path}')` };
  }
  throw new FiregraphError(`SQLite backend cannot resolve filter field: ${field}`, "INVALID_QUERY");
}
|
|
75
|
+
// Convert a JS value into something SQLite can bind: primitives pass
// through, Dates become epoch millis, plain objects/arrays are serialized
// to JSON, and Firestore special types are rejected outright because their
// JSON form would silently drop fields.
function bindValue(value) {
  // null and undefined both bind as SQL NULL.
  if (value == null) return null;
  switch (typeof value) {
    case "string":
    case "number":
    case "bigint":
    case "boolean":
      return value;
    case "object": {
      if (value instanceof Date) return value.getTime();
      const firestoreType = isFirestoreSpecialType(value);
      if (firestoreType) {
        throw new FiregraphError(
          `SQLite backend cannot bind a Firestore ${firestoreType} value \u2014 JSON serialization would silently drop fields and the resulting bind would never match a stored row. Convert to a primitive (e.g. \`ts.toMillis()\` for Timestamp) before filtering or updating.`,
          "INVALID_QUERY"
        );
      }
      return JSON.stringify(value);
    }
    default:
      // Symbols, functions, etc. — last-resort string coercion.
      return String(value);
  }
}
|
|
93
|
+
// Compile one filter into a SQL condition string, appending its bound
// values to `params` in placeholder order.
function compileFilter(filter, params) {
  const { expr } = compileFieldRef(filter.field);
  const op = filter.op;
  switch (op) {
    // Binary comparisons: one bound value, one placeholder. "==" maps to
    // SQL "="; the rest use the operator token verbatim.
    case "==":
      params.push(bindValue(filter.value));
      return `${expr} = ?`;
    case "!=":
    case "<":
    case "<=":
    case ">":
    case ">=":
      params.push(bindValue(filter.value));
      return `${expr} ${op} ?`;
    // Membership tests: one placeholder per candidate value.
    case "in":
    case "not-in": {
      const values = asArray(filter.value, op);
      for (const v of values) params.push(bindValue(v));
      const list = values.map(() => "?").join(", ");
      return `${expr} ${op === "in" ? "IN" : "NOT IN"} (${list})`;
    }
    // JSON-array membership via SQLite's json_each table-valued function.
    case "array-contains":
      params.push(bindValue(filter.value));
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value = ?)`;
    case "array-contains-any": {
      const values = asArray(filter.value, op);
      for (const v of values) params.push(bindValue(v));
      const list = values.map(() => "?").join(", ");
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value IN (${list}))`;
    }
    default:
      throw new FiregraphError(
        `SQLite backend does not support filter operator: ${String(op)}`,
        "INVALID_QUERY"
      );
  }
}
|
|
143
|
+
// Guard for membership operators: the value must be a non-empty array.
function asArray(value, op) {
  if (Array.isArray(value) && value.length > 0) return value;
  throw new FiregraphError(`Operator "${op}" requires a non-empty array value`, "INVALID_QUERY");
}
|
|
149
|
+
// Emit an ORDER BY clause (with leading space) or "" when no ordering was
// requested. `_params` is accepted for signature parity with compileLimit.
function compileOrderBy(options, _params) {
  const orderBy = options?.orderBy;
  if (!orderBy) return "";
  const { expr } = compileFieldRef(orderBy.field);
  // Anything other than an explicit "desc" sorts ascending.
  return ` ORDER BY ${expr} ${orderBy.direction === "desc" ? "DESC" : "ASC"}`;
}
|
|
156
|
+
// Emit a parameterized LIMIT clause (with leading space), binding the
// limit value; "" when no limit was requested.
function compileLimit(options, params) {
  const limit = options?.limit;
  if (limit === void 0) return "";
  params.push(limit);
  return ` LIMIT ?`;
}
|
|
161
|
+
// Build a scoped SELECT: scope condition first, then compiled filters,
// then optional ORDER BY / LIMIT. Param order mirrors placeholder order.
function compileSelect(table, scope, filters, options) {
  const params = [scope];
  const where = ['"scope" = ?'];
  for (const filter of filters) {
    where.push(compileFilter(filter, params));
  }
  const sql =
    `SELECT * FROM ${quoteIdent(table)} WHERE ${where.join(" AND ")}` +
    compileOrderBy(options, params) +
    compileLimit(options, params);
  return { sql, params };
}
|
|
174
|
+
// Compile an aggregation query. Each alias in `spec` becomes one projected
// aggregate column; COUNT takes no field, the numeric ops require one.
function compileAggregate(table, scope, spec, filters) {
  const aliases = Object.keys(spec);
  if (aliases.length === 0) {
    throw new FiregraphError(
      "aggregate() requires at least one aggregation in the `aggregates` map.",
      "INVALID_QUERY"
    );
  }
  const NUMERIC_FN = { sum: "SUM", avg: "AVG", min: "MIN", max: "MAX" };
  const projections = aliases.map((alias) => {
    const { op, field } = spec[alias];
    // Aliases are validated so they are safe inside quoteIdent.
    validateJsonPathKey(alias, SQLITE_BACKEND_ERR_LABEL);
    if (op === "count") {
      if (field !== void 0) {
        throw new FiregraphError(
          `Aggregate '${alias}' op 'count' must not specify a field \u2014 count operates on rows, not a column expression.`,
          "INVALID_QUERY"
        );
      }
      return `COUNT(*) AS ${quoteIdent(alias)}`;
    }
    if (!field) {
      throw new FiregraphError(
        `Aggregate '${alias}' op '${op}' requires a field.`,
        "INVALID_QUERY"
      );
    }
    // Resolve the field before validating the op, so a bad field reports
    // its own error first (matches historical behavior).
    const { expr } = compileFieldRef(field);
    const numeric = `CAST(${expr} AS REAL)`;
    const fn = Object.hasOwn(NUMERIC_FN, op) ? NUMERIC_FN[op] : void 0;
    if (!fn) {
      throw new FiregraphError(
        `SQLite backend does not support aggregate op: ${String(op)}`,
        "INVALID_QUERY"
      );
    }
    return `${fn}(${numeric}) AS ${quoteIdent(alias)}`;
  });
  const params = [scope];
  const where = ['"scope" = ?'];
  for (const f of filters) {
    where.push(compileFilter(f, params));
  }
  const sql = `SELECT ${projections.join(", ")} FROM ${quoteIdent(table)} WHERE ${where.join(" AND ")}`;
  return { stmt: { sql, params }, aliases };
}
|
|
222
|
+
// Cross-scope SELECT. At least one filter is required so an unbounded
// full-table scan can never be issued from here.
function compileSelectGlobal(table, filters, options, scopeNameFilter) {
  if (filters.length === 0) {
    throw new FiregraphError(
      "compileSelectGlobal requires at least one filter \u2014 refusing to issue an unbounded SELECT.",
      "INVALID_QUERY"
    );
  }
  const params = [];
  const conditions = [];
  if (scopeNameFilter) {
    if (scopeNameFilter.isRoot) {
      // The root scope is stored as the empty string.
      conditions.push(`"scope" = ?`);
      params.push("");
    } else {
      // Match any scope whose final path segment equals the requested name;
      // LIKE metacharacters in the name are escaped.
      conditions.push(`"scope" LIKE ? ESCAPE '\\'`);
      params.push(`%/${escapeLike(scopeNameFilter.name)}`);
    }
  }
  for (const f of filters) {
    conditions.push(compileFilter(f, params));
  }
  const sql =
    `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}` +
    compileOrderBy(options, params) +
    compileLimit(options, params);
  return { sql, params };
}
|
|
246
|
+
// Primary-key style lookup for one document; LIMIT 1 keeps the scan bounded.
function compileSelectByDocId(table, scope, docId) {
  const sql = `SELECT * FROM ${quoteIdent(table)} WHERE "scope" = ? AND "doc_id" = ? LIMIT 1`;
  return { sql, params: [scope, docId] };
}
|
|
252
|
+
// Canonicalize a projection field: built-in columns and explicit data
// paths pass through untouched; any other name is shorthand for a key
// inside the "data" JSON column.
function normalizeProjectionField(field) {
  const isBuiltin = field in FIELD_TO_COLUMN;
  const isDataPath = field === "data" || field.startsWith("data.");
  return isBuiltin || isDataPath ? field : `data.${field}`;
}
|
|
257
|
+
// Compile a projected edge query. Returns the statement plus column
// descriptors telling decodeProjectedRow how to interpret each result
// column ("data" blob, JSON path, or one of the built-in columns).
function compileFindEdgesProjected(table, scope, select, filters, options) {
  if (select.length === 0) {
    throw new FiregraphError(
      "compileFindEdgesProjected requires a non-empty select list \u2014 an empty projection has no SQL representation distinct from `findEdges`.",
      "INVALID_QUERY"
    );
  }
  // Dedupe while preserving first-seen order.
  const uniqueFields = [...new Set(select)];
  const projections = [];
  const columns = [];
  uniqueFields.forEach((field, idx) => {
    const canonical = normalizeProjectionField(field);
    const { expr } = compileFieldRef(canonical);
    projections.push(`${expr} AS ${quoteColumnAlias(field)}`);
    let kind;
    let typeAliasName;
    if (canonical === "data") {
      kind = "data";
    } else if (canonical.startsWith("data.")) {
      // JSON projections get a companion json_type() column so the decoder
      // can tell serialized objects/arrays apart from scalar values.
      kind = "json";
      typeAliasName = `__fg_t_${idx}`;
      projections.push(
        `json_type("data", '$.${canonical.slice(5)}') AS ${quoteColumnAlias(typeAliasName)}`
      );
    } else if (canonical === "v") {
      kind = "builtin-int";
    } else if (canonical === "createdAt" || canonical === "updatedAt") {
      kind = "builtin-timestamp";
    } else {
      kind = "builtin-text";
    }
    columns.push({ field, kind, typeAlias: typeAliasName });
  });
  const params = [scope];
  const where = ['"scope" = ?'];
  for (const f of filters) {
    where.push(compileFilter(f, params));
  }
  const sql =
    `SELECT ${projections.join(", ")} FROM ${quoteIdent(table)} WHERE ${where.join(" AND ")}` +
    compileOrderBy(options, params) +
    compileLimit(options, params);
  return { stmt: { sql, params }, columns };
}
|
|
306
|
+
// Decode one raw driver row from a projected query, using the column
// descriptors produced by compileFindEdgesProjected.
function decodeProjectedRow(row, columns) {
  const out = {};
  for (const col of columns) {
    const raw = row[col.field];
    if (col.kind === "builtin-text") {
      out[col.field] = raw == null ? null : String(raw);
    } else if (col.kind === "builtin-int") {
      // bigints and numeric strings normalize to a JS number; numbers pass
      // through unchanged.
      out[col.field] = raw == null ? null : Number(raw);
    } else if (col.kind === "builtin-timestamp") {
      out[col.field] = GraphTimestampImpl.fromMillis(toMillis(raw));
    } else if (col.kind === "data") {
      // NULL or empty "data" decodes as an empty object.
      out[col.field] = raw == null || raw === "" ? {} : JSON.parse(raw);
    } else {
      // "json": the companion json_type() column disambiguates containers
      // (which SQLite returns as serialized text) from scalar values.
      const jsonType = row[col.typeAlias];
      if (raw == null) {
        out[col.field] = null;
      } else if (jsonType === "object" || jsonType === "array") {
        out[col.field] = typeof raw === "string" ? JSON.parse(raw) : raw;
      } else if (jsonType === "integer" && typeof raw === "bigint") {
        out[col.field] = Number(raw);
      } else {
        out[col.field] = raw;
      }
    }
  }
  return out;
}
|
|
354
|
+
// Compile an edge-expansion query from a set of source node uids along one
// relation type. Direction "forward" matches on a_uid, "reverse" on b_uid.
function compileExpand(table, scope, params) {
  if (params.sources.length === 0) {
    throw new FiregraphError(
      "compileExpand requires a non-empty sources list \u2014 empty IN () is invalid SQL. Callers should short-circuit empty input before reaching the compiler.",
      "INVALID_QUERY"
    );
  }
  const col = (field) => compileFieldRef(field).expr;
  const direction = params.direction ?? "forward";
  const sourceColumn = direction === "forward" ? col("aUid") : col("bUid");
  const bound = [scope, params.axbType];
  const conditions = ['"scope" = ?', `${col("axbType")} = ?`];
  const placeholders = params.sources.map(() => "?").join(", ");
  conditions.push(`${sourceColumn} IN (${placeholders})`);
  bound.push(...params.sources);
  if (params.aType !== void 0) {
    conditions.push(`${col("aType")} = ?`);
    bound.push(params.aType);
  }
  if (params.bType !== void 0) {
    conditions.push(`${col("bType")} = ?`);
    bound.push(params.bType);
  }
  if (params.axbType === NODE_RELATION) {
    // NODE_RELATION self-edges (a_uid = b_uid) are node documents, not
    // traversable edges (see compileExpandHydrate) — exclude them here.
    conditions.push(`${col("aUid")} != ${col("bUid")}`);
  }
  let sql = `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}`;
  if (params.orderBy) {
    sql += compileOrderBy({ orderBy: params.orderBy }, bound);
  }
  if (params.limitPerSource !== void 0) {
    // One statement cannot enforce a true per-source limit; cap the total
    // at sources * limitPerSource and let the caller trim per source.
    sql += ` LIMIT ?`;
    bound.push(params.sources.length * params.limitPerSource);
  }
  return { sql, params: bound };
}
|
|
395
|
+
// Fetch node documents for a set of target uids. Node docs are stored as
// NODE_RELATION self-edges, hence the a_uid = b_uid condition.
function compileExpandHydrate(table, scope, targetUids) {
  if (targetUids.length === 0) {
    throw new FiregraphError(
      "compileExpandHydrate requires a non-empty target list \u2014 empty IN () is invalid SQL.",
      "INVALID_QUERY"
    );
  }
  const aUid = compileFieldRef("aUid").expr;
  const bUid = compileFieldRef("bUid").expr;
  const axbType = compileFieldRef("axbType").expr;
  const placeholders = targetUids.map(() => "?").join(", ");
  return {
    sql: `SELECT * FROM ${quoteIdent(table)} WHERE "scope" = ? AND ${axbType} = ? AND ${aUid} = ${bUid} AND ${bUid} IN (${placeholders})`,
    params: [scope, NODE_RELATION, ...targetUids]
  };
}
|
|
413
|
+
// Compile a document write. "replace" overwrites the whole row via
// INSERT OR REPLACE; any other mode is an upsert that deep-merges the
// incoming data leaves into the stored JSON on conflict.
function compileSet(table, scope, docId, record, nowMillis, mode) {
  assertJsonSafePayload(record.data, SQLITE_BACKEND_LABEL);
  // VALUES tuple shared by both modes — order must match the column list.
  const rowParams = [
    docId,
    scope,
    record.aType,
    record.aUid,
    record.axbType,
    record.bType,
    record.bUid,
    JSON.stringify(record.data ?? {}),
    record.v ?? null,
    nowMillis,
    nowMillis
  ];
  if (mode === "replace") {
    return {
      sql: `INSERT OR REPLACE INTO ${quoteIdent(table)} (
doc_id, scope, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
      params: rowParams
    };
  }
  // Merge mode: build a json_* expression applying each data leaf on top of
  // the stored JSON; falls back to keeping the stored JSON when the patch
  // compiles to nothing.
  const updateParams = [];
  const ops = flattenPatch(record.data ?? {});
  const dataExpr =
    compileDataOpsExpr(ops, `COALESCE("data", '{}')`, updateParams, SQLITE_BACKEND_ERR_LABEL) ??
    `COALESCE("data", '{}')`;
  // NOTE(review): the conflict branch also rewrites "created_at" from the
  // excluded row, so a merge resets the creation timestamp — confirm this
  // is intended upstream.
  const sql = `INSERT INTO ${quoteIdent(table)} (
doc_id, scope, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(scope, doc_id) DO UPDATE SET
"a_type" = excluded."a_type",
"a_uid" = excluded."a_uid",
"axb_type" = excluded."axb_type",
"b_type" = excluded."b_type",
"b_uid" = excluded."b_uid",
"data" = ${dataExpr},
"v" = COALESCE(excluded."v", "v"),
"created_at" = excluded."created_at",
"updated_at" = excluded."updated_at"`;
  return { sql, params: [...rowParams, ...updateParams] };
}
|
|
465
|
+
// Compile a targeted UPDATE. `replaceData` and `dataOps` are mutually
// exclusive ways to touch the "data" column; `v` is optional; updated_at
// is always stamped.
function compileUpdate(table, scope, docId, update, nowMillis) {
  assertUpdatePayloadExclusive(update);
  const setClauses = [];
  const params = [];
  if (update.replaceData) {
    assertJsonSafePayload(update.replaceData, SQLITE_BACKEND_LABEL);
    setClauses.push(`"data" = ?`);
    params.push(JSON.stringify(update.replaceData));
  } else if (update.dataOps?.length) {
    for (const op of update.dataOps) {
      // Delete ops carry no value to validate.
      if (!op.delete) assertJsonSafePayload(op.value, SQLITE_BACKEND_LABEL);
    }
    const expr = compileDataOpsExpr(
      update.dataOps,
      `COALESCE("data", '{}')`,
      params,
      SQLITE_BACKEND_ERR_LABEL
    );
    if (expr !== null) {
      setClauses.push(`"data" = ${expr}`);
    }
  }
  if (update.v !== void 0) {
    setClauses.push(`"v" = ?`);
    params.push(update.v);
  }
  // Every update stamps updated_at, even when only "v" changed.
  setClauses.push(`"updated_at" = ?`);
  params.push(nowMillis, scope, docId);
  return {
    sql: `UPDATE ${quoteIdent(table)} SET ${setClauses.join(", ")} WHERE "scope" = ? AND "doc_id" = ?`,
    params
  };
}
|
|
499
|
+
// Scoped single-row delete keyed by doc_id.
function compileDelete(table, scope, docId) {
  const sql = `DELETE FROM ${quoteIdent(table)} WHERE "scope" = ? AND "doc_id" = ?`;
  return { sql, params: [scope, docId] };
}
|
|
505
|
+
// Delete every row in the scope matching the compiled filters. An empty
// filter list deletes the whole scope.
function compileBulkDelete(table, scope, filters) {
  const params = [scope];
  const conditions = ['"scope" = ?'];
  for (const f of filters) {
    conditions.push(compileFilter(f, params));
  }
  const sql = `DELETE FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}`;
  return { sql, params };
}
|
|
516
|
+
// Compile a filtered bulk UPDATE of the "data" column. The patch object is
// flattened to leaf ops; an empty patch is rejected up front.
function compileBulkUpdate(table, scope, filters, patchData, nowMillis) {
  const dataOps = flattenPatch(patchData);
  if (dataOps.length === 0) {
    throw new FiregraphError(
      "bulkUpdate() patch.data must contain at least one leaf \u2014 an empty patch would only rewrite `updated_at`, which is almost certainly a bug. Use `setDoc` with merge mode if you want to stamp without editing data.",
      "INVALID_QUERY"
    );
  }
  for (const op of dataOps) {
    if (!op.delete) assertJsonSafePayload(op.value, SQLITE_BACKEND_LABEL);
  }
  // SET-clause binds must precede WHERE-clause binds in the final order,
  // so the two lists are accumulated separately and concatenated at the end.
  const setParams = [];
  const expr = compileDataOpsExpr(
    dataOps,
    `COALESCE("data", '{}')`,
    setParams,
    SQLITE_BACKEND_ERR_LABEL
  );
  if (expr === null) {
    throw new FiregraphError(
      "bulkUpdate() patch produced no SQL operations \u2014 internal invariant violated.",
      "INVALID_ARGUMENT"
    );
  }
  setParams.push(nowMillis);
  const whereParams = [scope];
  const conditions = ['"scope" = ?'];
  for (const f of filters) {
    conditions.push(compileFilter(f, whereParams));
  }
  return {
    sql: `UPDATE ${quoteIdent(table)} SET "data" = ${expr}, "updated_at" = ? WHERE ${conditions.join(" AND ")}`,
    params: [...setParams, ...whereParams]
  };
}
|
|
552
|
+
// Delete every row in any scope nested under the given prefix
// (matches "prefix/<anything>", not the prefix scope itself).
function compileDeleteScopePrefix(table, scopePrefix) {
  return {
    sql: `DELETE FROM ${quoteIdent(table)} WHERE "scope" LIKE ? ESCAPE '\\'`,
    params: [`${escapeLike(scopePrefix)}/%`]
  };
}
|
|
559
|
+
// Count rows in any scope nested under the given prefix; the result is
// projected as column "n".
function compileCountScopePrefix(table, scopePrefix) {
  return {
    sql: `SELECT COUNT(*) AS n FROM ${quoteIdent(table)} WHERE "scope" LIKE ? ESCAPE '\\'`,
    params: [`${escapeLike(scopePrefix)}/%`]
  };
}
|
|
566
|
+
// Escape LIKE metacharacters for use with ESCAPE '\'. The backslash is
// escaped first so the later escapes are not double-processed.
function escapeLike(value) {
  return value.replaceAll("\\", "\\\\").replaceAll("%", "\\%").replaceAll("_", "\\_");
}
|
|
569
|
+
// Convert a raw table row into a graph record. "data" is stored as a JSON
// string (NULL/empty decodes as {}); timestamps are stored as epoch millis.
function rowToRecord(row) {
  const record = {
    aType: row.a_type,
    aUid: row.a_uid,
    axbType: row.axb_type,
    bType: row.b_type,
    bUid: row.b_uid,
    data: row.data ? JSON.parse(row.data) : {},
    createdAt: GraphTimestampImpl.fromMillis(toMillis(row.created_at)),
    updatedAt: GraphTimestampImpl.fromMillis(toMillis(row.updated_at))
  };
  // "v" is optional; only materialize the property when the column is set.
  if (row.v != null) {
    record.v = Number(row.v);
  }
  return record;
}
|
|
589
|
+
// Normalize a stored timestamp (number, bigint, or numeric string from the
// SQLite driver) to epoch milliseconds.
//
// Fix: a non-numeric string previously returned NaN from Number(value),
// which would then flow into GraphTimestampImpl.fromMillis(NaN). NaN now
// falls back to 0, matching the existing fallback for unknown types.
function toMillis(value) {
  if (typeof value === "number") {
    return Number.isNaN(value) ? 0 : value;
  }
  if (typeof value === "bigint") {
    return Number(value);
  }
  if (typeof value === "string") {
    const n = Number(value);
    return Number.isNaN(n) ? 0 : n;
  }
  return 0;
}
|
|
595
|
+
|
|
596
|
+
// src/sqlite/backend.ts
|
|
597
|
+
// Retry tuning constants; consumed by backend code elsewhere in this module.
// NOTE(review): the backoff policy that uses these lives outside this chunk.
var DEFAULT_MAX_RETRIES = 3;
var BASE_RETRY_DELAY_MS = 200;
var MAX_RETRY_DELAY_MS = 5e3;
|
|
600
|
+
// Promise-based delay helper.
function sleep(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
|
|
603
|
+
// Smaller of two optional numbers, where undefined means "no bound".
// Math.min is kept (rather than a ternary) for its NaN propagation.
function minDefined(a, b) {
  if (a === void 0) return b;
  if (b === void 0) return a;
  return Math.min(a, b);
}
|
|
608
|
+
// Split a statement list into chunks that respect optional per-batch caps
// on statement count and total bound-parameter count. A cap is active only
// when it is a finite positive number; with no active caps the input list
// is returned as a single chunk.
function chunkStatements(statements, maxStatements, maxParams) {
  const normalizeCap = (cap) =>
    cap && cap > 0 && Number.isFinite(cap) ? Math.floor(cap) : Infinity;
  const stmtCap = normalizeCap(maxStatements);
  const paramCap = normalizeCap(maxParams);
  if (stmtCap === Infinity && paramCap === Infinity) {
    return [statements];
  }
  const chunks = [];
  let current = [];
  let paramCount = 0;
  for (const stmt of statements) {
    const overStmts = current.length + 1 > stmtCap;
    const overParams = paramCount + stmt.params.length > paramCap;
    // Start a new chunk rather than exceed either cap. A single statement
    // that alone exceeds the param cap still gets its own chunk.
    if (current.length > 0 && (overStmts || overParams)) {
      chunks.push(current);
      current = [];
      paramCount = 0;
    }
    current.push(stmt);
    paramCount += stmt.params.length;
  }
  if (current.length > 0) {
    chunks.push(current);
  }
  return chunks;
}
|
|
632
|
+
// Backend adapter that runs every operation through an open transaction
// handle (`tx`), scoped to one table and one storage scope.
var SqliteTransactionBackendImpl = class {
  constructor(tx, tableName, storageScope) {
    this.tx = tx;
    this.tableName = tableName;
    this.storageScope = storageScope;
  }
  // Fetch a single record by doc id, or null when absent.
  async getDoc(docId) {
    const { sql, params } = compileSelectByDocId(this.tableName, this.storageScope, docId);
    const rows = await this.tx.all(sql, params);
    return rows.length > 0 ? rowToRecord(rows[0]) : null;
  }
  // Run a filtered (optionally ordered/limited) query inside the transaction.
  async query(filters, options) {
    const { sql, params } = compileSelect(this.tableName, this.storageScope, filters, options);
    const rows = await this.tx.all(sql, params);
    return rows.map(rowToRecord);
  }
  async setDoc(docId, record, mode) {
    const { sql, params } = compileSet(this.tableName, this.storageScope, docId, record, Date.now(), mode);
    await this.tx.run(sql, params);
  }
  // Appends RETURNING so a missing row is detectable and surfaced as NOT_FOUND.
  async updateDoc(docId, update) {
    const { sql, params } = compileUpdate(this.tableName, this.storageScope, docId, update, Date.now());
    const rows = await this.tx.all(`${sql} RETURNING "doc_id"`, params);
    if (rows.length === 0) {
      throw new FiregraphError(
        `updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,
        "NOT_FOUND"
      );
    }
  }
  async deleteDoc(docId) {
    const { sql, params } = compileDelete(this.tableName, this.storageScope, docId);
    await this.tx.run(sql, params);
  }
};
|
|
668
|
+
var SqliteBatchBackendImpl = class {
  /** Compiled statements queued since construction (or the last commit). */
  statements = [];
  /**
   * Write batch: queues compiled statements locally and submits them all in
   * a single `executor.batch()` call when commit() runs.
   *
   * @param executor - Driver adapter exposing `batch`.
   * @param tableName - Physical table the compiled statements target.
   * @param storageScope - Materialized subgraph scope used for row isolation.
   */
  constructor(executor, tableName, storageScope) {
    this.executor = executor;
    this.tableName = tableName;
    this.storageScope = storageScope;
  }
  /** Queue a set (insert/replace per `mode`) for `docId`. */
  setDoc(docId, record, mode) {
    const stmt = compileSet(this.tableName, this.storageScope, docId, record, Date.now(), mode);
    this.statements.push(stmt);
  }
  /** Queue a patch for `docId`. */
  updateDoc(docId, update) {
    const stmt = compileUpdate(this.tableName, this.storageScope, docId, update, Date.now());
    this.statements.push(stmt);
  }
  /** Queue a delete for `docId`. */
  deleteDoc(docId) {
    const stmt = compileDelete(this.tableName, this.storageScope, docId);
    this.statements.push(stmt);
  }
  /** Submit every queued statement as one driver batch; no-op when empty. */
  async commit() {
    if (this.statements.length > 0) {
      await this.executor.batch(this.statements);
      // Truncate in place (rather than reassigning) so the queue array's
      // identity is preserved, matching the original behavior.
      this.statements.length = 0;
    }
  }
};
|
|
694
|
+
// Capability identifiers every SQLite-backed executor supports unconditionally.
// "core.transactions" is deliberately absent: SqliteBackendImpl's constructor
// adds it per-instance only when the driver exposes a transaction() function.
var SQLITE_CORE_CAPS = [
  "core.read",
  "core.write",
  "core.batch",
  "core.subgraph",
  "query.aggregate",
  "query.dml",
  "query.join",
  "query.select",
  "raw.sql"
];
|
|
705
|
+
// Full SQLite backend: direct reads/writes, interactive transactions (when
// the driver supports them), write batches, subgraph scoping, cascade/bulk
// deletes with chunked retry, cross-scope queries, aggregates, server-side
// DML, multi-source expand, and projected selects.
var SqliteBackendImpl = class _SqliteBackendImpl {
  /**
   * @param executor - SQLite driver adapter (`all`/`run`/`batch`, plus an
   *   optional `transaction` and optional `maxBatchSize`/`maxBatchParams`
   *   caps consulted by executeChunkedBatches).
   * @param tableName - Physical table name; exposed as `collectionPath`.
   * @param storageScope - Materialized scope string ("" at the root graph).
   * @param scopePath - Logical subgraph path (names only, no parent UIDs).
   */
  constructor(executor, tableName, storageScope, scopePath) {
    this.executor = executor;
    this.collectionPath = tableName;
    this.storageScope = storageScope;
    this.scopePath = scopePath;
    // Advertise interactive transactions only when the driver actually
    // implements transaction(); everything else comes from the static set.
    const caps = new Set(SQLITE_CORE_CAPS);
    if (typeof executor.transaction === "function") {
      caps.add("core.transactions");
    }
    this.capabilities = createCapabilities(caps);
  }
  capabilities;
  /** Logical table name (returned through `collectionPath` for parity with Firestore). */
  collectionPath;
  scopePath;
  /** Materialized storage scope (interleaved parent UIDs + subgraph names). */
  storageScope;
  // --- Reads ---
  /** Fetch a single record by doc id; resolves null when no row matches. */
  async getDoc(docId) {
    const stmt = compileSelectByDocId(this.collectionPath, this.storageScope, docId);
    const rows = await this.executor.all(stmt.sql, stmt.params);
    return rows.length === 0 ? null : rowToRecord(rows[0]);
  }
  /** Run a filtered SELECT and map every row to a record. */
  async query(filters, options) {
    const stmt = compileSelect(this.collectionPath, this.storageScope, filters, options);
    const rows = await this.executor.all(stmt.sql, stmt.params);
    return rows.map(rowToRecord);
  }
  // --- Writes ---
  /** Write a record (write semantics — create vs. overwrite — decided by `mode` in compileSet). */
  async setDoc(docId, record, mode) {
    const stmt = compileSet(
      this.collectionPath,
      this.storageScope,
      docId,
      record,
      Date.now(),
      mode
    );
    await this.executor.run(stmt.sql, stmt.params);
  }
  /**
   * Patch an existing record. `RETURNING "doc_id"` lets us detect the
   * zero-row case, which is surfaced as a NOT_FOUND FiregraphError rather
   * than a silent no-op.
   */
  async updateDoc(docId, update) {
    const stmt = compileUpdate(this.collectionPath, this.storageScope, docId, update, Date.now());
    const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
    const rows = await this.executor.all(sqlWithReturning, stmt.params);
    if (rows.length === 0) {
      throw new FiregraphError(
        `updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,
        "NOT_FOUND"
      );
    }
  }
  /** Delete a single record by doc id (no affected-row check; missing rows pass silently). */
  async deleteDoc(docId) {
    const stmt = compileDelete(this.collectionPath, this.storageScope, docId);
    await this.executor.run(stmt.sql, stmt.params);
  }
  // --- Transactions / Batches ---
  /**
   * Run `fn` against a transaction-scoped backend. Throws
   * UNSUPPORTED_OPERATION when the driver has no transaction() (e.g. D1).
   */
  async runTransaction(fn) {
    if (!this.executor.transaction) {
      throw new FiregraphError(
        "Interactive transactions are not supported by this SQLite driver. D1 in particular has no read-then-conditional-write transactions; use a Durable Object SQLite client instead, or rewrite the code path as a batch().",
        "UNSUPPORTED_OPERATION"
      );
    }
    return this.executor.transaction(async (tx) => {
      const txBackend = new SqliteTransactionBackendImpl(
        tx,
        this.collectionPath,
        this.storageScope
      );
      return fn(txBackend);
    });
  }
  /** Create a write batch that queues statements until commit(). */
  createBatch() {
    return new SqliteBatchBackendImpl(this.executor, this.collectionPath, this.storageScope);
  }
  // --- Subgraphs ---
  /**
   * Derive a backend scoped to the `name` subgraph under `parentNodeUid`.
   * Both segments must be non-empty and "/"-free, since "/" delimits the
   * materialized scope string. Returns a new instance; `this` is untouched.
   */
  subgraph(parentNodeUid, name) {
    if (!parentNodeUid || parentNodeUid.includes("/")) {
      throw new FiregraphError(
        `Invalid parentNodeUid for subgraph: "${parentNodeUid}". Must be a non-empty string without "/".`,
        "INVALID_SUBGRAPH"
      );
    }
    if (!name || name.includes("/")) {
      throw new FiregraphError(
        `Subgraph name must not contain "/" and must be non-empty: got "${name}". Use chained .subgraph() calls for nested subgraphs.`,
        "INVALID_SUBGRAPH"
      );
    }
    const newStorageScope = this.storageScope ? `${this.storageScope}/${parentNodeUid}/${name}` : `${parentNodeUid}/${name}`;
    const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
    return new _SqliteBackendImpl(this.executor, this.collectionPath, newStorageScope, newScope);
  }
  // --- Cascade & bulk ---
  /**
   * Delete a node, its non-structural edges (both directions, deduped by
   * edge doc id), and — unless `options.deleteSubcollections === false` —
   * every row whose scope sits under this node, via one scope-prefix delete
   * statement. The prefix rows are pre-counted so the returned `deleted`
   * total reflects rows rather than statements: executeChunkedBatches counts
   * the prefix statement as 1, which is subtracted back out below.
   */
  async removeNodeCascade(uid, reader, options) {
    const [outgoingRaw, incomingRaw] = await Promise.all([
      reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
      reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
    ]);
    const seen = /* @__PURE__ */ new Set();
    const edgeDocIds = [];
    for (const edge of [...outgoingRaw, ...incomingRaw]) {
      // Skip structural node-relation rows; only real edges are collected.
      if (edge.axbType === NODE_RELATION) continue;
      const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
      if (!seen.has(docId)) {
        seen.add(docId);
        edgeDocIds.push(docId);
      }
    }
    const nodeDocId = computeNodeDocId(uid);
    const shouldDeleteSubgraphs = options?.deleteSubcollections !== false;
    let subgraphRowCount = 0;
    if (shouldDeleteSubgraphs) {
      // Pre-count subgraph rows so `deleted` can report rows, not statements.
      const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;
      const countStmt = compileCountScopePrefix(this.collectionPath, prefix);
      const countRows = await this.executor.all(countStmt.sql, countStmt.params);
      const first = countRows[0];
      const n = first?.n;
      // Some drivers return COUNT(*) as bigint; normalize to number.
      subgraphRowCount = typeof n === "bigint" ? Number(n) : Number(n ?? 0);
    }
    const writeStatements = edgeDocIds.map(
      (id) => compileDelete(this.collectionPath, this.storageScope, id)
    );
    writeStatements.push(compileDelete(this.collectionPath, this.storageScope, nodeDocId));
    if (shouldDeleteSubgraphs) {
      const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;
      writeStatements.push(compileDeleteScopePrefix(this.collectionPath, prefix));
    }
    const {
      deleted: stmtDeleted,
      batches,
      errors
    } = await this.executeChunkedBatches(writeStatements, options);
    // Edge/node counts are only claimed when every batch committed cleanly.
    const allOk = errors.length === 0;
    const edgesDeleted = allOk ? edgeDocIds.length : 0;
    const nodeDeleted = allOk;
    const prefixStatementContribution = shouldDeleteSubgraphs && allOk ? 1 : 0;
    const deleted = stmtDeleted - prefixStatementContribution + (allOk ? subgraphRowCount : 0);
    return { deleted, batches, errors, edgesDeleted, nodeDeleted };
  }
  /**
   * Delete every edge matched by `params` (read back via `reader.findEdges`,
   * defaulting to an unlimited collection scan when no limit is given), then
   * submit the deletes through executeChunkedBatches.
   */
  async bulkRemoveEdges(params, reader, options) {
    const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
    const edges = await reader.findEdges(effectiveParams);
    const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
    if (docIds.length === 0) {
      return { deleted: 0, batches: 0, errors: [] };
    }
    const statements = docIds.map(
      (id) => compileDelete(this.collectionPath, this.storageScope, id)
    );
    return this.executeChunkedBatches(statements, options);
  }
  /**
   * Submit `statements` to the executor as one or more `batch()` calls,
   * chunking by `executor.maxBatchSize` (e.g. D1's ~100-statement cap).
   * Drivers that don't advertise a cap submit everything in one batch,
   * preserving cross-batch atomicity.
   *
   * Each chunk is retried with exponential backoff up to `maxRetries`
   * (default 3) before being recorded in `errors`. The loop continues past
   * a permanently failed chunk so the caller still gets partial progress
   * visibility — to halt on first failure, set `maxRetries: 0` and check
   * `result.errors.length` after the call.
   *
   * Returns `BulkResult`-shaped fields. `deleted` reflects only the
   * statement count of *successfully committed* batches — a prefix-delete
   * statement contributes 1 to that total even though it may match many
   * rows; `removeNodeCascade` patches that up with a pre-counted row total.
   *
   * **Atomicity caveat (D1):** when chunking kicks in, atomicity is lost
   * across chunk boundaries — one chunk may commit while a later one fails.
   * `removeNodeCascade` is idempotent (deleting the same docs again is a
   * no-op) so a caller can simply retry on partial failure. `bulkRemoveEdges`
   * is also idempotent for the same reason. DO SQLite leaves `maxBatchSize`
   * unset, so everything funnels through one atomic `transactionSync` and
   * this caveat does not apply.
   */
  async executeChunkedBatches(statements, options) {
    if (statements.length === 0) {
      return { deleted: 0, batches: 0, errors: [] };
    }
    const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
    const callerBatchSize = options?.batchSize;
    // Effective statement cap is the smaller of the caller's request and
    // the driver's advertised limit (minDefined ignores undefined sides).
    const stmtCap = minDefined(callerBatchSize, this.executor.maxBatchSize);
    const chunks = chunkStatements(statements, stmtCap, this.executor.maxBatchParams);
    const errors = [];
    let deleted = 0;
    let batches = 0;
    const totalBatches = chunks.length;
    const driverParamCap = this.executor.maxBatchParams;
    for (let batchIndex = 0; batchIndex < chunks.length; batchIndex++) {
      const chunk = chunks[batchIndex];
      // A lone statement whose own param count exceeds the driver cap can
      // never succeed, so retrying it would just burn time.
      const isUnretriableOversize = chunk.length === 1 && driverParamCap !== void 0 && chunk[0].params.length > driverParamCap;
      let committed = false;
      let lastError = null;
      const effectiveRetries = isUnretriableOversize ? 0 : maxRetries;
      for (let attempt = 0; attempt <= effectiveRetries; attempt++) {
        try {
          await this.executor.batch(chunk);
          committed = true;
          break;
        } catch (err) {
          lastError = err instanceof Error ? err : new Error(String(err));
          if (attempt < effectiveRetries) {
            // Exponential backoff, clamped to MAX_RETRY_DELAY_MS.
            const delay = Math.min(BASE_RETRY_DELAY_MS * Math.pow(2, attempt), MAX_RETRY_DELAY_MS);
            await sleep(delay);
          }
        }
      }
      if (committed) {
        deleted += chunk.length;
        batches += 1;
      } else if (lastError) {
        errors.push({
          batchIndex,
          error: lastError,
          operationCount: chunk.length
        });
      }
      // Progress fires after every chunk, committed or failed.
      if (options?.onProgress) {
        options.onProgress({
          completedBatches: batches,
          totalBatches,
          deletedSoFar: deleted
        });
      }
    }
    return { deleted, batches, errors };
  }
  // --- Cross-scope (collection group) ---
  /**
   * Query edges across every subgraph scope (collection-group style).
   * Rejects "get"-strategy plans — a direct document lookup is
   * scope-specific by definition. `collectionName` narrows the scope-name
   * filter and defaults to this backend's table name (flagged as the root).
   */
  async findEdgesGlobal(params, collectionName) {
    const plan = buildEdgeQueryPlan(params);
    if (plan.strategy === "get") {
      throw new FiregraphError(
        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
        "INVALID_QUERY"
      );
    }
    const name = collectionName ?? this.collectionPath;
    const scopeNameFilter = {
      name,
      isRoot: name === this.collectionPath
    };
    const stmt = compileSelectGlobal(
      this.collectionPath,
      plan.filters,
      plan.options,
      scopeNameFilter
    );
    const rows = await this.executor.all(stmt.sql, stmt.params);
    return rows.map(rowToRecord);
  }
  // --- Aggregate ---
  /**
   * Run an aggregate query in a single SQL statement. Supports the full
   * count/sum/avg/min/max set — the SQLite engine evaluates each aggregate
   * function over the filtered row set and the executor returns one row
   * with one column per alias. SUM/MIN/MAX of an empty set returns 0
   * (SQLite's `SUM(NULL) = NULL` is mapped to a clean number for the
   * cross-backend contract); AVG returns NaN, matching the mathematical
   * convention and the Firestore Standard helper.
   */
  async aggregate(spec, filters) {
    const { stmt, aliases } = compileAggregate(
      this.collectionPath,
      this.storageScope,
      spec,
      filters
    );
    const rows = await this.executor.all(stmt.sql, stmt.params);
    const row = rows[0] ?? {};
    const out = {};
    for (const alias of aliases) {
      const v = row[alias];
      if (v === null || v === void 0) {
        // NULL aggregate → 0, except avg which maps to NaN (see JSDoc).
        const op = spec[alias].op;
        out[alias] = op === "avg" ? Number.NaN : 0;
      } else if (typeof v === "bigint") {
        out[alias] = Number(v);
      } else if (typeof v === "number") {
        out[alias] = v;
      } else {
        out[alias] = Number(v);
      }
    }
    return out;
  }
  // --- Server-side DML ---
  /**
   * Delete every row matching `filters` in a single SQL DELETE statement.
   *
   * Uses `RETURNING "doc_id"` to count rows touched — the SQLite executor's
   * `run` returns void, so RETURNING + `all()` is the portable way to learn
   * how many rows the engine actually deleted. SQLite ≥ 3.35 supports
   * `DELETE … RETURNING`; better-sqlite3, D1, and DO SQLite all run on a
   * recent enough engine.
   *
   * Single-statement DML doesn't chunk: the engine handles N rows in one
   * shot, so `BulkOptions.batchSize` is intentionally ignored. The retry
   * loop here exists only for transient driver errors (e.g. D1 surface
   * congestion); a permanent failure is surfaced via the `errors` array
   * with `batchIndex: 0` so callers see the same shape as `bulkRemoveEdges`.
   *
   * Subgraph scoping is enforced inside `compileBulkDelete` (the leading
   * `"scope" = ?` predicate) so this method, like every other backend
   * surface, naturally honours subgraph isolation.
   */
  async bulkDelete(filters, options) {
    const stmt = compileBulkDelete(this.collectionPath, this.storageScope, filters);
    return this.executeDmlWithReturning(stmt, options);
  }
  /**
   * Update every row matching `filters` with `patch.data` in a single SQL
   * UPDATE statement. The patch is deep-merged into each row's `data`
   * column via the same `flattenPatch` → `compileDataOpsExpr` pipeline that
   * `compileUpdate` (single-row) uses.
   *
   * Same contract notes as `bulkDelete` apply: single-statement, no
   * chunking, `RETURNING "doc_id"` for the affected count, retry loop for
   * transient driver errors.
   */
  async bulkUpdate(filters, patch, options) {
    const stmt = compileBulkUpdate(
      this.collectionPath,
      this.storageScope,
      filters,
      patch.data,
      Date.now()
    );
    return this.executeDmlWithReturning(stmt, options);
  }
  /**
   * Multi-source fan-out — `query.join` capability.
   *
   * Issues a single `SELECT … WHERE "aUid" IN (?, ?, …)` statement that
   * matches every edge from every source UID in one round trip. When
   * `params.hydrate === true`, follows up with a second statement that
   * fetches the target node rows; both queries hit the same table so
   * the executor amortises connection / parsing cost across them.
   *
   * Empty `params.sources` short-circuits to an empty result without
   * touching the executor — `IN ()` is not valid SQL.
   *
   * Per-source ordering / strict per-source LIMIT enforcement is NOT
   * implemented here; see the `ExpandParams.limitPerSource` JSDoc and
   * `compileExpand` for the cap semantics. Strict per-source caps would
   * require window functions and were judged out of scope for the
   * round-trip-collapse goal.
   */
  async expand(params) {
    if (params.sources.length === 0) {
      return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
    }
    const stmt = compileExpand(this.collectionPath, this.storageScope, params);
    const rows = await this.executor.all(stmt.sql, stmt.params);
    const edges = rows.map(rowToRecord);
    if (!params.hydrate) {
      return { edges };
    }
    const direction = params.direction ?? "forward";
    const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
    const uniqueTargets = [...new Set(targetUids)];
    if (uniqueTargets.length === 0) {
      return { edges, targets: [] };
    }
    const hydrateStmt = compileExpandHydrate(this.collectionPath, this.storageScope, uniqueTargets);
    const hydrateRows = await this.executor.all(hydrateStmt.sql, hydrateStmt.params);
    const byUid = /* @__PURE__ */ new Map();
    for (const row of hydrateRows) {
      const node = rowToRecord(row);
      // NOTE(review): node rows are keyed by their bUid field here —
      // presumably node records store the node uid in bUid; verify against
      // the node doc-id convention if targets come back null unexpectedly.
      byUid.set(node.bUid, node);
    }
    // targets aligns 1:1 with edges; unresolved targets become null.
    const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
    return { edges, targets };
  }
  /**
   * Server-side projection — `query.select` capability.
   *
   * Issues a single `SELECT json_extract(data, '$.f1'), …` statement that
   * returns only the requested fields. The compiler emits one column per
   * unique field plus a paired `json_type` column for `data.*` projections
   * so the decoder can recover JSON-encoded objects/arrays without a
   * second round trip. Migrations are NOT applied — the caller asked for
   * a partial shape, and rehydrating that into the migration pipeline
   * would require synthesising every absent field.
   *
   * The wire-payload reduction is the entire reason this method exists:
   * a list view that only needs `title` / `date` no longer drags the
   * full `data` JSON across the network. Callers that need the full
   * record should use `findEdges` (with migration support).
   */
  async findEdgesProjected(select, filters, options) {
    const { stmt, columns } = compileFindEdgesProjected(
      this.collectionPath,
      this.storageScope,
      select,
      filters,
      options
    );
    const rows = await this.executor.all(stmt.sql, stmt.params);
    return rows.map((row) => decodeProjectedRow(row, columns));
  }
  /**
   * Run a DML statement with `RETURNING "doc_id"` so we can count the
   * rows the engine touched, with the same retry/backoff contract as
   * `executeChunkedBatches`. Single statement, single batch.
   */
  async executeDmlWithReturning(stmt, options) {
    const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
    const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
    let lastError = null;
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        const rows = await this.executor.all(sqlWithReturning, stmt.params);
        const deleted = rows.length;
        if (options?.onProgress) {
          options.onProgress({
            completedBatches: 1,
            totalBatches: 1,
            deletedSoFar: deleted
          });
        }
        return { deleted, batches: 1, errors: [] };
      } catch (err) {
        lastError = err instanceof Error ? err : new Error(String(err));
        if (attempt < maxRetries) {
          const delay = Math.min(BASE_RETRY_DELAY_MS * Math.pow(2, attempt), MAX_RETRY_DELAY_MS);
          await sleep(delay);
        }
      }
    }
    // All attempts failed: report zero work plus the final error, shaped
    // like a single failed batch for parity with executeChunkedBatches.
    return {
      deleted: 0,
      batches: 0,
      errors: [
        {
          batchIndex: 0,
          error: lastError ?? new Error("bulk DML failed for unknown reason"),
          operationCount: 0
        }
      ]
    };
  }
};
|
|
1150
|
+
/**
 * Factory for the SQLite backend. `storageScope` / `scopePath` default to ""
 * (the root graph); pass them to resume a previously derived subgraph scope.
 * Uses `??` deliberately so only null/undefined fall back to "".
 */
function createSqliteBackend(executor, tableName, options = {}) {
  return new SqliteBackendImpl(
    executor,
    tableName,
    options.storageScope ?? "",
    options.scopePath ?? ""
  );
}
|
|
1155
|
+
export {
|
|
1156
|
+
META_EDGE_TYPE,
|
|
1157
|
+
META_NODE_TYPE,
|
|
1158
|
+
createGraphClient,
|
|
1159
|
+
createMergedRegistry,
|
|
1160
|
+
createRegistry,
|
|
1161
|
+
createSqliteBackend,
|
|
1162
|
+
generateId
|
|
1163
|
+
};
|
|
1164
|
+
//# sourceMappingURL=index.js.map
|