@typicalday/firegraph 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/README.md +93 -90
  2. package/bin/firegraph.mjs +21 -7
  3. package/dist/backend-U-MLShlg.d.ts +97 -0
  4. package/dist/backend-np4gEVhB.d.cts +97 -0
  5. package/dist/backend.cjs.map +1 -1
  6. package/dist/backend.d.cts +7 -6
  7. package/dist/backend.d.ts +7 -6
  8. package/dist/backend.js +1 -1
  9. package/dist/backend.js.map +1 -1
  10. package/dist/{chunk-EVUM6ORB.js → chunk-6SB34IPQ.js} +76 -8
  11. package/dist/chunk-6SB34IPQ.js.map +1 -0
  12. package/dist/{chunk-SU4FNLC3.js → chunk-EEKWRX5E.js} +1 -1
  13. package/dist/{chunk-SU4FNLC3.js.map → chunk-EEKWRX5E.js.map} +1 -1
  14. package/dist/{chunk-YLGXLEUE.js → chunk-GJVVRTQT.js} +5 -14
  15. package/dist/chunk-GJVVRTQT.js.map +1 -0
  16. package/dist/{chunk-GLOVWKQH.js → chunk-R7CRGYY4.js} +1 -1
  17. package/dist/{chunk-GLOVWKQH.js.map → chunk-R7CRGYY4.js.map} +1 -1
  18. package/dist/{do-sqlite.cjs → cloudflare/index.cjs} +1659 -1422
  19. package/dist/cloudflare/index.cjs.map +1 -0
  20. package/dist/cloudflare/index.d.cts +529 -0
  21. package/dist/cloudflare/index.d.ts +529 -0
  22. package/dist/cloudflare/index.js +934 -0
  23. package/dist/cloudflare/index.js.map +1 -0
  24. package/dist/codegen/index.cjs +4 -13
  25. package/dist/codegen/index.cjs.map +1 -1
  26. package/dist/codegen/index.d.cts +1 -1
  27. package/dist/codegen/index.d.ts +1 -1
  28. package/dist/codegen/index.js +1 -1
  29. package/dist/index.cjs +144 -132
  30. package/dist/index.cjs.map +1 -1
  31. package/dist/index.d.cts +116 -27
  32. package/dist/index.d.ts +116 -27
  33. package/dist/index.js +77 -123
  34. package/dist/index.js.map +1 -1
  35. package/dist/query-client/index.cjs.map +1 -1
  36. package/dist/query-client/index.js +1 -1
  37. package/dist/{scope-path-BtajqNK5.d.ts → scope-path-B1G3YiA7.d.cts} +5 -100
  38. package/dist/{scope-path-D2mNENJ-.d.cts → scope-path-B1G3YiA7.d.ts} +5 -100
  39. package/dist/{types-DfWVTsMn.d.ts → types-BGWxcpI_.d.cts} +92 -1
  40. package/dist/{types-DfWVTsMn.d.cts → types-BGWxcpI_.d.ts} +92 -1
  41. package/package.json +13 -17
  42. package/dist/chunk-EVUM6ORB.js.map +0 -1
  43. package/dist/chunk-SZ6W4VAS.js +0 -701
  44. package/dist/chunk-SZ6W4VAS.js.map +0 -1
  45. package/dist/chunk-YLGXLEUE.js.map +0 -1
  46. package/dist/d1.cjs +0 -2421
  47. package/dist/d1.cjs.map +0 -1
  48. package/dist/d1.d.cts +0 -54
  49. package/dist/d1.d.ts +0 -54
  50. package/dist/d1.js +0 -76
  51. package/dist/d1.js.map +0 -1
  52. package/dist/do-sqlite.cjs.map +0 -1
  53. package/dist/do-sqlite.d.cts +0 -41
  54. package/dist/do-sqlite.d.ts +0 -41
  55. package/dist/do-sqlite.js +0 -79
  56. package/dist/do-sqlite.js.map +0 -1
  57. package/dist/editor/client/assets/index-Bq2bfzeY.js +0 -411
  58. package/dist/editor/client/assets/index-CJ4m_EOL.css +0 -1
  59. package/dist/editor/client/index.html +0 -16
  60. package/dist/editor/server/index.mjs +0 -51511
@@ -1,701 +0,0 @@
1
- import {
2
- NODE_RELATION,
3
- buildEdgeQueryPlan,
4
- computeEdgeDocId,
5
- computeNodeDocId
6
- } from "./chunk-EVUM6ORB.js";
7
- import {
8
- FiregraphError
9
- } from "./chunk-GLOVWKQH.js";
10
-
11
// src/internal/sqlite-schema.ts
/**
 * Lookup table from logical record field names to physical SQLite column
 * names. Fields not listed here (e.g. `data.*`) are resolved elsewhere via
 * JSON-path extraction over the `data` column.
 */
var FIELD_TO_COLUMN = {
  aType: "a_type",
  aUid: "a_uid",
  axbType: "axb_type",
  bType: "b_type",
  bUid: "b_uid",
  v: "v",
  createdAt: "created_at",
  updatedAt: "updated_at"
};
22
/**
 * Build the DDL statements (table + covering indexes) for one graph table.
 * All identifiers are validated/quoted; statements are idempotent
 * (`IF NOT EXISTS`) so they can be replayed on every startup.
 */
function buildSchemaStatements(table) {
  const t = quoteIdent(table);
  // Helper for the repetitive index DDL: suffix names the index, columns
  // is the raw column list (already known-safe literals).
  const indexDdl = (suffix, columns) =>
    `CREATE INDEX IF NOT EXISTS ${quoteIdent(`${table}_idx_${suffix}`)} ON ${t}(${columns})`;
  return [
    `CREATE TABLE IF NOT EXISTS ${t} (
      doc_id TEXT NOT NULL,
      scope TEXT NOT NULL DEFAULT '',
      a_type TEXT NOT NULL,
      a_uid TEXT NOT NULL,
      axb_type TEXT NOT NULL,
      b_type TEXT NOT NULL,
      b_uid TEXT NOT NULL,
      data TEXT NOT NULL,
      v INTEGER,
      created_at INTEGER NOT NULL,
      updated_at INTEGER NOT NULL,
      PRIMARY KEY (scope, doc_id)
    )`,
    indexDdl("scope_a_uid", "scope, a_uid"),
    indexDdl("scope_b_uid", "scope, b_uid"),
    indexDdl("scope_axb_type_b_uid", "scope, axb_type, b_uid"),
    indexDdl("scope_a_type", "scope, a_type"),
    indexDdl("scope_b_type", "scope, b_type"),
    indexDdl("doc_id", "doc_id")
  ];
}
47
/** Validate `name` as a safe SQL identifier, then wrap it in double quotes. */
function quoteIdent(name) {
  validateTableName(name);
  return '"' + name + '"';
}
51
/**
 * Reject any identifier that is not a plain ASCII word — the only thing
 * we ever interpolate into DDL/DML strings. Throws on failure.
 */
function validateTableName(name) {
  const isSafe = /^[A-Za-z_][A-Za-z0-9_]*$/.test(name);
  if (isSafe) return;
  throw new Error(`Invalid SQL identifier: ${name}. Must match /^[A-Za-z_][A-Za-z0-9_]*$/.`);
}
56
-
57
// src/timestamp.ts
/**
 * Firestore-Timestamp-shaped value: whole seconds plus nanoseconds.
 * Millisecond round-trips are exact because fromMillis only ever produces
 * whole-millisecond nanosecond values.
 */
var GraphTimestampImpl = class _GraphTimestampImpl {
  constructor(seconds, nanoseconds) {
    this.seconds = seconds;
    this.nanoseconds = nanoseconds;
  }
  /** Epoch milliseconds (sub-millisecond nanoseconds are floored away). */
  toMillis() {
    return this.seconds * 1e3 + Math.floor(this.nanoseconds / 1e6);
  }
  /** JS Date at the same instant (millisecond precision). */
  toDate() {
    return new Date(this.toMillis());
  }
  /** Plain-object form used for JSON serialization. */
  toJSON() {
    return { seconds: this.seconds, nanoseconds: this.nanoseconds };
  }
  /** Construct from epoch milliseconds. */
  static fromMillis(ms) {
    const seconds = Math.floor(ms / 1e3);
    return new _GraphTimestampImpl(seconds, (ms - seconds * 1e3) * 1e6);
  }
  /** Current wall-clock time. */
  static now() {
    return _GraphTimestampImpl.fromMillis(Date.now());
  }
};
81
-
82
// src/internal/sqlite-sql.ts
/**
 * Resolve a logical filter field to a SQL expression.
 * Known top-level fields map to real columns; `data` and `data.<key>` map
 * to json_extract over the `data` column with a bound JSON-path parameter.
 * Throws FiregraphError("INVALID_QUERY") for anything else.
 */
function compileFieldRef(field) {
  const column = FIELD_TO_COLUMN[field];
  if (column !== undefined) {
    return { expr: quoteIdent(column) };
  }
  if (field === "data") {
    return { expr: 'json_extract("data", ?)', pathParam: "$" };
  }
  if (field.startsWith("data.")) {
    const key = field.slice("data.".length);
    validateJsonPathKey(key);
    return { expr: 'json_extract("data", ?)', pathParam: `$.${key}` };
  }
  throw new FiregraphError(`SQLite backend cannot resolve filter field: ${field}`, "INVALID_QUERY");
}
98
/** Constructor names of Firestore SDK value types that must never be bound. */
var FIRESTORE_TYPE_NAMES = /* @__PURE__ */ new Set([
  "Timestamp",
  "GeoPoint",
  "VectorValue",
  "DocumentReference",
  "FieldValue"
]);
/**
 * Return the Firestore type name if `value` looks like one of the SDK's
 * special value classes (matched by constructor name), otherwise null.
 */
function isFirestoreSpecialType(value) {
  const ctorName = value.constructor?.name;
  if (!ctorName || !FIRESTORE_TYPE_NAMES.has(ctorName)) return null;
  return ctorName;
}
110
/**
 * Convert an arbitrary JS value into something SQLite can bind:
 * null/undefined -> NULL, primitives pass through, Date -> epoch millis,
 * plain objects/arrays -> JSON text. Firestore SDK value types are
 * rejected outright because JSON-serializing them would corrupt queries.
 */
function bindValue(value) {
  if (value == null) return null;
  switch (typeof value) {
    case "string":
    case "number":
    case "bigint":
    case "boolean":
      return value;
    case "object": {
      if (value instanceof Date) return value.getTime();
      const firestoreType = isFirestoreSpecialType(value);
      if (firestoreType) {
        throw new FiregraphError(
          `SQLite backend cannot bind a Firestore ${firestoreType} value \u2014 JSON serialization would silently drop fields and the resulting bind would never match a stored row. Convert to a primitive (e.g. \`ts.toMillis()\` for Timestamp) before filtering or updating.`,
          "INVALID_QUERY"
        );
      }
      return JSON.stringify(value);
    }
    default:
      // Functions/symbols: fall back to their string form.
      return String(value);
  }
}
128
/** Keys we allow inside a `$.key` JSON path — plain word characters plus '-'. */
var JSON_PATH_KEY_RE = /^[A-Za-z_][A-Za-z0-9_-]*$/;
/**
 * Guard a single `data.<key>` path component before it is interpolated into
 * a JSON path string. Throws FiregraphError("INVALID_QUERY") for empty or
 * unsafe keys (dots, quotes, brackets, whitespace, ...).
 */
function validateJsonPathKey(key) {
  if (key.length === 0) {
    throw new FiregraphError(
      "SQLite backend: empty JSON path component is not allowed",
      "INVALID_QUERY"
    );
  }
  if (JSON_PATH_KEY_RE.test(key)) return;
  throw new FiregraphError(
    `SQLite backend: data field path component "${key}" is not a safe JSON-path identifier. Allowed pattern: /^[A-Za-z_][A-Za-z0-9_-]*$/. Use replaceData (full-data overwrite) for keys with reserved characters (whitespace, dots, brackets, quotes, etc.).`,
    "INVALID_QUERY"
  );
}
143
/**
 * Compile one filter predicate into a SQL condition, appending its bind
 * values (and any JSON-path parameter) to `params` in evaluation order.
 * Throws FiregraphError("INVALID_QUERY") for unknown operators.
 */
function compileFilter(filter, params) {
  const { expr, pathParam } = compileFieldRef(filter.field);
  if (pathParam !== undefined) params.push(pathParam);
  // Simple binary comparisons share one code path via an operator table.
  const comparisonSql = {
    "==": "=",
    "!=": "!=",
    "<": "<",
    "<=": "<=",
    ">": ">",
    ">=": ">="
  }[filter.op];
  if (comparisonSql) {
    params.push(bindValue(filter.value));
    return `${expr} ${comparisonSql} ?`;
  }
  switch (filter.op) {
    case "in":
    case "not-in": {
      const values = asArray(filter.value, filter.op);
      const placeholders = values.map(() => "?").join(", ");
      for (const v of values) params.push(bindValue(v));
      return filter.op === "in" ? `${expr} IN (${placeholders})` : `${expr} NOT IN (${placeholders})`;
    }
    case "array-contains":
      params.push(bindValue(filter.value));
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value = ?)`;
    case "array-contains-any": {
      const values = asArray(filter.value, "array-contains-any");
      const placeholders = values.map(() => "?").join(", ");
      for (const v of values) params.push(bindValue(v));
      return `EXISTS (SELECT 1 FROM json_each(${expr}) WHERE value IN (${placeholders}))`;
    }
    default:
      throw new FiregraphError(
        `SQLite backend does not support filter operator: ${String(filter.op)}`,
        "INVALID_QUERY"
      );
  }
}
194
/** Require a non-empty array for list-valued operators; returns it unchanged. */
function asArray(value, op) {
  const valid = Array.isArray(value) && value.length > 0;
  if (!valid) {
    throw new FiregraphError(`Operator "${op}" requires a non-empty array value`, "INVALID_QUERY");
  }
  return value;
}
200
/**
 * Compile an optional ORDER BY clause (leading space included); "" when
 * no ordering was requested. JSON-path params are appended to `params`.
 */
function compileOrderBy(options, params) {
  const orderBy = options?.orderBy;
  if (!orderBy) return "";
  const { expr, pathParam } = compileFieldRef(orderBy.field);
  if (pathParam !== undefined) params.push(pathParam);
  return ` ORDER BY ${expr} ${orderBy.direction === "desc" ? "DESC" : "ASC"}`;
}
208
/**
 * Compile an optional LIMIT clause (leading space included); "" when no
 * limit was requested. The limit value is appended to `params`.
 */
function compileLimit(options, params) {
  const limit = options?.limit;
  if (limit === undefined) return "";
  params.push(limit);
  return ` LIMIT ?`;
}
213
/**
 * Build a scoped SELECT: always pinned to one `scope`, plus all filter
 * conditions ANDed together, with optional ORDER BY / LIMIT.
 */
function compileSelect(table, scope, filters, options) {
  const params = [scope];
  const conditions = ['"scope" = ?'];
  for (const filter of filters) {
    conditions.push(compileFilter(filter, params));
  }
  const base = `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}`;
  const sql = base + compileOrderBy(options, params) + compileLimit(options, params);
  return { sql, params };
}
226
/**
 * Build a cross-scope ("collection group") SELECT. At least one filter is
 * mandatory so we never emit an unbounded full-table scan. An optional
 * scope-name filter pins either the root scope ('') or any scope whose
 * last path segment matches `name`.
 */
function compileSelectGlobal(table, filters, options, scopeNameFilter) {
  if (filters.length === 0) {
    throw new FiregraphError(
      "compileSelectGlobal requires at least one filter \u2014 refusing to issue an unbounded SELECT.",
      "INVALID_QUERY"
    );
  }
  const params = [];
  const conditions = [];
  if (scopeNameFilter) {
    if (scopeNameFilter.isRoot) {
      conditions.push(`"scope" = ?`);
      params.push("");
    } else {
      // Match scopes ending in "/<name>"; LIKE wildcards in the name are escaped.
      conditions.push(`"scope" LIKE ? ESCAPE '\\'`);
      params.push(`%/${escapeLike(scopeNameFilter.name)}`);
    }
  }
  for (const filter of filters) {
    conditions.push(compileFilter(filter, params));
  }
  let sql = `SELECT * FROM ${quoteIdent(table)} WHERE ${conditions.join(" AND ")}`;
  sql += compileOrderBy(options, params);
  sql += compileLimit(options, params);
  return { sql, params };
}
250
/** Point lookup of a single document by (scope, doc_id). */
function compileSelectByDocId(table, scope, docId) {
  const sql = `SELECT * FROM ${quoteIdent(table)} WHERE "scope" = ? AND "doc_id" = ? LIMIT 1`;
  return { sql, params: [scope, docId] };
}
256
/**
 * Upsert one record via INSERT OR REPLACE. `data` is JSON-serialized
 * (defaulting to {}), `v` defaults to NULL, and both timestamps are set
 * to `nowMillis` — REPLACE means created_at is reset on overwrite.
 */
function compileSet(table, scope, docId, record, nowMillis) {
  const params = [
    docId,
    scope,
    record.aType,
    record.aUid,
    record.axbType,
    record.bType,
    record.bUid,
    JSON.stringify(record.data ?? {}),
    record.v ?? null,
    nowMillis,
    nowMillis
  ];
  const sql = `INSERT OR REPLACE INTO ${quoteIdent(table)} (
    doc_id, scope, a_type, a_uid, axb_type, b_type, b_uid, data, v, created_at, updated_at
  ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`;
  return { sql, params };
}
275
/**
 * Compile a partial UPDATE. `replaceData` overwrites the whole `data`
 * column; otherwise `dataFields` patch individual keys via json_set.
 * `v` is updated only when provided; `updated_at` is always bumped.
 */
function compileUpdate(table, scope, docId, update, nowMillis) {
  const setClauses = [];
  const params = [];
  const { replaceData, dataFields, v } = update;
  if (replaceData) {
    setClauses.push(`"data" = ?`);
    params.push(JSON.stringify(replaceData));
  } else if (dataFields && Object.keys(dataFields).length > 0) {
    const entries = Object.entries(dataFields);
    const pathArgs = entries.map(() => `?, ?`).join(", ");
    setClauses.push(`"data" = json_set(COALESCE("data", '{}'), ${pathArgs})`);
    for (const [key, val] of entries) {
      validateJsonPathKey(key);
      params.push(`$.${key}`, bindValue(val));
    }
  }
  if (v !== undefined) {
    setClauses.push(`"v" = ?`);
    params.push(v);
  }
  setClauses.push(`"updated_at" = ?`);
  params.push(nowMillis, scope, docId);
  return {
    sql: `UPDATE ${quoteIdent(table)} SET ${setClauses.join(", ")} WHERE "scope" = ? AND "doc_id" = ?`,
    params
  };
}
303
/** Delete one document by (scope, doc_id). */
function compileDelete(table, scope, docId) {
  const sql = `DELETE FROM ${quoteIdent(table)} WHERE "scope" = ? AND "doc_id" = ?`;
  return { sql, params: [scope, docId] };
}
309
/** Delete every row whose scope lies strictly below `scopePrefix`. */
function compileDeleteScopePrefix(table, scopePrefix) {
  return {
    sql: `DELETE FROM ${quoteIdent(table)} WHERE "scope" LIKE ? ESCAPE '\\'`,
    // "<prefix>/%" matches descendants only, never the prefix scope itself.
    params: [`${escapeLike(scopePrefix)}/%`]
  };
}
316
/** Count every row whose scope lies strictly below `scopePrefix` (column `n`). */
function compileCountScopePrefix(table, scopePrefix) {
  return {
    sql: `SELECT COUNT(*) AS n FROM ${quoteIdent(table)} WHERE "scope" LIKE ? ESCAPE '\\'`,
    params: [`${escapeLike(scopePrefix)}/%`]
  };
}
323
/** Escape LIKE metacharacters (\, %, _) with a backslash in one pass. */
function escapeLike(value) {
  return value.replace(/[\\%_]/g, (ch) => `\\${ch}`);
}
326
/**
 * Hydrate a raw SQLite row into a graph record: JSON-parse `data`,
 * convert timestamps to GraphTimestampImpl, and include `v` only when
 * the column is non-NULL.
 */
function rowToRecord(row) {
  const record = {
    aType: row.a_type,
    aUid: row.a_uid,
    axbType: row.axb_type,
    bType: row.b_type,
    bUid: row.b_uid,
    data: row.data ? JSON.parse(row.data) : {},
    createdAt: GraphTimestampImpl.fromMillis(toMillis(row.created_at)),
    updatedAt: GraphTimestampImpl.fromMillis(toMillis(row.updated_at))
  };
  if (row.v != null) {
    record.v = Number(row.v);
  }
  return record;
}
346
/** Coerce a timestamp column value (number/bigint/string) to a number; else 0. */
function toMillis(value) {
  switch (typeof value) {
    case "number":
      return value;
    case "bigint":
    case "string":
      return Number(value);
    default:
      return 0;
  }
}
352
-
353
// src/internal/sqlite-backend.ts
// Retry policy for chunked batch commits: exponential backoff starting at
// 200ms, capped at 5s, up to 3 retries by default.
var DEFAULT_MAX_RETRIES = 3;
var BASE_RETRY_DELAY_MS = 200;
var MAX_RETRY_DELAY_MS = 5e3;
/** Promise that resolves after `ms` milliseconds. */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
/** Smaller of two possibly-undefined numbers; undefined means "no cap". */
function minDefined(a, b) {
  if (a !== undefined && b !== undefined) return Math.min(a, b);
  return a ?? b;
}
365
/**
 * Greedily partition `statements` into chunks honoring optional caps on
 * statement count and total bound-parameter count. Caps that are missing,
 * non-positive, or non-finite are treated as "unlimited"; with no caps at
 * all the input array is returned as a single chunk. A lone statement that
 * alone exceeds the param cap still gets its own chunk (handled upstream).
 */
function chunkStatements(statements, maxStatements, maxParams) {
  const normalizeCap = (cap) =>
    cap && cap > 0 && Number.isFinite(cap) ? Math.floor(cap) : Infinity;
  const stmtCap = normalizeCap(maxStatements);
  const paramCap = normalizeCap(maxParams);
  if (stmtCap === Infinity && paramCap === Infinity) {
    return [statements];
  }
  const chunks = [];
  let current = [];
  let currentParams = 0;
  for (const stmt of statements) {
    const stmtParams = stmt.params.length;
    const overCap = current.length >= stmtCap || currentParams + stmtParams > paramCap;
    if (overCap && current.length > 0) {
      chunks.push(current);
      current = [];
      currentParams = 0;
    }
    current.push(stmt);
    currentParams += stmtParams;
  }
  if (current.length > 0) chunks.push(current);
  return chunks;
}
389
/**
 * Backend facade bound to one interactive transaction: every read/write
 * runs through the transaction-scoped executor `tx`.
 */
var SqliteTransactionBackendImpl = class {
  constructor(tx, tableName, storageScope) {
    this.tx = tx;
    this.tableName = tableName;
    this.storageScope = storageScope;
  }
  /** Fetch one record by doc id, or null when absent. */
  async getDoc(docId) {
    const { sql, params } = compileSelectByDocId(this.tableName, this.storageScope, docId);
    const rows = await this.tx.all(sql, params);
    return rows.length > 0 ? rowToRecord(rows[0]) : null;
  }
  /** Run a filtered query inside the transaction. */
  async query(filters, options) {
    const { sql, params } = compileSelect(this.tableName, this.storageScope, filters, options);
    const rows = await this.tx.all(sql, params);
    return rows.map(rowToRecord);
  }
  /** Upsert a record inside the transaction. */
  async setDoc(docId, record) {
    const { sql, params } = compileSet(this.tableName, this.storageScope, docId, record, Date.now());
    await this.tx.run(sql, params);
  }
  /** Partially update a record; throws NOT_FOUND when no row matched. */
  async updateDoc(docId, update) {
    const stmt = compileUpdate(this.tableName, this.storageScope, docId, update, Date.now());
    const rows = await this.tx.all(`${stmt.sql} RETURNING "doc_id"`, stmt.params);
    if (rows.length === 0) {
      throw new FiregraphError(
        `updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,
        "NOT_FOUND"
      );
    }
  }
  /** Delete a record inside the transaction (no-op when absent). */
  async deleteDoc(docId) {
    const { sql, params } = compileDelete(this.tableName, this.storageScope, docId);
    await this.tx.run(sql, params);
  }
};
425
/**
 * Write batch: queues compiled statements locally and submits them all in
 * a single executor.batch() call on commit.
 */
var SqliteBatchBackendImpl = class {
  // Pending compiled statements, flushed (and cleared) by commit().
  statements = [];
  constructor(executor, tableName, storageScope) {
    this.executor = executor;
    this.tableName = tableName;
    this.storageScope = storageScope;
  }
  /** Queue an upsert. */
  setDoc(docId, record) {
    const stmt = compileSet(this.tableName, this.storageScope, docId, record, Date.now());
    this.statements.push(stmt);
  }
  /** Queue a partial update. */
  updateDoc(docId, update) {
    const stmt = compileUpdate(this.tableName, this.storageScope, docId, update, Date.now());
    this.statements.push(stmt);
  }
  /** Queue a delete. */
  deleteDoc(docId) {
    this.statements.push(compileDelete(this.tableName, this.storageScope, docId));
  }
  /** Submit all queued statements; clears the queue on success. No-op when empty. */
  async commit() {
    if (this.statements.length > 0) {
      await this.executor.batch(this.statements);
      this.statements.length = 0;
    }
  }
};
449
/**
 * Primary SQLite-backed graph store. Wraps a driver-agnostic `executor`
 * (D1, Durable Object SQLite, ...) and compiles all reads/writes through
 * the sqlite-sql helpers. Instances are immutable; `subgraph()` returns a
 * new instance with a deeper scope.
 */
var SqliteBackendImpl = class _SqliteBackendImpl {
  constructor(executor, tableName, storageScope, scopePath) {
    this.executor = executor;
    this.collectionPath = tableName;
    this.storageScope = storageScope;
    this.scopePath = scopePath;
  }
  /** Logical table name (returned through `collectionPath` for parity with Firestore). */
  collectionPath;
  /** Logical scope path (subgraph names only, no parent UIDs). */
  scopePath;
  /** Materialized storage scope (interleaved parent UIDs + subgraph names). */
  storageScope;
  // --- Reads ---
  /** Fetch one record by doc id within this scope, or null when absent. */
  async getDoc(docId) {
    const stmt = compileSelectByDocId(this.collectionPath, this.storageScope, docId);
    const rows = await this.executor.all(stmt.sql, stmt.params);
    return rows.length === 0 ? null : rowToRecord(rows[0]);
  }
  /** Run a filtered (optionally ordered/limited) query within this scope. */
  async query(filters, options) {
    const stmt = compileSelect(this.collectionPath, this.storageScope, filters, options);
    const rows = await this.executor.all(stmt.sql, stmt.params);
    return rows.map(rowToRecord);
  }
  // --- Writes ---
  /** Upsert one record (INSERT OR REPLACE — overwrites any existing row). */
  async setDoc(docId, record) {
    const stmt = compileSet(this.collectionPath, this.storageScope, docId, record, Date.now());
    await this.executor.run(stmt.sql, stmt.params);
  }
  /**
   * Partially update one record. Uses RETURNING to detect whether a row
   * matched, throwing FiregraphError("NOT_FOUND") when it did not.
   */
  async updateDoc(docId, update) {
    const stmt = compileUpdate(this.collectionPath, this.storageScope, docId, update, Date.now());
    const sqlWithReturning = `${stmt.sql} RETURNING "doc_id"`;
    const rows = await this.executor.all(sqlWithReturning, stmt.params);
    if (rows.length === 0) {
      throw new FiregraphError(
        `updateDoc: no document found for doc_id=${docId} (scope=${this.storageScope})`,
        "NOT_FOUND"
      );
    }
  }
  /** Delete one record by doc id (no error when absent). */
  async deleteDoc(docId) {
    const stmt = compileDelete(this.collectionPath, this.storageScope, docId);
    await this.executor.run(stmt.sql, stmt.params);
  }
  // --- Transactions / Batches ---
  /**
   * Run `fn` inside an interactive transaction, passing it a
   * transaction-bound backend. Throws UNSUPPORTED_OPERATION when the
   * driver exposes no `transaction` capability (e.g. D1).
   */
  async runTransaction(fn) {
    if (!this.executor.transaction) {
      throw new FiregraphError(
        "Interactive transactions are not supported by this SQLite driver. D1 in particular has no read-then-conditional-write transactions; use a Durable Object SQLite client instead, or rewrite the code path as a batch().",
        "UNSUPPORTED_OPERATION"
      );
    }
    return this.executor.transaction(async (tx) => {
      const txBackend = new SqliteTransactionBackendImpl(
        tx,
        this.collectionPath,
        this.storageScope
      );
      return fn(txBackend);
    });
  }
  /** Start a write batch bound to this table/scope. */
  createBatch() {
    return new SqliteBatchBackendImpl(this.executor, this.collectionPath, this.storageScope);
  }
  // --- Subgraphs ---
  /**
   * Descend into the subgraph `name` under node `parentNodeUid`.
   * Storage scope gains "<uid>/<name>"; logical scope gains "<name>".
   * Both segments must be non-empty and slash-free.
   */
  subgraph(parentNodeUid, name) {
    if (!parentNodeUid || parentNodeUid.includes("/")) {
      throw new FiregraphError(
        `Invalid parentNodeUid for subgraph: "${parentNodeUid}". Must be a non-empty string without "/".`,
        "INVALID_SUBGRAPH"
      );
    }
    if (!name || name.includes("/")) {
      throw new FiregraphError(
        `Subgraph name must not contain "/" and must be non-empty: got "${name}". Use chained .subgraph() calls for nested subgraphs.`,
        "INVALID_SUBGRAPH"
      );
    }
    const newStorageScope = this.storageScope ? `${this.storageScope}/${parentNodeUid}/${name}` : `${parentNodeUid}/${name}`;
    const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
    return new _SqliteBackendImpl(this.executor, this.collectionPath, newStorageScope, newScope);
  }
  // --- Cascade & bulk ---
  /**
   * Delete a node, every non-node-relation edge touching it (both
   * directions, deduplicated by doc id), and — unless
   * `options.deleteSubcollections === false` — all rows in its subgraph
   * scopes. Subgraph rows are pre-counted so the returned `deleted` total
   * reflects actual rows, not just statement count; on any batch error the
   * per-category counters are reported as 0 (see executeChunkedBatches).
   */
  async removeNodeCascade(uid, reader, options) {
    const [outgoingRaw, incomingRaw] = await Promise.all([
      reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
      reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
    ]);
    const seen = /* @__PURE__ */ new Set();
    const edgeDocIds = [];
    for (const edge of [...outgoingRaw, ...incomingRaw]) {
      // Node-relation rows represent the node itself, not a real edge.
      if (edge.axbType === NODE_RELATION) continue;
      const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
      if (!seen.has(docId)) {
        seen.add(docId);
        edgeDocIds.push(docId);
      }
    }
    const nodeDocId = computeNodeDocId(uid);
    const shouldDeleteSubgraphs = options?.deleteSubcollections !== false;
    let subgraphRowCount = 0;
    if (shouldDeleteSubgraphs) {
      // Pre-count descendant rows so `deleted` can report real rows rather
      // than counting the prefix-delete as a single statement.
      const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;
      const countStmt = compileCountScopePrefix(this.collectionPath, prefix);
      const countRows = await this.executor.all(countStmt.sql, countStmt.params);
      const first = countRows[0];
      const n = first?.n;
      subgraphRowCount = typeof n === "bigint" ? Number(n) : Number(n ?? 0);
    }
    const writeStatements = edgeDocIds.map(
      (id) => compileDelete(this.collectionPath, this.storageScope, id)
    );
    writeStatements.push(compileDelete(this.collectionPath, this.storageScope, nodeDocId));
    if (shouldDeleteSubgraphs) {
      const prefix = this.storageScope ? `${this.storageScope}/${uid}` : uid;
      writeStatements.push(compileDeleteScopePrefix(this.collectionPath, prefix));
    }
    const {
      deleted: stmtDeleted,
      batches,
      errors
    } = await this.executeChunkedBatches(writeStatements, options);
    const allOk = errors.length === 0;
    const edgesDeleted = allOk ? edgeDocIds.length : 0;
    const nodeDeleted = allOk;
    // Swap the prefix-delete's 1-statement contribution for the real row count.
    const prefixStatementContribution = shouldDeleteSubgraphs && allOk ? 1 : 0;
    const deleted = stmtDeleted - prefixStatementContribution + (allOk ? subgraphRowCount : 0);
    return { deleted, batches, errors, edgesDeleted, nodeDeleted };
  }
  /**
   * Find edges matching `params` and delete them in chunked batches.
   * Defaults to an unlimited (`limit: 0`) collection scan when the caller
   * gave no limit. Returns BulkResult-shaped `{ deleted, batches, errors }`.
   */
  async bulkRemoveEdges(params, reader, options) {
    const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
    const edges = await reader.findEdges(effectiveParams);
    const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
    if (docIds.length === 0) {
      return { deleted: 0, batches: 0, errors: [] };
    }
    const statements = docIds.map(
      (id) => compileDelete(this.collectionPath, this.storageScope, id)
    );
    return this.executeChunkedBatches(statements, options);
  }
  /**
   * Submit `statements` to the executor as one or more `batch()` calls,
   * chunking by `executor.maxBatchSize` (e.g. D1's ~100-statement cap).
   * Drivers that don't advertise a cap submit everything in one batch,
   * preserving cross-batch atomicity.
   *
   * Each chunk is retried with exponential backoff up to `maxRetries`
   * (default 3) before being recorded in `errors`. The loop continues past
   * a permanently failed chunk so the caller still gets partial progress
   * visibility — to halt on first failure, set `maxRetries: 0` and check
   * `result.errors.length` after the call.
   *
   * Returns `BulkResult`-shaped fields. `deleted` reflects only the
   * statement count of *successfully committed* batches — a prefix-delete
   * statement contributes 1 to that total even though it may match many
   * rows; `removeNodeCascade` patches that up with a pre-counted row total.
   *
   * **Atomicity caveat (D1):** when chunking kicks in, atomicity is lost
   * across chunk boundaries — one chunk may commit while a later one fails.
   * `removeNodeCascade` is idempotent (deleting the same docs again is a
   * no-op) so a caller can simply retry on partial failure. `bulkRemoveEdges`
   * is also idempotent for the same reason. DO SQLite leaves `maxBatchSize`
   * unset, so everything funnels through one atomic `transactionSync` and
   * this caveat does not apply.
   */
  async executeChunkedBatches(statements, options) {
    if (statements.length === 0) {
      return { deleted: 0, batches: 0, errors: [] };
    }
    const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
    const callerBatchSize = options?.batchSize;
    const stmtCap = minDefined(callerBatchSize, this.executor.maxBatchSize);
    const chunks = chunkStatements(statements, stmtCap, this.executor.maxBatchParams);
    const errors = [];
    let deleted = 0;
    let batches = 0;
    const totalBatches = chunks.length;
    const driverParamCap = this.executor.maxBatchParams;
    for (let batchIndex = 0; batchIndex < chunks.length; batchIndex++) {
      const chunk = chunks[batchIndex];
      // A single statement that alone exceeds the driver's param cap can
      // never succeed — skip retries and record the failure immediately.
      const isUnretriableOversize = chunk.length === 1 && driverParamCap !== void 0 && chunk[0].params.length > driverParamCap;
      let committed = false;
      let lastError = null;
      const effectiveRetries = isUnretriableOversize ? 0 : maxRetries;
      for (let attempt = 0; attempt <= effectiveRetries; attempt++) {
        try {
          await this.executor.batch(chunk);
          committed = true;
          break;
        } catch (err) {
          lastError = err instanceof Error ? err : new Error(String(err));
          if (attempt < effectiveRetries) {
            const delay = Math.min(BASE_RETRY_DELAY_MS * Math.pow(2, attempt), MAX_RETRY_DELAY_MS);
            await sleep(delay);
          }
        }
      }
      if (committed) {
        deleted += chunk.length;
        batches += 1;
      } else if (lastError) {
        errors.push({
          batchIndex,
          error: lastError,
          operationCount: chunk.length
        });
      }
      if (options?.onProgress) {
        options.onProgress({
          completedBatches: batches,
          totalBatches,
          deletedSoFar: deleted
        });
      }
    }
    return { deleted, batches, errors };
  }
  // --- Cross-scope (collection group) ---
  /**
   * Query edges across all scopes ("collection group" semantics).
   * Rejects plans that resolve to a direct document get; when
   * `collectionName` is omitted (or equals the table name) the search is
   * pinned to the root scope, otherwise to scopes ending in that name.
   */
  async findEdgesGlobal(params, collectionName) {
    const plan = buildEdgeQueryPlan(params);
    if (plan.strategy === "get") {
      throw new FiregraphError(
        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
        "INVALID_QUERY"
      );
    }
    const name = collectionName ?? this.collectionPath;
    const scopeNameFilter = {
      name,
      isRoot: name === this.collectionPath
    };
    const stmt = compileSelectGlobal(
      this.collectionPath,
      plan.filters,
      plan.options,
      scopeNameFilter
    );
    const rows = await this.executor.all(stmt.sql, stmt.params);
    return rows.map(rowToRecord);
  }
};
690
/**
 * Factory: wrap a SQL executor in a root-scoped Firegraph SQLite backend.
 * Both scope options default to "" (root) when null or undefined.
 */
function createSqliteBackend(executor, tableName, options = {}) {
  return new SqliteBackendImpl(
    executor,
    tableName,
    options.storageScope ?? "",
    options.scopePath ?? ""
  );
}
695
-
696
- export {
697
- buildSchemaStatements,
698
- validateTableName,
699
- createSqliteBackend
700
- };
701
- //# sourceMappingURL=chunk-SZ6W4VAS.js.map