@typicalday/firegraph 0.12.0 → 0.13.0

Files changed (70)
  1. package/README.md +317 -73
  2. package/dist/backend-DuvHGgK1.d.cts +1897 -0
  3. package/dist/backend-DuvHGgK1.d.ts +1897 -0
  4. package/dist/backend.cjs +222 -3
  5. package/dist/backend.cjs.map +1 -1
  6. package/dist/backend.d.cts +25 -5
  7. package/dist/backend.d.ts +25 -5
  8. package/dist/backend.js +197 -4
  9. package/dist/backend.js.map +1 -1
  10. package/dist/chunk-2DHMNTV6.js +16 -0
  11. package/dist/chunk-2DHMNTV6.js.map +1 -0
  12. package/dist/chunk-4MMQ5W74.js +288 -0
  13. package/dist/chunk-4MMQ5W74.js.map +1 -0
  14. package/dist/chunk-D4J7Z4FE.js +67 -0
  15. package/dist/chunk-D4J7Z4FE.js.map +1 -0
  16. package/dist/chunk-N5HFDWQX.js +23 -0
  17. package/dist/chunk-N5HFDWQX.js.map +1 -0
  18. package/dist/chunk-PAD7WFFU.js +573 -0
  19. package/dist/chunk-PAD7WFFU.js.map +1 -0
  20. package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
  21. package/dist/chunk-TK64DNVK.js.map +1 -0
  22. package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
  23. package/dist/chunk-WRTFC5NG.js.map +1 -0
  24. package/dist/client-BKi3vk0Q.d.ts +34 -0
  25. package/dist/client-BrsaXtDV.d.cts +34 -0
  26. package/dist/cloudflare/index.cjs +930 -3
  27. package/dist/cloudflare/index.cjs.map +1 -1
  28. package/dist/cloudflare/index.d.cts +213 -12
  29. package/dist/cloudflare/index.d.ts +213 -12
  30. package/dist/cloudflare/index.js +562 -281
  31. package/dist/cloudflare/index.js.map +1 -1
  32. package/dist/codegen/index.d.cts +1 -1
  33. package/dist/codegen/index.d.ts +1 -1
  34. package/dist/errors-BRc3I_eH.d.cts +73 -0
  35. package/dist/errors-BRc3I_eH.d.ts +73 -0
  36. package/dist/firestore-enterprise/index.cjs +3877 -0
  37. package/dist/firestore-enterprise/index.cjs.map +1 -0
  38. package/dist/firestore-enterprise/index.d.cts +141 -0
  39. package/dist/firestore-enterprise/index.d.ts +141 -0
  40. package/dist/firestore-enterprise/index.js +985 -0
  41. package/dist/firestore-enterprise/index.js.map +1 -0
  42. package/dist/firestore-standard/index.cjs +3117 -0
  43. package/dist/firestore-standard/index.cjs.map +1 -0
  44. package/dist/firestore-standard/index.d.cts +49 -0
  45. package/dist/firestore-standard/index.d.ts +49 -0
  46. package/dist/firestore-standard/index.js +283 -0
  47. package/dist/firestore-standard/index.js.map +1 -0
  48. package/dist/index.cjs +590 -550
  49. package/dist/index.cjs.map +1 -1
  50. package/dist/index.d.cts +9 -37
  51. package/dist/index.d.ts +9 -37
  52. package/dist/index.js +178 -555
  53. package/dist/index.js.map +1 -1
  54. package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
  55. package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
  56. package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
  57. package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
  58. package/dist/sqlite/index.cjs +3631 -0
  59. package/dist/sqlite/index.cjs.map +1 -0
  60. package/dist/sqlite/index.d.cts +111 -0
  61. package/dist/sqlite/index.d.ts +111 -0
  62. package/dist/sqlite/index.js +1164 -0
  63. package/dist/sqlite/index.js.map +1 -0
  64. package/package.json +33 -3
  65. package/dist/backend-BsR0lnFL.d.ts +0 -200
  66. package/dist/backend-Ct-fLlkG.d.cts +0 -200
  67. package/dist/chunk-AWW4MUJ5.js.map +0 -1
  68. package/dist/chunk-HONQY4HF.js.map +0 -1
  69. package/dist/types-DxYLy8Ol.d.cts +0 -770
  70. package/dist/types-DxYLy8Ol.d.ts +0 -770
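Note on the new entry points: dist/firestore-standard, dist/firestore-enterprise, and dist/sqlite appear for the first time in 0.13.0, and package.json grows by roughly 30 lines, which is consistent with new subpath exports being published alongside the existing root and cloudflare entries. A minimal sketch of what consuming one of these backends might look like, assuming the subpaths mirror the dist/ folder names and reuse createGraphClientFromBackend from the root export (neither the subpath names nor the backend factory name are confirmed by this diff):

import { createGraphClientFromBackend } from "@typicalday/firegraph";
// Hypothetical subpath and factory name, inferred only from the dist/sqlite/* files listed above.
import { createSqliteBackend } from "@typicalday/firegraph/sqlite";

const backend = createSqliteBackend({ filename: "./graph.db" }); // assumed options shape
const graph = createGraphClientFromBackend(backend);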
package/dist/index.js CHANGED
@@ -1,18 +1,32 @@
+ import {
+   QueryClient,
+   QueryClientError
+ } from "./chunk-EEKWRX5E.js";
  import {
    appendStorageScope,
    isAncestorScopeUid,
    parseStorageScope,
    resolveAncestorScope
  } from "./chunk-TYYPRVIE.js";
+ import {
+   DEFAULT_CORE_INDEXES
+ } from "./chunk-2DHMNTV6.js";
+ import {
+   generateTypes
+ } from "./chunk-GJVVRTQT.js";
+ import {
+   compileEngineTraversal
+ } from "./chunk-D4J7Z4FE.js";
+ import {
+   deserializeFirestoreTypes,
+   serializeFirestoreTypes
+ } from "./chunk-C2QMD7RY.js";
  import {
    BOOTSTRAP_ENTRIES,
-   DEFAULT_CORE_INDEXES,
    DEFAULT_QUERY_LIMIT,
    EDGE_TYPE_SCHEMA,
-   GraphClientImpl,
    META_EDGE_TYPE,
    META_NODE_TYPE,
-   NODE_RELATION,
    NODE_TYPE_SCHEMA,
    analyzeQuerySafety,
    applyMigrationChain,
@@ -24,6 +38,7 @@ import {
    computeEdgeDocId,
    computeNodeDocId,
    createBootstrapRegistry,
+   createGraphClient,
    createGraphClientFromBackend,
    createMergedRegistry,
    createRegistry,
@@ -39,8 +54,9 @@ import {
    migrateRecords,
    precompileSource,
    validateMigrationChain
- } from "./chunk-HONQY4HF.js";
+ } from "./chunk-WRTFC5NG.js";
  import {
+   CapabilityNotSupportedError,
    CrossBackendTransactionError,
    DynamicRegistryError,
    EdgeNotFoundError,
@@ -53,21 +69,8 @@ import {
    RegistryViolationError,
    TraversalError,
    ValidationError,
-   assertSafePath,
-   assertUpdatePayloadExclusive,
    deleteField
- } from "./chunk-AWW4MUJ5.js";
- import {
-   generateTypes
- } from "./chunk-GJVVRTQT.js";
- import {
-   QueryClient,
-   QueryClientError
- } from "./chunk-EEKWRX5E.js";
- import {
-   deserializeFirestoreTypes,
-   serializeFirestoreTypes
- } from "./chunk-C2QMD7RY.js";
+ } from "./chunk-TK64DNVK.js";
  import {
    SERIALIZATION_TAG,
    isTaggedValue
@@ -308,512 +311,6 @@ function discoverEntities(entitiesDir) {
    };
  }

- // src/internal/firestore-backend.ts
- import { FieldValue } from "@google-cloud/firestore";
-
- // src/bulk.ts
- var MAX_BATCH_SIZE = 500;
- var DEFAULT_MAX_RETRIES = 3;
- var BASE_DELAY_MS = 200;
- function sleep(ms) {
-   return new Promise((resolve2) => setTimeout(resolve2, ms));
- }
- function chunk(arr, size) {
-   const chunks = [];
-   for (let i = 0; i < arr.length; i += size) {
-     chunks.push(arr.slice(i, i + size));
-   }
-   return chunks;
- }
- async function bulkDeleteDocIds(db, collectionPath, docIds, options) {
-   if (docIds.length === 0) {
-     return { deleted: 0, batches: 0, errors: [] };
-   }
-   const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
-   const maxRetries = options?.maxRetries ?? DEFAULT_MAX_RETRIES;
-   const onProgress = options?.onProgress;
-   const chunks = chunk(docIds, batchSize);
-   const errors = [];
-   let deleted = 0;
-   let completedBatches = 0;
-   for (let i = 0; i < chunks.length; i++) {
-     const ids = chunks[i];
-     let committed = false;
-     for (let attempt = 0; attempt <= maxRetries; attempt++) {
-       try {
-         const batch = db.batch();
-         const collectionRef = db.collection(collectionPath);
-         for (const id of ids) {
-           batch.delete(collectionRef.doc(id));
-         }
-         await batch.commit();
-         committed = true;
-         deleted += ids.length;
-         break;
-       } catch (err) {
-         if (attempt < maxRetries) {
-           const delay = BASE_DELAY_MS * Math.pow(2, attempt);
-           await sleep(delay);
-         } else {
-           errors.push({
-             batchIndex: i,
-             error: err instanceof Error ? err : new Error(String(err)),
-             operationCount: ids.length
-           });
-         }
-       }
-     }
-     if (committed) {
-       completedBatches++;
-     }
-     if (onProgress) {
-       onProgress({
-         completedBatches,
-         totalBatches: chunks.length,
-         deletedSoFar: deleted
-       });
-     }
-   }
-   return { deleted, batches: completedBatches, errors };
- }
- async function bulkRemoveEdges(db, collectionPath, reader, params, options) {
-   const effectiveParams = params.limit !== void 0 ? { ...params, allowCollectionScan: params.allowCollectionScan ?? true } : { ...params, limit: 0, allowCollectionScan: params.allowCollectionScan ?? true };
-   const edges = await reader.findEdges(effectiveParams);
-   const docIds = edges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
-   return bulkDeleteDocIds(db, collectionPath, docIds, options);
- }
- async function deleteSubcollectionsRecursive(db, collectionPath, docId, options) {
-   const docRef = db.collection(collectionPath).doc(docId);
-   const subcollections = await docRef.listCollections();
-   if (subcollections.length === 0) return { deleted: 0, errors: [] };
-   let totalDeleted = 0;
-   const allErrors = [];
-   const subOptions = options ? { batchSize: options.batchSize, maxRetries: options.maxRetries } : void 0;
-   for (const subCollRef of subcollections) {
-     const subCollPath = subCollRef.path;
-     const snapshot = await subCollRef.select().get();
-     const subDocIds = snapshot.docs.map((d) => d.id);
-     for (const subDocId of subDocIds) {
-       const subResult = await deleteSubcollectionsRecursive(db, subCollPath, subDocId, subOptions);
-       totalDeleted += subResult.deleted;
-       allErrors.push(...subResult.errors);
-     }
-     if (subDocIds.length > 0) {
-       const result = await bulkDeleteDocIds(db, subCollPath, subDocIds, subOptions);
-       totalDeleted += result.deleted;
-       allErrors.push(...result.errors);
-     }
-   }
-   return { deleted: totalDeleted, errors: allErrors };
- }
- async function removeNodeCascade(db, collectionPath, reader, uid, options) {
-   const [outgoingRaw, incomingRaw] = await Promise.all([
-     reader.findEdges({ aUid: uid, allowCollectionScan: true, limit: 0 }),
-     reader.findEdges({ bUid: uid, allowCollectionScan: true, limit: 0 })
-   ]);
-   const outgoing = outgoingRaw.filter((e) => e.axbType !== NODE_RELATION);
-   const incoming = incomingRaw.filter((e) => e.axbType !== NODE_RELATION);
-   const edgeDocIdSet = /* @__PURE__ */ new Set();
-   const allEdges = [];
-   for (const edge of [...outgoing, ...incoming]) {
-     const docId = computeEdgeDocId(edge.aUid, edge.axbType, edge.bUid);
-     if (!edgeDocIdSet.has(docId)) {
-       edgeDocIdSet.add(docId);
-       allEdges.push(edge);
-     }
-   }
-   const shouldDeleteSubcollections = options?.deleteSubcollections !== false;
-   const nodeDocId = computeNodeDocId(uid);
-   let subcollectionResult = { deleted: 0, errors: [] };
-   if (shouldDeleteSubcollections) {
-     subcollectionResult = await deleteSubcollectionsRecursive(
-       db,
-       collectionPath,
-       nodeDocId,
-       options
-     );
-   }
-   const edgeDocIds = allEdges.map((e) => computeEdgeDocId(e.aUid, e.axbType, e.bUid));
-   const allDocIds = [...edgeDocIds, nodeDocId];
-   const batchSize = Math.min(options?.batchSize ?? MAX_BATCH_SIZE, MAX_BATCH_SIZE);
-   const result = await bulkDeleteDocIds(db, collectionPath, allDocIds, {
-     ...options,
-     batchSize
-   });
-   const totalChunks = Math.ceil(allDocIds.length / batchSize);
-   const nodeChunkIndex = totalChunks - 1;
-   const nodeDeleted = !result.errors.some((e) => e.batchIndex === nodeChunkIndex);
-   const topLevelEdgesDeleted = nodeDeleted ? result.deleted - 1 : result.deleted;
-   return {
-     deleted: result.deleted + subcollectionResult.deleted,
-     batches: result.batches,
-     errors: [...result.errors, ...subcollectionResult.errors],
-     edgesDeleted: topLevelEdgesDeleted,
-     nodeDeleted
-   };
- }
-
- // src/internal/firestore-adapter.ts
- function createFirestoreAdapter(db, collectionPath) {
-   const collectionRef = db.collection(collectionPath);
-   return {
-     collectionPath,
-     async getDoc(docId) {
-       const snap = await collectionRef.doc(docId).get();
-       if (!snap.exists) return null;
-       return snap.data();
-     },
-     async setDoc(docId, data, options) {
-       if (options?.merge) {
-         await collectionRef.doc(docId).set(data, { merge: true });
-       } else {
-         await collectionRef.doc(docId).set(data);
-       }
-     },
-     async updateDoc(docId, data) {
-       await collectionRef.doc(docId).update(data);
-     },
-     async deleteDoc(docId) {
-       await collectionRef.doc(docId).delete();
-     },
-     async query(filters, options) {
-       let q = collectionRef;
-       for (const f of filters) {
-         q = q.where(f.field, f.op, f.value);
-       }
-       if (options?.orderBy) {
-         q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
-       }
-       if (options?.limit !== void 0) {
-         q = q.limit(options.limit);
-       }
-       const snap = await q.get();
-       return snap.docs.map((doc) => doc.data());
-     }
-   };
- }
- function createTransactionAdapter(db, collectionPath, tx) {
-   const collectionRef = db.collection(collectionPath);
-   return {
-     async getDoc(docId) {
-       const snap = await tx.get(collectionRef.doc(docId));
-       if (!snap.exists) return null;
-       return snap.data();
-     },
-     setDoc(docId, data, options) {
-       if (options?.merge) {
-         tx.set(collectionRef.doc(docId), data, { merge: true });
-       } else {
-         tx.set(collectionRef.doc(docId), data);
-       }
-     },
-     updateDoc(docId, data) {
-       tx.update(collectionRef.doc(docId), data);
-     },
-     deleteDoc(docId) {
-       tx.delete(collectionRef.doc(docId));
-     },
-     async query(filters, options) {
-       let q = collectionRef;
-       for (const f of filters) {
-         q = q.where(f.field, f.op, f.value);
-       }
-       if (options?.orderBy) {
-         q = q.orderBy(options.orderBy.field, options.orderBy.direction ?? "asc");
-       }
-       if (options?.limit !== void 0) {
-         q = q.limit(options.limit);
-       }
-       const snap = await tx.get(q);
-       return snap.docs.map((doc) => doc.data());
-     }
-   };
- }
- function createBatchAdapter(db, collectionPath) {
-   const collectionRef = db.collection(collectionPath);
-   const batch = db.batch();
-   return {
-     setDoc(docId, data, options) {
-       if (options?.merge) {
-         batch.set(collectionRef.doc(docId), data, { merge: true });
-       } else {
-         batch.set(collectionRef.doc(docId), data);
-       }
-     },
-     updateDoc(docId, data) {
-       batch.update(collectionRef.doc(docId), data);
-     },
-     deleteDoc(docId) {
-       batch.delete(collectionRef.doc(docId));
-     },
-     async commit() {
-       await batch.commit();
-     }
-   };
- }
-
- // src/internal/pipeline-adapter.ts
- var _Pipelines = null;
- async function getPipelines() {
-   if (!_Pipelines) {
-     const mod = await import("@google-cloud/firestore");
-     _Pipelines = mod.Pipelines;
-   }
-   return _Pipelines;
- }
- function buildFilterExpression(P, filter) {
-   const { field: fieldName, op, value } = filter;
-   switch (op) {
-     case "==":
-       return P.equal(fieldName, value);
-     case "!=":
-       return P.notEqual(fieldName, value);
-     case "<":
-       return P.lessThan(fieldName, value);
-     case "<=":
-       return P.lessThanOrEqual(fieldName, value);
-     case ">":
-       return P.greaterThan(fieldName, value);
-     case ">=":
-       return P.greaterThanOrEqual(fieldName, value);
-     case "in":
-       return P.equalAny(fieldName, value);
-     case "not-in":
-       return P.notEqualAny(fieldName, value);
-     case "array-contains":
-       return P.arrayContains(fieldName, value);
-     case "array-contains-any":
-       return P.arrayContainsAny(fieldName, value);
-     default:
-       throw new Error(`Unsupported filter op for pipeline mode: ${op}`);
-   }
- }
- function createPipelineQueryAdapter(db, collectionPath) {
-   return {
-     async query(filters, options) {
-       const P = await getPipelines();
-       let pipeline = db.pipeline().collection(collectionPath);
-       if (filters.length === 1) {
-         pipeline = pipeline.where(buildFilterExpression(P, filters[0]));
-       } else if (filters.length > 1) {
-         const [first, second, ...rest] = filters.map((f) => buildFilterExpression(P, f));
-         pipeline = pipeline.where(P.and(first, second, ...rest));
-       }
-       if (options?.orderBy) {
-         const f = P.field(options.orderBy.field);
-         const ordering = options.orderBy.direction === "desc" ? f.descending() : f.ascending();
-         pipeline = pipeline.sort(ordering);
-       }
-       if (options?.limit !== void 0) {
-         pipeline = pipeline.limit(options.limit);
-       }
-       const snap = await pipeline.execute();
-       return snap.results.map((r) => r.data());
-     }
-   };
- }
-
- // src/internal/firestore-backend.ts
- function dottedDataPath(op) {
-   assertSafePath(op.path);
-   return `data.${op.path.join(".")}`;
- }
- function buildFirestoreUpdate(update, db) {
-   assertUpdatePayloadExclusive(update);
-   const out = {
-     updatedAt: FieldValue.serverTimestamp()
-   };
-   if (update.replaceData) {
-     out.data = deserializeFirestoreTypes(update.replaceData, db);
-   } else if (update.dataOps) {
-     for (const op of update.dataOps) {
-       const key = dottedDataPath(op);
-       out[key] = op.delete ? FieldValue.delete() : op.value;
-     }
-   }
-   if (update.v !== void 0) {
-     out.v = update.v;
-   }
-   return out;
- }
- function stampWritableRecord(record) {
-   const now = FieldValue.serverTimestamp();
-   const out = {
-     aType: record.aType,
-     aUid: record.aUid,
-     axbType: record.axbType,
-     bType: record.bType,
-     bUid: record.bUid,
-     data: record.data,
-     createdAt: now,
-     updatedAt: now
-   };
-   if (record.v !== void 0) out.v = record.v;
-   return out;
- }
- var FirestoreTransactionBackend = class {
-   constructor(adapter, db) {
-     this.adapter = adapter;
-     this.db = db;
-   }
-   getDoc(docId) {
-     return this.adapter.getDoc(docId);
-   }
-   query(filters, options) {
-     return this.adapter.query(filters, options);
-   }
-   async setDoc(docId, record, mode) {
-     this.adapter.setDoc(
-       docId,
-       stampWritableRecord(record),
-       mode === "merge" ? { merge: true } : void 0
-     );
-   }
-   async updateDoc(docId, update) {
-     this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-   }
-   async deleteDoc(docId) {
-     this.adapter.deleteDoc(docId);
-   }
- };
- var FirestoreBatchBackend = class {
-   constructor(adapter, db) {
-     this.adapter = adapter;
-     this.db = db;
-   }
-   setDoc(docId, record, mode) {
-     this.adapter.setDoc(
-       docId,
-       stampWritableRecord(record),
-       mode === "merge" ? { merge: true } : void 0
-     );
-   }
-   updateDoc(docId, update) {
-     this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-   }
-   deleteDoc(docId) {
-     this.adapter.deleteDoc(docId);
-   }
-   commit() {
-     return this.adapter.commit();
-   }
- };
- var FirestoreBackendImpl = class _FirestoreBackendImpl {
-   constructor(db, collectionPath, queryMode, scopePath) {
-     this.db = db;
-     this.queryMode = queryMode;
-     this.collectionPath = collectionPath;
-     this.scopePath = scopePath;
-     this.adapter = createFirestoreAdapter(db, collectionPath);
-     if (queryMode === "pipeline") {
-       this.pipelineAdapter = createPipelineQueryAdapter(db, collectionPath);
-     }
-   }
-   collectionPath;
-   scopePath;
-   adapter;
-   pipelineAdapter;
-   // --- Reads ---
-   getDoc(docId) {
-     return this.adapter.getDoc(docId);
-   }
-   query(filters, options) {
-     if (this.pipelineAdapter) {
-       return this.pipelineAdapter.query(filters, options);
-     }
-     return this.adapter.query(filters, options);
-   }
-   // --- Writes ---
-   setDoc(docId, record, mode) {
-     return this.adapter.setDoc(
-       docId,
-       stampWritableRecord(record),
-       mode === "merge" ? { merge: true } : void 0
-     );
-   }
-   updateDoc(docId, update) {
-     return this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
-   }
-   deleteDoc(docId) {
-     return this.adapter.deleteDoc(docId);
-   }
-   // --- Transactions / Batches ---
-   runTransaction(fn) {
-     return this.db.runTransaction(async (firestoreTx) => {
-       const txAdapter = createTransactionAdapter(this.db, this.collectionPath, firestoreTx);
-       return fn(new FirestoreTransactionBackend(txAdapter, this.db));
-     });
-   }
-   createBatch() {
-     const batchAdapter = createBatchAdapter(this.db, this.collectionPath);
-     return new FirestoreBatchBackend(batchAdapter, this.db);
-   }
-   // --- Subgraphs ---
-   subgraph(parentNodeUid, name) {
-     const subPath = `${this.collectionPath}/${parentNodeUid}/${name}`;
-     const newScope = this.scopePath ? `${this.scopePath}/${name}` : name;
-     return new _FirestoreBackendImpl(this.db, subPath, this.queryMode, newScope);
-   }
-   // --- Cascade & bulk ---
-   removeNodeCascade(uid, reader, options) {
-     return removeNodeCascade(this.db, this.collectionPath, reader, uid, options);
-   }
-   bulkRemoveEdges(params, reader, options) {
-     return bulkRemoveEdges(this.db, this.collectionPath, reader, params, options);
-   }
-   // --- Cross-collection ---
-   async findEdgesGlobal(params, collectionName) {
-     const name = collectionName ?? this.collectionPath.split("/").pop();
-     const plan = buildEdgeQueryPlan(params);
-     if (plan.strategy === "get") {
-       throw new FiregraphError(
-         "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
-         "INVALID_QUERY"
-       );
-     }
-     const collectionGroupRef = this.db.collectionGroup(name);
-     let q = collectionGroupRef;
-     for (const f of plan.filters) {
-       q = q.where(f.field, f.op, f.value);
-     }
-     if (plan.options?.orderBy) {
-       q = q.orderBy(plan.options.orderBy.field, plan.options.orderBy.direction ?? "asc");
-     }
-     if (plan.options?.limit !== void 0) {
-       q = q.limit(plan.options.limit);
-     }
-     const snap = await q.get();
-     return snap.docs.map((doc) => doc.data());
-   }
- };
- function createFirestoreBackend(db, collectionPath, options = {}) {
-   const queryMode = options.queryMode ?? "pipeline";
-   const scopePath = options.scopePath ?? "";
-   return new FirestoreBackendImpl(db, collectionPath, queryMode, scopePath);
- }
-
- // src/firestore.ts
- var _standardModeWarned = false;
- function createGraphClient(db, collectionPath, options) {
-   const requestedMode = options?.queryMode ?? "pipeline";
-   const isEmulator = !!process.env.FIRESTORE_EMULATOR_HOST;
-   const effectiveMode = isEmulator ? "standard" : requestedMode;
-   if (effectiveMode === "standard" && !isEmulator && requestedMode === "standard" && !_standardModeWarned) {
-     _standardModeWarned = true;
-     console.warn(
-       "[firegraph] Standard query mode enabled. This is NOT recommended for production:\n - Enterprise Firestore: data.* filters cause full collection scans (high billing)\n - Standard Firestore: data.* filters without composite indexes will fail\n See: https://github.com/typicalday/firegraph#query-modes"
-     );
-   }
-   const backend = createFirestoreBackend(db, collectionPath, { queryMode: effectiveMode });
-   let metaBackend;
-   if (options?.registryMode?.collection && options.registryMode.collection !== collectionPath) {
-     metaBackend = createFirestoreBackend(db, options.registryMode.collection, {
-       queryMode: effectiveMode
-     });
-   }
-   return new GraphClientImpl(backend, options, metaBackend);
- }
-
  // src/indexes.ts
  function normalizeField(f) {
    return typeof f === "string" ? { path: f, desc: false } : { path: f.path, desc: !!f.desc };
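The hunk above removes the Firestore backend, its adapters, and createGraphClient from the root bundle; the import hunks earlier show createGraphClient now coming from the shared chunk-WRTFC5NG.js, so the factory itself survives the move. A minimal usage sketch based on the createGraphClient(db, collectionPath, options) signature visible in the removed code, assuming the root export continues to re-export it as in 0.12.0 (the collection name and full options shape are assumptions, not shown in this diff):

import { Firestore } from "@google-cloud/firestore";
import { createGraphClient } from "@typicalday/firegraph";

const db = new Firestore();
// queryMode defaults to "pipeline"; requesting "standard" triggers the warning
// shown in the removed src/firestore.ts block above.
const graph = createGraphClient(db, "graph", { queryMode: "pipeline" });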
@@ -888,35 +385,6 @@ function generateIndexConfig(collection, options = {}) {
    return { indexes, fieldOverrides: [] };
  }

- // src/record.ts
- import { FieldValue as FieldValue2 } from "@google-cloud/firestore";
- function buildNodeRecord(aType, uid, data) {
-   const now = FieldValue2.serverTimestamp();
-   return {
-     aType,
-     aUid: uid,
-     axbType: NODE_RELATION,
-     bType: aType,
-     bUid: uid,
-     data,
-     createdAt: now,
-     updatedAt: now
-   };
- }
- function buildEdgeRecord(aType, aUid, axbType, bType, bUid, data) {
-   const now = FieldValue2.serverTimestamp();
-   return {
-     aType,
-     aUid,
-     axbType,
-     bType,
-     bUid,
-     data,
-     createdAt: now,
-     updatedAt: now
-   };
- }
-
  // src/traverse.ts
  var DEFAULT_LIMIT = 10;
  var DEFAULT_MAX_READS = 100;
@@ -925,6 +393,16 @@ var _crossGraphWarned = false;
  function isGraphClient(reader) {
    return "subgraph" in reader && typeof reader.subgraph === "function";
  }
+ function readerSupportsExpand(reader) {
+   if (!isGraphClient(reader)) return false;
+   const client = reader;
+   return "capabilities" in client && typeof client.capabilities?.has === "function" && client.capabilities.has("query.join") && typeof client.expand === "function";
+ }
+ function readerSupportsEngineTraversal(reader) {
+   if (!isGraphClient(reader)) return false;
+   const client = reader;
+   return "capabilities" in client && typeof client.capabilities?.has === "function" && client.capabilities.has("traversal.serverSide") && typeof client.runEngineTraversal === "function";
+ }
  var Semaphore = class {
    constructor(slots) {
      this.slots = slots;
@@ -967,7 +445,15 @@ var TraversalBuilderImpl = class {
      const maxReads = options?.maxReads ?? DEFAULT_MAX_READS;
      const concurrency = options?.concurrency ?? DEFAULT_CONCURRENCY;
      const returnIntermediates = options?.returnIntermediates ?? false;
+     const engineMode = options?.engineTraversal ?? "auto";
      const semaphore = new Semaphore(concurrency);
+     if (engineMode !== "off") {
+       const engineResult = await this.tryEngineTraversal({
+         engineMode,
+         returnIntermediates
+       });
+       if (engineResult) return engineResult;
+     }
      let totalReads = 0;
      let truncated = false;
      let sources = [
@@ -992,6 +478,62 @@ var TraversalBuilderImpl = class {
        const resolvedTargetGraph = this.resolveTargetGraph(hop);
        const direction = hop.direction ?? "forward";
        const isCrossGraph = direction === "forward" && !!resolvedTargetGraph;
+       const sharedReader = sources.every((s) => s.reader === sources[0].reader) ? sources[0].reader : null;
+       const canFastPath = !isCrossGraph && sharedReader && readerSupportsExpand(sharedReader);
+       if (canFastPath && sharedReader) {
+         if (totalReads >= maxReads) {
+           hopTruncated = true;
+         } else {
+           totalReads++;
+           const limit = hop.limit ?? DEFAULT_LIMIT;
+           const expandParams = {
+             sources: sources.map((s) => s.uid),
+             axbType: hop.axbType,
+             direction
+           };
+           if (hop.aType) expandParams.aType = hop.aType;
+           if (hop.bType) expandParams.bType = hop.bType;
+           if (hop.orderBy) expandParams.orderBy = hop.orderBy;
+           if (!hop.filter) {
+             expandParams.limitPerSource = limit;
+           }
+           const result = await sharedReader.expand(expandParams);
+           let edges2 = result.edges;
+           if (hop.filter) {
+             edges2 = edges2.filter(hop.filter);
+             const counts = /* @__PURE__ */ new Map();
+             const kept = [];
+             for (const e of edges2) {
+               const sourceUid = direction === "forward" ? e.aUid : e.bUid;
+               const c = counts.get(sourceUid) ?? 0;
+               if (c < limit) {
+                 counts.set(sourceUid, c + 1);
+                 kept.push(e);
+               }
+             }
+             edges2 = kept;
+           }
+           for (const edge of edges2) {
+             hopEdges.push({ edge, reader: sharedReader });
+           }
+         }
+         const fastEdges = hopEdges.map((h) => h.edge);
+         hopResults.push({
+           axbType: hop.axbType,
+           depth,
+           edges: returnIntermediates ? [...fastEdges] : fastEdges,
+           sourceCount,
+           truncated: hopTruncated
+         });
+         if (hopTruncated) truncated = true;
+         const seen2 = /* @__PURE__ */ new Map();
+         for (const { edge, reader: edgeReader } of hopEdges) {
+           const nextUid = direction === "forward" ? edge.bUid : edge.aUid;
+           if (!seen2.has(nextUid)) seen2.set(nextUid, edgeReader);
+         }
+         sources = [...seen2.entries()].map(([uid, reader]) => ({ uid, reader }));
+         continue;
+       }
        const tasks = sources.map(({ uid, reader: sourceReader }) => async () => {
          if (totalReads >= maxReads) {
            hopTruncated = true;
@@ -1086,6 +628,88 @@ var TraversalBuilderImpl = class {
        truncated
      };
    }
+   /**
+    * Try to dispatch the entire hop chain as one engine-traversal call.
+    * Returns a `TraversalResult` on success, or `undefined` if the spec is
+    * ineligible and the caller should fall through to the per-hop loop.
+    *
+    * `'force'` mode throws on any ineligibility instead of returning
+    * `undefined` — the caller intentionally opted out of fallback.
+    */
+   async tryEngineTraversal(args) {
+     const { engineMode, returnIntermediates } = args;
+     const refuse = (reason) => {
+       if (engineMode === "force") {
+         throw new FiregraphError(`engineTraversal: 'force' but ${reason}`, "UNSUPPORTED_OPERATION");
+       }
+       return void 0;
+     };
+     if (!readerSupportsEngineTraversal(this.reader)) {
+       return refuse("reader does not declare traversal.serverSide capability");
+     }
+     const client = this.reader;
+     const engineHops = [];
+     for (let i = 0; i < this.hops.length; i++) {
+       const hop = this.hops[i];
+       if (hop.filter) {
+         return refuse(`hop ${i} (${hop.axbType}) carries a JS filter callback`);
+       }
+       const targetGraph = this.resolveTargetGraph(hop);
+       const direction = hop.direction ?? "forward";
+       if (targetGraph) {
+         return refuse(`hop ${i} (${hop.axbType}) is cross-graph (targetGraph=${targetGraph})`);
+       }
+       const limit = hop.limit ?? DEFAULT_LIMIT;
+       const engineHop = {
+         axbType: hop.axbType,
+         direction,
+         limitPerSource: limit
+       };
+       if (hop.aType) engineHop.aType = hop.aType;
+       if (hop.bType) engineHop.bType = hop.bType;
+       if (hop.orderBy) engineHop.orderBy = hop.orderBy;
+       engineHops.push(engineHop);
+     }
+     const params = {
+       sources: [this.startUid],
+       hops: engineHops
+     };
+     const compiled = compileEngineTraversal(params);
+     if (!compiled.eligible) {
+       return refuse(compiled.reason);
+     }
+     let engineResult;
+     try {
+       engineResult = await client.runEngineTraversal(params);
+     } catch (err) {
+       if (engineMode === "force") throw err;
+       return void 0;
+     }
+     const hopResults = [];
+     for (let i = 0; i < this.hops.length; i++) {
+       const definedHop = this.hops[i];
+       const engineHopResult = engineResult.hops[i] ?? { edges: [], sourceCount: 0 };
+       const edges = engineHopResult.edges;
+       const hopTruncated = edges.length >= engineHops[i].limitPerSource;
+       hopResults.push({
+         axbType: definedHop.axbType,
+         depth: i,
+         edges: returnIntermediates ? [...edges] : edges,
+         sourceCount: engineHopResult.sourceCount,
+         truncated: hopTruncated
+       });
+     }
+     const lastHop = hopResults[hopResults.length - 1];
+     return {
+       nodes: lastHop.edges,
+       hops: hopResults,
+       // One server-side round trip — same accounting as the `expand()`
+       // fast path. The tree response can carry up to `estimatedReads`
+       // docs total, but the budget is in round trips, not docs.
+       totalReads: 1,
+       truncated: hopResults.some((h) => h.truncated)
+     };
+   }
    /**
     * Resolve the targetGraph for a hop. Priority:
     * 1. Explicit `hop.targetGraph` (user override)
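The hunk above adds tryEngineTraversal, which gates on the traversal.serverSide capability and a compileEngineTraversal eligibility check, and is driven by the engineTraversal option read in run() ("auto" by default, "force" to throw instead of falling back, "off" to skip). A hedged sketch of how the option might be passed from user code, assuming the public traversal builder forwards options to the compiled run(options) shown above; the traverse/hop method names are hypothetical and not confirmed by this diff:

// Hypothetical builder call chain; only the option names and values
// ("engineTraversal": "auto" | "force" | "off", plus maxReads/concurrency)
// are taken from the compiled run(options) code above.
const result = await graph
  .traverse(startUid)            // assumed entry point on the client
  .hop({ axbType: "authored" })  // assumed hop-builder method
  .run({ engineTraversal: "force", maxReads: 100 });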
@@ -1207,6 +831,7 @@ function defineViews(input) {
  }
  export {
    BOOTSTRAP_ENTRIES,
+   CapabilityNotSupportedError,
    CrossBackendTransactionError,
    DEFAULT_CORE_INDEXES,
    DEFAULT_QUERY_LIMIT,
@@ -1233,9 +858,7 @@ export {
    appendStorageScope,
    applyMigrationChain,
    buildEdgeQueryPlan,
-   buildEdgeRecord,
    buildNodeQueryPlan,
-   buildNodeRecord,
    compileMigrationFn,
    compileMigrations,
    compileSchema,