@typicalday/firegraph 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +317 -73
- package/dist/backend-DuvHGgK1.d.cts +1897 -0
- package/dist/backend-DuvHGgK1.d.ts +1897 -0
- package/dist/backend.cjs +222 -3
- package/dist/backend.cjs.map +1 -1
- package/dist/backend.d.cts +25 -5
- package/dist/backend.d.ts +25 -5
- package/dist/backend.js +197 -4
- package/dist/backend.js.map +1 -1
- package/dist/chunk-2DHMNTV6.js +16 -0
- package/dist/chunk-2DHMNTV6.js.map +1 -0
- package/dist/chunk-4MMQ5W74.js +288 -0
- package/dist/chunk-4MMQ5W74.js.map +1 -0
- package/dist/chunk-D4J7Z4FE.js +67 -0
- package/dist/chunk-D4J7Z4FE.js.map +1 -0
- package/dist/chunk-N5HFDWQX.js +23 -0
- package/dist/chunk-N5HFDWQX.js.map +1 -0
- package/dist/chunk-PAD7WFFU.js +573 -0
- package/dist/chunk-PAD7WFFU.js.map +1 -0
- package/dist/{chunk-AWW4MUJ5.js → chunk-TK64DNVK.js} +12 -1
- package/dist/chunk-TK64DNVK.js.map +1 -0
- package/dist/{chunk-HONQY4HF.js → chunk-WRTFC5NG.js} +362 -17
- package/dist/chunk-WRTFC5NG.js.map +1 -0
- package/dist/client-BKi3vk0Q.d.ts +34 -0
- package/dist/client-BrsaXtDV.d.cts +34 -0
- package/dist/cloudflare/index.cjs +930 -3
- package/dist/cloudflare/index.cjs.map +1 -1
- package/dist/cloudflare/index.d.cts +213 -12
- package/dist/cloudflare/index.d.ts +213 -12
- package/dist/cloudflare/index.js +562 -281
- package/dist/cloudflare/index.js.map +1 -1
- package/dist/codegen/index.d.cts +1 -1
- package/dist/codegen/index.d.ts +1 -1
- package/dist/errors-BRc3I_eH.d.cts +73 -0
- package/dist/errors-BRc3I_eH.d.ts +73 -0
- package/dist/firestore-enterprise/index.cjs +3877 -0
- package/dist/firestore-enterprise/index.cjs.map +1 -0
- package/dist/firestore-enterprise/index.d.cts +141 -0
- package/dist/firestore-enterprise/index.d.ts +141 -0
- package/dist/firestore-enterprise/index.js +985 -0
- package/dist/firestore-enterprise/index.js.map +1 -0
- package/dist/firestore-standard/index.cjs +3117 -0
- package/dist/firestore-standard/index.cjs.map +1 -0
- package/dist/firestore-standard/index.d.cts +49 -0
- package/dist/firestore-standard/index.d.ts +49 -0
- package/dist/firestore-standard/index.js +283 -0
- package/dist/firestore-standard/index.js.map +1 -0
- package/dist/index.cjs +590 -550
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +9 -37
- package/dist/index.d.ts +9 -37
- package/dist/index.js +178 -555
- package/dist/index.js.map +1 -1
- package/dist/{registry-Fi074zVa.d.ts → registry-Bc7h6WTM.d.cts} +1 -1
- package/dist/{registry-B1qsVL0E.d.cts → registry-C2KUPVZj.d.ts} +1 -1
- package/dist/{scope-path-B1G3YiA7.d.cts → scope-path-CROFZGr9.d.cts} +1 -56
- package/dist/{scope-path-B1G3YiA7.d.ts → scope-path-CROFZGr9.d.ts} +1 -56
- package/dist/sqlite/index.cjs +3631 -0
- package/dist/sqlite/index.cjs.map +1 -0
- package/dist/sqlite/index.d.cts +111 -0
- package/dist/sqlite/index.d.ts +111 -0
- package/dist/sqlite/index.js +1164 -0
- package/dist/sqlite/index.js.map +1 -0
- package/package.json +33 -3
- package/dist/backend-BsR0lnFL.d.ts +0 -200
- package/dist/backend-Ct-fLlkG.d.cts +0 -200
- package/dist/chunk-AWW4MUJ5.js.map +0 -1
- package/dist/chunk-HONQY4HF.js.map +0 -1
- package/dist/types-DxYLy8Ol.d.cts +0 -770
- package/dist/types-DxYLy8Ol.d.ts +0 -770
|
@@ -0,0 +1,985 @@
|
|
|
1
|
+
import {
|
|
2
|
+
compileEngineTraversal
|
|
3
|
+
} from "../chunk-D4J7Z4FE.js";
|
|
4
|
+
import {
|
|
5
|
+
bulkRemoveEdges,
|
|
6
|
+
createBatchAdapter,
|
|
7
|
+
createFirestoreAdapter,
|
|
8
|
+
createTransactionAdapter,
|
|
9
|
+
removeNodeCascade,
|
|
10
|
+
runFirestoreAggregate,
|
|
11
|
+
runFirestoreClassicExpand,
|
|
12
|
+
runFirestoreFindEdgesProjected,
|
|
13
|
+
runFirestoreFindNearest
|
|
14
|
+
} from "../chunk-PAD7WFFU.js";
|
|
15
|
+
import {
|
|
16
|
+
deserializeFirestoreTypes
|
|
17
|
+
} from "../chunk-C2QMD7RY.js";
|
|
18
|
+
import {
|
|
19
|
+
createCapabilities
|
|
20
|
+
} from "../chunk-N5HFDWQX.js";
|
|
21
|
+
import {
|
|
22
|
+
META_EDGE_TYPE,
|
|
23
|
+
META_NODE_TYPE,
|
|
24
|
+
NODE_RELATION,
|
|
25
|
+
buildEdgeQueryPlan,
|
|
26
|
+
createGraphClient,
|
|
27
|
+
createMergedRegistry,
|
|
28
|
+
createRegistry,
|
|
29
|
+
generateId
|
|
30
|
+
} from "../chunk-WRTFC5NG.js";
|
|
31
|
+
import {
|
|
32
|
+
FiregraphError,
|
|
33
|
+
assertSafePath,
|
|
34
|
+
assertUpdatePayloadExclusive,
|
|
35
|
+
flattenPatch
|
|
36
|
+
} from "../chunk-TK64DNVK.js";
|
|
37
|
+
import "../chunk-EQJUUVFG.js";
|
|
38
|
+
|
|
39
|
+
// src/firestore-enterprise/backend.ts
|
|
40
|
+
import { FieldValue } from "@google-cloud/firestore";
|
|
41
|
+
|
|
42
|
+
// src/internal/firestore-bulk-dml.ts
|
|
43
|
+
var _Pipelines = null;
var _Timestamp = null;
/**
 * Lazily import @google-cloud/firestore and cache its Pipelines and
 * Timestamp exports. Returns them under the `{ P, Ts }` shorthand used
 * by the bulk-DML helpers below. The import happens at most once per
 * process (truthy cache check, matching the rest of this module).
 */
async function getFirestoreSurface() {
  if (!_Pipelines || !_Timestamp) {
    ({ Pipelines: _Pipelines, Timestamp: _Timestamp } = await import("@google-cloud/firestore"));
  }
  return { P: _Pipelines, Ts: _Timestamp };
}
|
|
53
|
+
/**
 * Translate one firegraph filter descriptor into a Pipelines boolean
 * expression. `P` is the Pipelines namespace; `filter` carries
 * `{ field, op, value }`. Throws FiregraphError("INVALID_QUERY") for an
 * operator the pipeline DML path cannot express.
 */
function buildFilterExpression(P, filter) {
  const { field: fieldName, op, value } = filter;
  // Lazy factories: only the matched entry ever calls into P.
  const factories = new Map([
    ["==", () => P.equal(fieldName, value)],
    ["!=", () => P.notEqual(fieldName, value)],
    ["<", () => P.lessThan(fieldName, value)],
    ["<=", () => P.lessThanOrEqual(fieldName, value)],
    [">", () => P.greaterThan(fieldName, value)],
    [">=", () => P.greaterThanOrEqual(fieldName, value)],
    ["in", () => P.equalAny(fieldName, value)],
    ["not-in", () => P.notEqualAny(fieldName, value)],
    ["array-contains", () => P.arrayContains(fieldName, value)],
    ["array-contains-any", () => P.arrayContainsAny(fieldName, value)]
  ]);
  const factory = factories.get(op);
  if (factory === void 0) {
    throw new FiregraphError(
      `bulkDelete/bulkUpdate: unsupported filter op "${op}" for pipeline DML.`,
      "INVALID_QUERY"
    );
  }
  return factory();
}
|
|
83
|
+
/**
 * Apply the given filters to a pipeline as a single where() stage:
 * one filter goes in directly, several are AND-combined.
 */
function applyWhere(P, pipeline, filters) {
  const predicates = filters.map((filter) => buildFilterExpression(P, filter));
  if (predicates.length !== 1) {
    const [head, next, ...tail] = predicates;
    return pipeline.where(P.and(head, next, ...tail));
  }
  return pipeline.where(predicates[0]);
}
|
|
91
|
+
/**
 * Guard against whole-collection DML: bulkDelete/bulkUpdate on Firestore
 * must always carry at least one filter. `op` ("delete" | "update") only
 * affects the method name quoted in the error message.
 */
function assertNonEmptyFilters(filters, op) {
  if (filters.length > 0) return;
  const verb = op === "delete" ? "Delete" : "Update";
  throw new FiregraphError(
    `bulk${verb}() on Firestore requires at least one filter; an empty filter list would target the whole collection. To wipe a subgraph, use removeNodeCascade() on the parent node instead.`,
    "INVALID_QUERY"
  );
}
|
|
99
|
+
/**
 * Server-side bulk delete via Pipeline DML: filter the collection and
 * delete every matching row in one engine-side operation.
 * Returns `{ deleted, batches, errors }`; `batches` is always 1 because
 * the engine executes the whole delete as a single pipeline.
 */
async function runFirestorePipelineDelete(db, collectionPath, filters, _options) {
  assertNonEmptyFilters(filters, "delete");
  const { P } = await getFirestoreSurface();
  const base = db.pipeline().collection(collectionPath);
  const filtered = applyWhere(P, base, filters);
  const snapshot = await filtered.delete().execute();
  return { deleted: snapshot.results.length, batches: 1, errors: [] };
}
|
|
111
|
+
/**
 * Server-side bulk update via Pipeline DML. `patch.data` is flattened to
 * field ops; each becomes a constant transform aliased to its dotted
 * `data.*` path, plus an `updatedAt` stamp. Rejects empty patches and
 * deleteField() sentinels (unsupported by preview Pipeline DML).
 */
async function runFirestorePipelineUpdate(db, collectionPath, filters, patch, _options) {
  assertNonEmptyFilters(filters, "update");
  const fieldOps = flattenPatch(patch.data);
  if (fieldOps.length === 0) {
    throw new FiregraphError(
      "bulkUpdate(): patch.data produced no field updates. An empty patch would only stamp updatedAt, which is almost certainly a bug.",
      "INVALID_QUERY"
    );
  }
  if (fieldOps.some((fieldOp) => fieldOp.delete)) {
    throw new FiregraphError(
      `bulkUpdate(): preview Pipeline DML does not support deleteField() sentinels (no typed delete-transform on @google-cloud/firestore@8.5.0's Pipeline.update(AliasedExpression[])). Drop the delete from the patch or use replaceEdge/replaceNode for the affected rows.`,
      "INVALID_QUERY"
    );
  }
  const { P, Ts } = await getFirestoreSurface();
  const transforms = fieldOps.map(
    (fieldOp) => P.constant(fieldOp.value).as(`data.${fieldOp.path.join(".")}`)
  );
  transforms.push(P.constant(Ts.now()).as("updatedAt"));
  const filtered = applyWhere(P, db.pipeline().collection(collectionPath), filters);
  const snapshot = await filtered.update(transforms).execute();
  // NOTE(review): the result reuses the `deleted` key for the matched-row
  // count, mirroring the delete result shape — confirm callers expect this.
  return { deleted: snapshot.results.length, batches: 1, errors: [] };
}
|
|
143
|
+
|
|
144
|
+
// src/internal/firestore-expand.ts
|
|
145
|
+
var _Pipelines2 = null;
// Lazily resolve the Pipelines namespace for the expand helpers,
// importing @google-cloud/firestore at most once (truthy cache check).
async function getPipelines() {
  if (!_Pipelines2) {
    ({ Pipelines: _Pipelines2 } = await import("@google-cloud/firestore"));
  }
  return _Pipelines2;
}
|
|
153
|
+
/**
 * Single-hop expand() over the edge collection using Pipelines.
 * Finds edges of `params.axbType` whose source uid (aUid for "forward",
 * bUid for "reverse") is in `params.sources`, with optional aType/bType
 * narrowing, ordering, and a per-source limit. When `params.hydrate` is
 * set, additionally fetches the target rows and returns them aligned
 * one-to-one with `edges` (null where missing).
 */
async function runFirestorePipelineExpand(db, collectionPath, params) {
  // No sources → nothing to expand; shape of the empty result depends on hydrate.
  if (params.sources.length === 0) {
    return params.hydrate ? { edges: [], targets: [] } : { edges: [] };
  }
  if (params.axbType.length === 0) {
    throw new FiregraphError("expand(): axbType must be a non-empty string.", "INVALID_QUERY");
  }
  const direction = params.direction ?? "forward";
  const sourceField = direction === "forward" ? "aUid" : "bUid";
  const P = await getPipelines();
  const exprs = [
    P.equal("axbType", params.axbType),
    P.equalAny(sourceField, params.sources)
  ];
  if (params.aType !== void 0) exprs.push(P.equal("aType", params.aType));
  if (params.bType !== void 0) exprs.push(P.equal("bType", params.bType));
  let pipeline = db.pipeline().collection(collectionPath);
  if (exprs.length === 1) {
    pipeline = pipeline.where(exprs[0]);
  } else {
    const [first, second, ...rest] = exprs;
    pipeline = pipeline.where(P.and(first, second, ...rest));
  }
  if (params.orderBy) {
    const f = P.field(params.orderBy.field);
    const ordering = params.orderBy.direction === "desc" ? f.descending() : f.ascending();
    pipeline = pipeline.sort(ordering);
  }
  // limitPerSource is applied as a single global cap (sources × limit),
  // not enforced per individual source.
  const totalLimit = params.limitPerSource !== void 0 ? params.sources.length * params.limitPerSource : void 0;
  if (totalLimit !== void 0) {
    pipeline = pipeline.limit(totalLimit);
  }
  const snap = await pipeline.execute();
  let edges = snap.results.map((r) => r.data());
  // Drop self-edges for NODE_RELATION rows. Note this happens after the
  // limit, so filtered rows still count against totalLimit.
  if (params.axbType === NODE_RELATION) {
    edges = edges.filter((e) => e.aUid !== e.bUid);
  }
  if (!params.hydrate) return { edges };
  const targetUids = edges.map((e) => direction === "forward" ? e.bUid : e.aUid);
  const uniqueTargets = [...new Set(targetUids)];
  if (uniqueTargets.length === 0) {
    return { edges, targets: [] };
  }
  // Hydration: fetch NODE_RELATION rows whose aUid is a target uid.
  // Rows are keyed by bUid — presumably node documents are self-edges
  // with aUid === bUid; confirm against the node-record writer.
  const hydratePipeline = db.pipeline().collection(collectionPath).where(
    P.and(P.equal("axbType", NODE_RELATION), P.equalAny("aUid", uniqueTargets))
  );
  const hydrateSnap = await hydratePipeline.execute();
  const byUid = /* @__PURE__ */ new Map();
  for (const r of hydrateSnap.results) {
    const row = r.data();
    byUid.set(row.bUid, row);
  }
  // Preserve per-edge alignment (duplicates included); null for misses.
  const targets = targetUids.map((uid) => byUid.get(uid) ?? null);
  return { edges, targets };
}
|
|
208
|
+
|
|
209
|
+
// src/internal/firestore-fulltext.ts
|
|
210
|
+
// Built-in envelope keys of an edge record; these can never be
// text-indexed, so fullTextSearch() field paths must avoid them.
var ENVELOPE_FIELDS = /* @__PURE__ */ new Set(
  ["aType", "aUid", "axbType", "bType", "bUid", "createdAt", "updatedAt", "v"]
);
/**
 * Normalize a fullTextSearch() field path to live under `data.*`.
 * Rejects envelope field names outright; passes through paths that are
 * already scoped to `data`, and prefixes everything else with `data.`.
 */
function normalizeFullTextFieldPath(field) {
  if (ENVELOPE_FIELDS.has(field)) {
    throw new FiregraphError(
      `fullTextSearch(): field '${field}' is a built-in envelope field \u2014 text-indexed fields must live under \`data.*\`. Use a path like 'data.${field}' if you really meant a nested data field.`,
      "INVALID_QUERY"
    );
  }
  const alreadyScoped = field === "data" || field.startsWith("data.");
  return alreadyScoped ? field : `data.${field}`;
}
|
|
230
|
+
var _Pipelines3 = null;
// Lazily resolve the Pipelines namespace for full-text search,
// importing @google-cloud/firestore at most once (truthy cache check).
async function getPipelines2() {
  if (!_Pipelines3) {
    ({ Pipelines: _Pipelines3 } = await import("@google-cloud/firestore"));
  }
  return _Pipelines3;
}
|
|
238
|
+
/**
 * Full-text search over the collection using the Pipelines search stage,
 * scored and sorted by relevance, with optional aType/axbType/bType
 * narrowing. Validates `query` and `limit` up front; a non-empty
 * `fields` option is currently rejected (per-field predicates are not
 * available on this client version).
 */
async function runFirestoreFullTextSearch(db, collectionPath, params) {
  if (typeof params.query !== "string" || params.query.length === 0) {
    throw new FiregraphError(
      "fullTextSearch(): query must be a non-empty string.",
      "INVALID_QUERY"
    );
  }
  if (!Number.isInteger(params.limit) || params.limit <= 0) {
    throw new FiregraphError(
      `fullTextSearch(): limit must be a positive integer (got ${params.limit}).`,
      "INVALID_QUERY"
    );
  }
  // Note: normalization runs (and may throw on envelope fields) before
  // the blanket "fields unsupported" rejection below.
  const normalizedFields = params.fields?.map((f) => normalizeFullTextFieldPath(f));
  const P = await getPipelines2();
  if (normalizedFields !== void 0 && normalizedFields.length > 0) {
    throw new FiregraphError(
      "fullTextSearch(): the `fields` option is not yet supported \u2014 per-field text predicates are not available in @google-cloud/firestore@8.5.0. Omit `fields` to search all indexed fields.",
      "INVALID_QUERY"
    );
  }
  const searchQuery = P.documentMatches(params.query);
  let pipeline = db.pipeline().collection(collectionPath).search({
    query: searchQuery,
    sort: P.score().descending()
  });
  // Optional type narrowing applied after the search stage.
  const whereExprs = [];
  if (params.aType) whereExprs.push(P.equal("aType", params.aType));
  if (params.axbType) whereExprs.push(P.equal("axbType", params.axbType));
  if (params.bType) whereExprs.push(P.equal("bType", params.bType));
  if (whereExprs.length === 1) {
    pipeline = pipeline.where(whereExprs[0]);
  } else if (whereExprs.length > 1) {
    const [first, second, ...rest] = whereExprs;
    pipeline = pipeline.where(P.and(first, second, ...rest));
  }
  pipeline = pipeline.limit(params.limit);
  const snap = await pipeline.execute();
  return snap.results.map((r) => r.data());
}
|
|
278
|
+
|
|
279
|
+
// src/internal/firestore-geo.ts
|
|
280
|
+
import { GeoPoint } from "@google-cloud/firestore";
|
|
281
|
+
// Built-in envelope keys of an edge record; geo-indexed fields can never
// be one of these — they must live under `data.*`.
var ENVELOPE_FIELDS2 = /* @__PURE__ */ new Set(
  ["aType", "aUid", "axbType", "bType", "bUid", "createdAt", "updatedAt", "v"]
);
/**
 * Normalize a geoSearch() geoField path to live under `data.*`.
 * Rejects envelope field names; passes through already-scoped `data`
 * paths and prefixes everything else with `data.`.
 */
function normalizeGeoFieldPath(field) {
  if (ENVELOPE_FIELDS2.has(field)) {
    throw new FiregraphError(
      `geoSearch(): geoField '${field}' is a built-in envelope field \u2014 geo-indexed fields must live under \`data.*\`. Use a path like 'data.${field}' if you really meant a nested data field.`,
      "INVALID_QUERY"
    );
  }
  const alreadyScoped = field === "data" || field.startsWith("data.");
  return alreadyScoped ? field : `data.${field}`;
}
|
|
301
|
+
/**
 * Validate a latitude/longitude pair for geoSearch(): lat must be a
 * finite number in [-90, 90], lng finite in [-180, 180]. Throws
 * FiregraphError("INVALID_QUERY") otherwise (NaN/Infinity included).
 */
function assertValidGeoPoint(lat, lng) {
  const latOk = Number.isFinite(lat) && lat >= -90 && lat <= 90;
  if (!latOk) {
    throw new FiregraphError(
      `geoSearch(): point.lat must be in [-90, 90] (got ${lat}).`,
      "INVALID_QUERY"
    );
  }
  const lngOk = Number.isFinite(lng) && lng >= -180 && lng <= 180;
  if (!lngOk) {
    throw new FiregraphError(
      `geoSearch(): point.lng must be in [-180, 180] (got ${lng}).`,
      "INVALID_QUERY"
    );
  }
}
|
|
315
|
+
var _Pipelines4 = null;
// Lazily resolve the Pipelines namespace for geo search,
// importing @google-cloud/firestore at most once (truthy cache check).
async function getPipelines3() {
  if (!_Pipelines4) {
    ({ Pipelines: _Pipelines4 } = await import("@google-cloud/firestore"));
  }
  return _Pipelines4;
}
|
|
323
|
+
/**
 * Radius geo search via the Pipelines search stage: matches rows whose
 * `geoField` lies within `radiusMeters` of `point`, optionally sorted by
 * ascending distance (default on; pass orderByDistance === false to
 * disable), then narrowed by aType/axbType/bType and capped at `limit`.
 */
async function runFirestoreGeoSearch(db, collectionPath, params) {
  if (!Number.isFinite(params.radiusMeters) || params.radiusMeters <= 0) {
    throw new FiregraphError(
      `geoSearch(): radiusMeters must be a positive finite number (got ${params.radiusMeters}).`,
      "INVALID_QUERY"
    );
  }
  if (!Number.isInteger(params.limit) || params.limit <= 0) {
    throw new FiregraphError(
      `geoSearch(): limit must be a positive integer (got ${params.limit}).`,
      "INVALID_QUERY"
    );
  }
  assertValidGeoPoint(params.point.lat, params.point.lng);
  const geoFieldPath = normalizeGeoFieldPath(params.geoField);
  const center = new GeoPoint(params.point.lat, params.point.lng);
  const P = await getPipelines3();
  const distanceQuery = P.geoDistance(geoFieldPath, center).lessThanOrEqual(params.radiusMeters);
  // Distance ordering is opt-out: anything other than an explicit false enables it.
  const orderByDistance = params.orderByDistance !== false;
  const opts = {
    query: distanceQuery
  };
  if (orderByDistance) {
    opts.sort = P.geoDistance(geoFieldPath, center).ascending();
  }
  let pipeline = db.pipeline().collection(collectionPath).search(opts);
  // Optional type narrowing applied after the search stage.
  const whereExprs = [];
  if (params.aType) whereExprs.push(P.equal("aType", params.aType));
  if (params.axbType) whereExprs.push(P.equal("axbType", params.axbType));
  if (params.bType) whereExprs.push(P.equal("bType", params.bType));
  if (whereExprs.length === 1) {
    pipeline = pipeline.where(whereExprs[0]);
  } else if (whereExprs.length > 1) {
    const [first, second, ...rest] = whereExprs;
    pipeline = pipeline.where(P.and(first, second, ...rest));
  }
  pipeline = pipeline.limit(params.limit);
  const snap = await pipeline.execute();
  return snap.results.map((r) => r.data());
}
|
|
363
|
+
|
|
364
|
+
// src/internal/firestore-traverse.ts
|
|
365
|
+
// Shared builder for the traversal scaffolding key names.
function hopScaffoldKey(depth, kind) {
  return `hop_${depth}_${kind}`;
}
// Field name holding the nested child rows attached at `depth`.
function childArrayKey(depth) {
  return hopScaffoldKey(depth, "children");
}
// Pipeline variable name carrying the join uid defined at `depth`.
function joinVarName(depth) {
  return hopScaffoldKey(depth, "join");
}
|
|
371
|
+
var _Pipelines5 = null;
// Lazily resolve the Pipelines namespace for engine traversal,
// importing @google-cloud/firestore at most once (truthy cache check).
async function getPipelines4() {
  if (!_Pipelines5) {
    ({ Pipelines: _Pipelines5 } = await import("@google-cloud/firestore"));
  }
  return _Pipelines5;
}
|
|
379
|
+
/**
 * AND together the predicates for one traversal hop: the edge type, the
 * caller-supplied source predicate, and optional aType/bType narrowing.
 * There are always at least two operands, so P.and is safe to call.
 */
function buildHopPredicates(P, hop, sourcePredicate) {
  const predicates = [P.equal("axbType", hop.axbType), sourcePredicate];
  if (hop.aType !== void 0) predicates.push(P.equal("aType", hop.aType));
  if (hop.bType !== void 0) predicates.push(P.equal("bType", hop.bType));
  return P.and(...predicates);
}
|
|
386
|
+
/**
 * Apply a hop's optional orderBy and the computed total limit to a
 * pipeline, returning the staged pipeline.
 */
function applyHopOrderingAndLimit(P, pipeline, hop, totalLimit) {
  let staged = pipeline;
  if (hop.orderBy) {
    const fieldExpr = P.field(hop.orderBy.field);
    staged = staged.sort(
      hop.orderBy.direction === "desc" ? fieldExpr.descending() : fieldExpr.ascending()
    );
  }
  return staged.limit(totalLimit);
}
|
|
396
|
+
/**
 * Recursively build the correlated sub-pipeline for hop `depth` (>= 1).
 * The hop's source uid is matched against the join variable defined by
 * the parent hop; if a deeper hop exists, this hop in turn defines its
 * own join variable and embeds the child sub-pipeline as an array field
 * (see childArrayKey/joinVarName for the scaffolding key scheme).
 */
function buildSubPipeline(P, db, collectionPath, hops, depth) {
  const hop = hops[depth];
  const parentDepth = depth - 1;
  const sourceField = hop.direction === "forward" ? "aUid" : "bUid";
  // Correlate against the parent's join variable rather than a literal list.
  const sourcePredicate = P.equal(
    sourceField,
    P.variable(joinVarName(parentDepth))
  );
  const where = buildHopPredicates(P, hop, sourcePredicate);
  let pipeline = db.pipeline().collection(collectionPath).where(where);
  // Correlated per parent row, so the cap is just limitPerSource.
  pipeline = applyHopOrderingAndLimit(P, pipeline, hop, hop.limitPerSource);
  if (depth + 1 < hops.length) {
    const targetField = hop.direction === "forward" ? "bUid" : "aUid";
    pipeline = pipeline.define(P.field(targetField).as(joinVarName(depth)));
    const child = buildSubPipeline(P, db, collectionPath, hops, depth + 1);
    pipeline = pipeline.addFields(child.toArrayExpression().as(childArrayKey(depth)));
  }
  return pipeline;
}
|
|
415
|
+
/**
 * Build the root pipeline for hop 0 of an engine traversal: match the
 * literal source uid list (unlike deeper hops, which correlate on a join
 * variable), apply ordering and the global cap (sources × limitPerSource),
 * and — when more hops follow — define hop 0's join variable and embed
 * the recursive sub-pipeline as a nested array field.
 */
function buildRootPipeline(P, db, collectionPath, spec) {
  const hop0 = spec.hops[0];
  const sourceField = hop0.direction === "forward" ? "aUid" : "bUid";
  const sourcePredicate = P.equalAny(
    sourceField,
    spec.sources
  );
  const where = buildHopPredicates(P, hop0, sourcePredicate);
  let pipeline = db.pipeline().collection(collectionPath).where(where);
  pipeline = applyHopOrderingAndLimit(P, pipeline, hop0, spec.sources.length * hop0.limitPerSource);
  if (spec.hops.length > 1) {
    const targetField = hop0.direction === "forward" ? "bUid" : "aUid";
    pipeline = pipeline.define(P.field(targetField).as(joinVarName(0)));
    const child = buildSubPipeline(P, db, collectionPath, spec.hops, 1);
    pipeline = pipeline.addFields(child.toArrayExpression().as(childArrayKey(0)));
  }
  return pipeline;
}
|
|
433
|
+
/**
 * Return a copy of `row` without the traversal scaffolding keys (the
 * embedded child array and the join variable) attached at `depth`.
 * The input row is not mutated.
 */
function stripScaffolding(row, depth) {
  const {
    [childArrayKey(depth)]: _children,
    [joinVarName(depth)]: _join,
    ...kept
  } = row;
  return kept;
}
|
|
439
|
+
/**
 * Flatten the nested single-snapshot traversal result into per-hop edge
 * lists. Walks the frontier level by level: collects each row's embedded
 * child array into the next frontier, strips scaffolding keys, drops
 * NODE_RELATION self-edges, and dedupes by target uid within a hop.
 * hop 0's sourceCount is emitted as a -1 placeholder — the caller
 * (runFirestoreEngineTraversal) overwrites it with the real source count.
 */
function decodeTree(rootRows, hops) {
  const out = [];
  let frontier = rootRows;
  for (let depth = 0; depth < hops.length; depth++) {
    const hop = hops[depth];
    const targetField = hop.direction === "forward" ? "bUid" : "aUid";
    const seen = /* @__PURE__ */ new Set();
    const edges = [];
    const nextFrontier = [];
    for (const row of frontier) {
      const childKey = childArrayKey(depth);
      const children = row[childKey];
      // Children are harvested before any filtering below, so even rows
      // later skipped as self-edges/dupes still contribute descendants.
      if (Array.isArray(children)) {
        for (const child of children) {
          if (child && typeof child === "object") {
            nextFrontier.push(child);
          }
        }
      }
      const stripped = stripScaffolding(row, depth);
      if (hop.axbType === NODE_RELATION && stripped.aUid === stripped.bUid) continue;
      // Dedupe by target uid; non-string targets are dropped entirely.
      const dedupKey = stripped[targetField];
      if (typeof dedupKey !== "string" || seen.has(dedupKey)) continue;
      seen.add(dedupKey);
      edges.push(stripped);
    }
    out.push({ edges, sourceCount: depth === 0 ? -1 : out[depth - 1].edges.length });
    frontier = nextFrontier;
  }
  // The whole tree came from one pipeline execution.
  return {
    hops: out,
    totalReads: 1
  };
}
|
|
473
|
+
/**
 * Execute a multi-hop traversal entirely server-side with one Pipelines
 * query. Rejects on the emulator (no Pipelines support) and when the
 * compiled traversal spec is ineligible; short-circuits for an empty
 * source list. Decodes the nested snapshot via decodeTree and patches
 * hop 0's sourceCount with the real source count.
 */
async function runFirestoreEngineTraversal(db, collectionPath, params) {
  if (process.env.FIRESTORE_EMULATOR_HOST) {
    throw new FiregraphError(
      "engine traversal requires Pipelines \u2014 not supported on the Firestore emulator",
      "UNSUPPORTED_OPERATION"
    );
  }
  const compiled = compileEngineTraversal(params);
  if (!compiled.eligible) {
    throw new FiregraphError(
      `engine traversal not eligible: ${compiled.reason}`,
      "UNSUPPORTED_OPERATION"
    );
  }
  const spec = compiled.normalized;
  // No sources: return an empty result per hop without touching Firestore.
  if (spec.sources.length === 0) {
    return {
      hops: spec.hops.map(() => ({ edges: [], sourceCount: 0 })),
      totalReads: 0
    };
  }
  const P = await getPipelines4();
  const pipeline = buildRootPipeline(P, db, collectionPath, spec);
  const snap = await pipeline.execute();
  const rows = snap.results.map((r) => r.data());
  const result = decodeTree(rows, spec.hops);
  // Replace decodeTree's -1 placeholder with the actual source count.
  if (result.hops.length > 0) {
    result.hops[0] = { ...result.hops[0], sourceCount: spec.sources.length };
  }
  return result;
}
|
|
504
|
+
|
|
505
|
+
// src/firestore-enterprise/pipeline-adapter.ts
|
|
506
|
+
var _Pipelines6 = null;
// Lazily resolve the Pipelines namespace for the pipeline query adapter,
// importing @google-cloud/firestore at most once (truthy cache check).
async function getPipelines5() {
  if (!_Pipelines6) {
    ({ Pipelines: _Pipelines6 } = await import("@google-cloud/firestore"));
  }
  return _Pipelines6;
}
|
|
514
|
+
/**
 * Translate one filter descriptor into a Pipelines boolean expression
 * for pipeline-mode querying. Duplicate of the bulk-DML translator above
 * (bundler-split modules), but throws a plain Error on unknown ops.
 */
function buildFilterExpression2(P, filter) {
  const { field: fieldName, op, value } = filter;
  // Null-prototype table so arbitrary `op` strings cannot resolve to
  // Object.prototype members.
  const table = Object.assign(Object.create(null), {
    "==": P.equal,
    "!=": P.notEqual,
    "<": P.lessThan,
    "<=": P.lessThanOrEqual,
    ">": P.greaterThan,
    ">=": P.greaterThanOrEqual,
    "in": P.equalAny,
    "not-in": P.notEqualAny,
    "array-contains": P.arrayContains,
    "array-contains-any": P.arrayContainsAny
  });
  const builder = table[op];
  if (builder === void 0) {
    throw new Error(`Unsupported filter op for pipeline mode: ${op}`);
  }
  // Invoke as a method of P so any `this`-dependent implementation works.
  return builder.call(P, fieldName, value);
}
|
|
541
|
+
/**
 * Build a query adapter backed by the Pipelines surface instead of the
 * classic query API. The returned object exposes a single async
 * `query(filters, options)` that ANDs the filters, applies optional
 * orderBy/limit, executes the pipeline, and returns plain row data.
 */
function createPipelineQueryAdapter(db, collectionPath) {
  return {
    async query(filters, options) {
      const P = await getPipelines5();
      let pipeline = db.pipeline().collection(collectionPath);
      // Zero filters → unfiltered collection scan; one goes in directly;
      // several are AND-combined.
      if (filters.length === 1) {
        pipeline = pipeline.where(buildFilterExpression2(P, filters[0]));
      } else if (filters.length > 1) {
        const [first, second, ...rest] = filters.map((f) => buildFilterExpression2(P, f));
        pipeline = pipeline.where(P.and(first, second, ...rest));
      }
      if (options?.orderBy) {
        const f = P.field(options.orderBy.field);
        const ordering = options.orderBy.direction === "desc" ? f.descending() : f.ascending();
        pipeline = pipeline.sort(ordering);
      }
      if (options?.limit !== void 0) {
        pipeline = pipeline.limit(options.limit);
      }
      const snap = await pipeline.execute();
      return snap.results.map((r) => r.data());
    }
  };
}
|
|
565
|
+
|
|
566
|
+
// src/firestore-enterprise/backend.ts
|
|
567
|
+
// Base capability tokens the enterprise backend always advertises.
// "query.dml" is NOT in this set — it is appended separately when
// preview DML is enabled (see the backend constructor below).
var ENTERPRISE_BASE_CAPS = /* @__PURE__ */ new Set([
  "core.read",
  "core.write",
  "core.transactions",
  "core.batch",
  "core.subgraph",
  "query.aggregate",
  "query.select",
  "query.join",
  "traversal.serverSide",
  "search.vector",
  "search.fullText",
  "search.geo",
  "raw.firestore"
]);
// Module-level one-shot warning latches (at most one warning per process).
// Their consumers are not visible in this chunk — presumably flipped by the
// emulator-fallback / classic-mode / preview-DML code paths; confirm before
// relying on them.
var _emulatorFallbackWarned = false;
var _classicInProductionWarned = false;
var _previewDmlWarned = false;
|
|
585
|
+
/**
 * Convert a data-op path (array of segments) into a dotted Firestore
 * field path rooted at `data.`, validating the segments first via
 * assertSafePath.
 */
function dottedDataPath(op) {
  assertSafePath(op.path);
  const joined = op.path.join(".");
  return `data.${joined}`;
}
|
|
589
|
+
/**
 * Build the flat Firestore update payload for an edge record. Always
 * stamps `updatedAt` with a server timestamp. `replaceData` wins over
 * `dataOps` (the two are mutually exclusive — enforced by
 * assertUpdatePayloadExclusive): replaceData swaps the whole `data`
 * object (after sentinel deserialization), while dataOps write dotted
 * `data.*` paths, mapping `delete` ops to FieldValue.delete(). An
 * explicit `v` is passed through.
 */
function buildFirestoreUpdate(update, db) {
  assertUpdatePayloadExclusive(update);
  const payload = { updatedAt: FieldValue.serverTimestamp() };
  if (update.replaceData) {
    payload.data = deserializeFirestoreTypes(update.replaceData, db);
  } else if (update.dataOps) {
    for (const dataOp of update.dataOps) {
      payload[dottedDataPath(dataOp)] = dataOp.delete ? FieldValue.delete() : dataOp.value;
    }
  }
  if (update.v !== void 0) {
    payload.v = update.v;
  }
  return payload;
}
|
|
607
|
+
/**
 * Produce the Firestore document for a new/overwritten edge record:
 * copies the envelope fields and data, stamps createdAt/updatedAt with
 * the same server-timestamp sentinel, and carries `v` only when set.
 */
function stampWritableRecord(record) {
  const { aType, aUid, axbType, bType, bUid, data, v } = record;
  const stamp = FieldValue.serverTimestamp();
  const doc = {
    aType,
    aUid,
    axbType,
    bType,
    bUid,
    data,
    createdAt: stamp,
    updatedAt: stamp
  };
  if (v !== void 0) doc.v = v;
  return doc;
}
|
|
622
|
+
// Adapts a Firestore transaction adapter to the backend's doc-level
// interface, stamping envelope timestamps on writes.
var FirestoreEnterpriseTransactionBackend = class {
  constructor(adapter, db) {
    this.adapter = adapter;
    this.db = db;
  }
  // Transactional single-document read (delegated).
  getDoc(docId) {
    return this.adapter.getDoc(docId);
  }
  // Transactional filtered read (delegated).
  query(filters, options) {
    return this.adapter.query(filters, options);
  }
  // Create/overwrite; `mode === "merge"` maps to Firestore merge-set
  // semantics. NOTE(review): the adapter call is not awaited — Firestore
  // transaction writes queue synchronously, so this is presumably
  // intentional; confirm the adapter contract.
  async setDoc(docId, record, mode) {
    this.adapter.setDoc(
      docId,
      stampWritableRecord(record),
      mode === "merge" ? { merge: true } : void 0
    );
  }
  // Partial update built via buildFirestoreUpdate (stamps updatedAt).
  async updateDoc(docId, update) {
    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
  }
  async deleteDoc(docId) {
    this.adapter.deleteDoc(docId);
  }
};
|
|
647
|
+
// Adapts a Firestore write-batch adapter to the backend's doc-level
// interface. Writes are queued synchronously on the batch; nothing hits
// Firestore until commit() resolves.
var FirestoreEnterpriseBatchBackend = class {
  constructor(adapter, db) {
    this.adapter = adapter;
    this.db = db;
  }
  // Queue a create/overwrite; `mode === "merge"` maps to merge-set semantics.
  setDoc(docId, record, mode) {
    this.adapter.setDoc(
      docId,
      stampWritableRecord(record),
      mode === "merge" ? { merge: true } : void 0
    );
  }
  // Queue a partial update built via buildFirestoreUpdate (stamps updatedAt).
  updateDoc(docId, update) {
    this.adapter.updateDoc(docId, buildFirestoreUpdate(update, this.db));
  }
  deleteDoc(docId) {
    this.adapter.deleteDoc(docId);
  }
  // Flush all queued writes (delegated to the underlying batch).
  commit() {
    return this.adapter.commit();
  }
};
|
|
669
|
+
/**
 * Firestore Enterprise implementation of the firegraph backend contract.
 *
 * Reads/writes delegate to a classic-API adapter. When constructed in
 * "pipeline" query mode an additional pipeline adapter services `query()`
 * and `expand()`. The `query.dml` capability is declared only when the
 * backend was created with `previewDml: true`; without it, bulk DML callers
 * route through the read-then-write fallback.
 */
var FirestoreEnterpriseBackendImpl = class _FirestoreEnterpriseBackendImpl {
  /**
   * @param db - Firestore database handle.
   * @param collectionPath - collection holding this graph's documents.
   * @param queryMode - "pipeline" or "classic"; selects the read path.
   * @param scopePath - logical subgraph scope ("" for the root graph).
   * @param previewDml - opt-in for beta Pipeline delete()/update() DML.
   */
  constructor(db, collectionPath, queryMode, scopePath, previewDml) {
    this.db = db;
    this.queryMode = queryMode;
    this.previewDml = previewDml;
    this.collectionPath = collectionPath;
    this.scopePath = scopePath;
    this.adapter = createFirestoreAdapter(db, collectionPath);
    if (queryMode === "pipeline") {
      this.pipelineAdapter = createPipelineQueryAdapter(db, collectionPath);
    }
    // Only extend the base capability set when preview DML is enabled.
    let capSet;
    if (previewDml) {
      capSet = new Set(ENTERPRISE_BASE_CAPS);
      capSet.add("query.dml");
    } else {
      capSet = ENTERPRISE_BASE_CAPS;
    }
    this.capabilities = createCapabilities(capSet);
  }
  capabilities;
  collectionPath;
  scopePath;
  adapter;
  pipelineAdapter;
  // --- Reads ---
  getDoc(docId) {
    return this.adapter.getDoc(docId);
  }
  // Pipeline adapter wins when present (i.e. queryMode === "pipeline").
  query(filters, options) {
    const reader = this.pipelineAdapter ?? this.adapter;
    return reader.query(filters, options);
  }
  // --- Writes ---
  setDoc(docId, record, mode) {
    let writeOptions;
    if (mode === "merge") {
      writeOptions = { merge: true };
    }
    return this.adapter.setDoc(docId, stampWritableRecord(record), writeOptions);
  }
  updateDoc(docId, update) {
    const translated = buildFirestoreUpdate(update, this.db);
    return this.adapter.updateDoc(docId, translated);
  }
  deleteDoc(docId) {
    return this.adapter.deleteDoc(docId);
  }
  // --- Transactions / Batches ---
  // Wrap the native Firestore transaction in a transaction-scoped backend.
  runTransaction(fn) {
    return this.db.runTransaction(async (firestoreTx) => {
      const scoped = createTransactionAdapter(this.db, this.collectionPath, firestoreTx);
      const txBackend = new FirestoreEnterpriseTransactionBackend(scoped, this.db);
      return fn(txBackend);
    });
  }
  createBatch() {
    const queue = createBatchAdapter(this.db, this.collectionPath);
    return new FirestoreEnterpriseBatchBackend(queue, this.db);
  }
  // --- Subgraphs ---
  // A subgraph is a nested subcollection under the parent node's document;
  // the scope path accumulates segment-by-segment for diagnostics.
  subgraph(parentNodeUid, name) {
    const childPath = [this.collectionPath, parentNodeUid, name].join("/");
    let childScope;
    if (this.scopePath) {
      childScope = `${this.scopePath}/${name}`;
    } else {
      childScope = name;
    }
    return new _FirestoreEnterpriseBackendImpl(
      this.db,
      childPath,
      this.queryMode,
      childScope,
      this.previewDml
    );
  }
  // --- Cascade & bulk ---
  removeNodeCascade(uid, reader, options) {
    return removeNodeCascade(this.db, this.collectionPath, reader, uid, options);
  }
  bulkRemoveEdges(params, reader, options) {
    return bulkRemoveEdges(this.db, this.collectionPath, reader, params, options);
  }
  // --- Cross-collection ---
  // Query edges across every same-named subcollection via a collection-group
  // query. Fully-identifying params would plan a direct doc lookup, which a
  // collection-group query cannot express — reject those up front.
  async findEdgesGlobal(params, collectionName) {
    const groupName = collectionName ?? this.collectionPath.split("/").pop();
    const plan = buildEdgeQueryPlan(params);
    if (plan.strategy === "get") {
      throw new FiregraphError(
        "findEdgesGlobal() requires a query, not a direct document lookup. Omit one of aUid/axbType/bUid to force a query strategy.",
        "INVALID_QUERY"
      );
    }
    let built = this.db.collectionGroup(groupName);
    for (const clause of plan.filters) {
      built = built.where(clause.field, clause.op, clause.value);
    }
    const order = plan.options?.orderBy;
    if (order) {
      built = built.orderBy(order.field, order.direction ?? "asc");
    }
    const cap = plan.options?.limit;
    if (cap !== void 0) {
      built = built.limit(cap);
    }
    const snapshot = await built.get();
    return snapshot.docs.map((d) => d.data());
  }
  // --- Aggregate ---
  /**
   * Aggregate via the classic `Query.aggregate()` API (count/sum/avg;
   * min/max throws `UNSUPPORTED_AGGREGATE`). Both editions share this
   * classic-API helper so capability semantics stay symmetric; a pipeline
   * `aggregate()` stage that could add min/max is deferred.
   */
  aggregate(spec, filters) {
    const target = this.db.collection(this.collectionPath);
    return runFirestoreAggregate(target, spec, filters, { edition: "enterprise" });
  }
  // --- Server-side projection (capability: query.select) ---
  /**
   * Projecting query via the shared classic-API helper. Enterprise and
   * Standard delegate to the same implementation so the projection
   * contract stays consistent; the classic path already delivers the
   * byte-savings this method exists for and avoids pipeline/emulator
   * forking. Swap the helper's implementation if the pipeline `select()`
   * stage ever becomes preferable — callers stay put.
   */
  findEdgesProjected(select, filters, options) {
    const target = this.db.collection(this.collectionPath);
    return runFirestoreFindEdgesProjected(target, select, filters, options);
  }
  // --- Native vector / nearest-neighbour search (capability: search.vector) ---
  /**
   * Vector / nearest-neighbour query via the shared classic-API helper,
   * keeping field-path normalisation, validation, and result shape
   * consistent across editions. Requires a single-field vector index on
   * the indexed `vectorField`, plus a composite index when extra `where`
   * filters narrow the candidate set.
   */
  findNearest(params) {
    const target = this.db.collection(this.collectionPath);
    return runFirestoreFindNearest(target, params);
  }
  // --- Native full-text search (capability: search.fullText) ---
  /**
   * Full-text search via the Pipelines `search(...)` stage (Enterprise-only
   * product feature). The helper sorts by relevance score and applies
   * identifying filters as a follow-up `where(...)` stage because `search`
   * must be the first pipeline stage. Needs an FTS index on the search
   * fields — without it the stage silently returns no rows.
   */
  fullTextSearch(params) {
    return runFirestoreFullTextSearch(this.db, this.collectionPath, params);
  }
  // --- Native geospatial distance search (capability: search.geo) ---
  /**
   * Geospatial distance query via the Pipelines `search(...)` stage
   * (Enterprise-only). One `geoDistance(geoField, point)` expression feeds
   * both the radius cap and the nearest-first sort. Needs a geospatial
   * index on `geoField`; unindexed searches return no rows server-side.
   */
  geoSearch(params) {
    return runFirestoreGeoSearch(this.db, this.collectionPath, params);
  }
  // --- Server-side multi-source fan-out (capability: query.join) ---
  /**
   * Fan out from `params.sources` over one edge type. Pipeline mode: a
   * single `equalAny` round trip. Classic mode (emulator or explicit
   * opt-in): 30-element `'in'` chunks dispatched in parallel, then a
   * cross-chunk re-sort + total-limit slice — same observable contract in
   * ceil(N/30) round trips, still far fewer than a per-source loop.
   */
  expand(params) {
    if (this.queryMode === "pipeline") {
      return runFirestorePipelineExpand(this.db, this.collectionPath, params);
    }
    return runFirestoreClassicExpand(this.adapter, params);
  }
  // --- Server-side DML (capability: query.dml, gated by previewDml) ---
  /**
   * Bulk DELETE via the Pipeline `delete()` stage. Reachable only when the
   * backend declared `query.dml` (previewDml). Empty filter lists are
   * rejected inside the helper; subgraph isolation comes from
   * `this.collectionPath` being the pipeline's collection source.
   */
  bulkDelete(filters, options) {
    return runFirestorePipelineDelete(this.db, this.collectionPath, filters, options);
  }
  /**
   * Bulk UPDATE via the Pipeline `update(transformedFields)` stage. Same
   * gating and scoping as `bulkDelete`; `deleteField()` sentinels are
   * rejected inside the helper (the typed update surface has no
   * field-deletion transform).
   */
  bulkUpdate(filters, patch, options) {
    return runFirestorePipelineUpdate(this.db, this.collectionPath, filters, patch, options);
  }
  // --- Engine-level multi-hop traversal (capability: traversal.serverSide) ---
  /**
   * Compile a multi-hop traversal spec into one nested Pipeline and run it
   * in a single round trip. Always dispatches through Pipelines regardless
   * of `queryMode` — the join-key binding has no classic Query equivalent —
   * and needs no previewDml opt-in (the stages used are GA-typed).
   */
  runEngineTraversal(params) {
    return runFirestoreEngineTraversal(this.db, this.collectionPath, params);
  }
};
|
|
944
|
+
/**
 * Create a Firestore Enterprise firegraph backend.
 *
 * Resolves the effective query mode (the emulator cannot run Pipelines, so a
 * requested "pipeline" mode degrades to "classic" there), emits one-time
 * console warnings for risky configurations, and constructs the backend.
 *
 * @param db - Firestore database handle.
 * @param collectionPath - collection that stores the graph documents.
 * @param options - optional { defaultQueryMode, previewDml, scopePath }.
 * @returns a configured FirestoreEnterpriseBackendImpl.
 */
function createFirestoreEnterpriseBackend(db, collectionPath, options = {}) {
  const requestedMode = options.defaultQueryMode ?? "pipeline";
  const isEmulator = Boolean(process.env.FIRESTORE_EMULATOR_HOST);
  const pipelineRequested = requestedMode === "pipeline";
  // Pipelines are unavailable under the emulator; degrade transparently.
  const effectiveMode = isEmulator && pipelineRequested ? "classic" : requestedMode;
  if (isEmulator && pipelineRequested && !_emulatorFallbackWarned) {
    _emulatorFallbackWarned = true;
    console.warn(
      "[firegraph] Firestore Enterprise pipeline mode is unavailable in the emulator; falling back to classic Query API for this run. Set `defaultQueryMode: 'classic'` to silence this warning."
    );
  }
  if (!isEmulator && requestedMode === "classic" && !_classicInProductionWarned) {
    _classicInProductionWarned = true;
    console.warn(
      "[firegraph] Firestore Enterprise backend created with `defaultQueryMode: 'classic'`. Classic-mode `query()` against Enterprise causes full collection scans for `data.*` filters (high billing). For production reads on Standard Firestore, import from `'firegraph/firestore-standard'` instead."
    );
  }
  const previewDml = options.previewDml ?? false;
  if (previewDml && !_previewDmlWarned) {
    _previewDmlWarned = true;
    console.warn(
      "[firegraph] Firestore Enterprise backend created with `previewDml: true`. bulkDelete()/bulkUpdate() will dispatch through Pipeline.delete() / Pipeline.update(transformedFields), both `@beta` in @google-cloud/firestore@8.5.0. The typed surface may shift before GA \u2014 pin your firestore SDK or be ready to set `previewDml: false` and route through the read-then-write fallback if needed."
    );
  }
  const scopePath = options.scopePath ?? "";
  return new FirestoreEnterpriseBackendImpl(db, collectionPath, effectiveMode, scopePath, previewDml);
}
|
|
976
|
+
export {
|
|
977
|
+
META_EDGE_TYPE,
|
|
978
|
+
META_NODE_TYPE,
|
|
979
|
+
createFirestoreEnterpriseBackend,
|
|
980
|
+
createGraphClient,
|
|
981
|
+
createMergedRegistry,
|
|
982
|
+
createRegistry,
|
|
983
|
+
generateId
|
|
984
|
+
};
|
|
985
|
+
//# sourceMappingURL=index.js.map
|