@noy-db/hub 0.1.0-pre.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +197 -0
- package/dist/aggregate/index.cjs +476 -0
- package/dist/aggregate/index.cjs.map +1 -0
- package/dist/aggregate/index.d.cts +38 -0
- package/dist/aggregate/index.d.ts +38 -0
- package/dist/aggregate/index.js +53 -0
- package/dist/aggregate/index.js.map +1 -0
- package/dist/blobs/index.cjs +1480 -0
- package/dist/blobs/index.cjs.map +1 -0
- package/dist/blobs/index.d.cts +45 -0
- package/dist/blobs/index.d.ts +45 -0
- package/dist/blobs/index.js +48 -0
- package/dist/blobs/index.js.map +1 -0
- package/dist/bundle/index.cjs +436 -0
- package/dist/bundle/index.cjs.map +1 -0
- package/dist/bundle/index.d.cts +7 -0
- package/dist/bundle/index.d.ts +7 -0
- package/dist/bundle/index.js +40 -0
- package/dist/bundle/index.js.map +1 -0
- package/dist/chunk-2QR2PQTT.js +217 -0
- package/dist/chunk-2QR2PQTT.js.map +1 -0
- package/dist/chunk-4OWFYIDQ.js +79 -0
- package/dist/chunk-4OWFYIDQ.js.map +1 -0
- package/dist/chunk-5AATM2M2.js +90 -0
- package/dist/chunk-5AATM2M2.js.map +1 -0
- package/dist/chunk-ACLDOTNQ.js +543 -0
- package/dist/chunk-ACLDOTNQ.js.map +1 -0
- package/dist/chunk-BTDCBVJW.js +160 -0
- package/dist/chunk-BTDCBVJW.js.map +1 -0
- package/dist/chunk-CIMZBAZB.js +72 -0
- package/dist/chunk-CIMZBAZB.js.map +1 -0
- package/dist/chunk-E445ICYI.js +365 -0
- package/dist/chunk-E445ICYI.js.map +1 -0
- package/dist/chunk-EXQRC2L4.js +722 -0
- package/dist/chunk-EXQRC2L4.js.map +1 -0
- package/dist/chunk-FZU343FL.js +32 -0
- package/dist/chunk-FZU343FL.js.map +1 -0
- package/dist/chunk-GJILMRPO.js +354 -0
- package/dist/chunk-GJILMRPO.js.map +1 -0
- package/dist/chunk-GOUT6DND.js +1285 -0
- package/dist/chunk-GOUT6DND.js.map +1 -0
- package/dist/chunk-J66GRPNH.js +111 -0
- package/dist/chunk-J66GRPNH.js.map +1 -0
- package/dist/chunk-M2F2JAWB.js +464 -0
- package/dist/chunk-M2F2JAWB.js.map +1 -0
- package/dist/chunk-M5INGEFC.js +84 -0
- package/dist/chunk-M5INGEFC.js.map +1 -0
- package/dist/chunk-M62XNWRA.js +72 -0
- package/dist/chunk-M62XNWRA.js.map +1 -0
- package/dist/chunk-MR4424N3.js +275 -0
- package/dist/chunk-MR4424N3.js.map +1 -0
- package/dist/chunk-NPC4LFV5.js +132 -0
- package/dist/chunk-NPC4LFV5.js.map +1 -0
- package/dist/chunk-NXFEYLVG.js +311 -0
- package/dist/chunk-NXFEYLVG.js.map +1 -0
- package/dist/chunk-R36SIKES.js +79 -0
- package/dist/chunk-R36SIKES.js.map +1 -0
- package/dist/chunk-TDR6T5CJ.js +381 -0
- package/dist/chunk-TDR6T5CJ.js.map +1 -0
- package/dist/chunk-UF3BUNQZ.js +1 -0
- package/dist/chunk-UF3BUNQZ.js.map +1 -0
- package/dist/chunk-UQFSPSWG.js +1109 -0
- package/dist/chunk-UQFSPSWG.js.map +1 -0
- package/dist/chunk-USKYUS74.js +793 -0
- package/dist/chunk-USKYUS74.js.map +1 -0
- package/dist/chunk-XCL3WP6J.js +121 -0
- package/dist/chunk-XCL3WP6J.js.map +1 -0
- package/dist/chunk-XHFOENR2.js +680 -0
- package/dist/chunk-XHFOENR2.js.map +1 -0
- package/dist/chunk-ZFKD4QMV.js +430 -0
- package/dist/chunk-ZFKD4QMV.js.map +1 -0
- package/dist/chunk-ZLMV3TUA.js +490 -0
- package/dist/chunk-ZLMV3TUA.js.map +1 -0
- package/dist/chunk-ZRG4V3F5.js +17 -0
- package/dist/chunk-ZRG4V3F5.js.map +1 -0
- package/dist/consent/index.cjs +204 -0
- package/dist/consent/index.cjs.map +1 -0
- package/dist/consent/index.d.cts +24 -0
- package/dist/consent/index.d.ts +24 -0
- package/dist/consent/index.js +23 -0
- package/dist/consent/index.js.map +1 -0
- package/dist/crdt/index.cjs +152 -0
- package/dist/crdt/index.cjs.map +1 -0
- package/dist/crdt/index.d.cts +30 -0
- package/dist/crdt/index.d.ts +30 -0
- package/dist/crdt/index.js +24 -0
- package/dist/crdt/index.js.map +1 -0
- package/dist/crypto-IVKU7YTT.js +44 -0
- package/dist/crypto-IVKU7YTT.js.map +1 -0
- package/dist/delegation-XDJCBTI2.js +16 -0
- package/dist/delegation-XDJCBTI2.js.map +1 -0
- package/dist/dev-unlock-CeXic1xC.d.cts +263 -0
- package/dist/dev-unlock-KrKkcqD3.d.ts +263 -0
- package/dist/hash-9KO1BGxh.d.cts +63 -0
- package/dist/hash-ChfJjRjQ.d.ts +63 -0
- package/dist/history/index.cjs +1215 -0
- package/dist/history/index.cjs.map +1 -0
- package/dist/history/index.d.cts +62 -0
- package/dist/history/index.d.ts +62 -0
- package/dist/history/index.js +79 -0
- package/dist/history/index.js.map +1 -0
- package/dist/i18n/index.cjs +746 -0
- package/dist/i18n/index.cjs.map +1 -0
- package/dist/i18n/index.d.cts +38 -0
- package/dist/i18n/index.d.ts +38 -0
- package/dist/i18n/index.js +55 -0
- package/dist/i18n/index.js.map +1 -0
- package/dist/index-BRHBCmLt.d.ts +1940 -0
- package/dist/index-C8kQtmOk.d.ts +380 -0
- package/dist/index-DN-J-5wT.d.cts +1940 -0
- package/dist/index-DhjMjz7L.d.cts +380 -0
- package/dist/index.cjs +14756 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +269 -0
- package/dist/index.d.ts +269 -0
- package/dist/index.js +6085 -0
- package/dist/index.js.map +1 -0
- package/dist/indexing/index.cjs +736 -0
- package/dist/indexing/index.cjs.map +1 -0
- package/dist/indexing/index.d.cts +36 -0
- package/dist/indexing/index.d.ts +36 -0
- package/dist/indexing/index.js +77 -0
- package/dist/indexing/index.js.map +1 -0
- package/dist/lazy-builder-BwEoBQZ9.d.ts +304 -0
- package/dist/lazy-builder-CZVLKh0Z.d.cts +304 -0
- package/dist/ledger-2NX4L7PN.js +33 -0
- package/dist/ledger-2NX4L7PN.js.map +1 -0
- package/dist/mime-magic-CBBSOkjm.d.cts +50 -0
- package/dist/mime-magic-CBBSOkjm.d.ts +50 -0
- package/dist/periods/index.cjs +1035 -0
- package/dist/periods/index.cjs.map +1 -0
- package/dist/periods/index.d.cts +21 -0
- package/dist/periods/index.d.ts +21 -0
- package/dist/periods/index.js +25 -0
- package/dist/periods/index.js.map +1 -0
- package/dist/predicate-SBHmi6D0.d.cts +161 -0
- package/dist/predicate-SBHmi6D0.d.ts +161 -0
- package/dist/query/index.cjs +1957 -0
- package/dist/query/index.cjs.map +1 -0
- package/dist/query/index.d.cts +3 -0
- package/dist/query/index.d.ts +3 -0
- package/dist/query/index.js +62 -0
- package/dist/query/index.js.map +1 -0
- package/dist/session/index.cjs +487 -0
- package/dist/session/index.cjs.map +1 -0
- package/dist/session/index.d.cts +45 -0
- package/dist/session/index.d.ts +45 -0
- package/dist/session/index.js +44 -0
- package/dist/session/index.js.map +1 -0
- package/dist/shadow/index.cjs +133 -0
- package/dist/shadow/index.cjs.map +1 -0
- package/dist/shadow/index.d.cts +16 -0
- package/dist/shadow/index.d.ts +16 -0
- package/dist/shadow/index.js +20 -0
- package/dist/shadow/index.js.map +1 -0
- package/dist/store/index.cjs +1069 -0
- package/dist/store/index.cjs.map +1 -0
- package/dist/store/index.d.cts +491 -0
- package/dist/store/index.d.ts +491 -0
- package/dist/store/index.js +34 -0
- package/dist/store/index.js.map +1 -0
- package/dist/strategy-BSxFXGzb.d.cts +110 -0
- package/dist/strategy-BSxFXGzb.d.ts +110 -0
- package/dist/strategy-D-SrOLCl.d.cts +548 -0
- package/dist/strategy-D-SrOLCl.d.ts +548 -0
- package/dist/sync/index.cjs +1062 -0
- package/dist/sync/index.cjs.map +1 -0
- package/dist/sync/index.d.cts +42 -0
- package/dist/sync/index.d.ts +42 -0
- package/dist/sync/index.js +28 -0
- package/dist/sync/index.js.map +1 -0
- package/dist/team/index.cjs +1233 -0
- package/dist/team/index.cjs.map +1 -0
- package/dist/team/index.d.cts +117 -0
- package/dist/team/index.d.ts +117 -0
- package/dist/team/index.js +39 -0
- package/dist/team/index.js.map +1 -0
- package/dist/tx/index.cjs +212 -0
- package/dist/tx/index.cjs.map +1 -0
- package/dist/tx/index.d.cts +20 -0
- package/dist/tx/index.d.ts +20 -0
- package/dist/tx/index.js +20 -0
- package/dist/tx/index.js.map +1 -0
- package/dist/types-BZpCZB8N.d.ts +7526 -0
- package/dist/types-Bfs0qr5F.d.cts +7526 -0
- package/dist/ulid-COREQ2RQ.js +9 -0
- package/dist/ulid-COREQ2RQ.js.map +1 -0
- package/dist/util/index.cjs +230 -0
- package/dist/util/index.cjs.map +1 -0
- package/dist/util/index.d.cts +77 -0
- package/dist/util/index.d.ts +77 -0
- package/dist/util/index.js +190 -0
- package/dist/util/index.js.map +1 -0
- package/package.json +244 -0
|
@@ -0,0 +1,1285 @@
|
|
|
1
|
+
import {
|
|
2
|
+
evaluateClause,
|
|
3
|
+
readPath
|
|
4
|
+
} from "./chunk-M5INGEFC.js";
|
|
5
|
+
import {
|
|
6
|
+
DanglingReferenceError,
|
|
7
|
+
JoinTooLargeError
|
|
8
|
+
} from "./chunk-ACLDOTNQ.js";
|
|
9
|
+
|
|
10
|
+
// src/query/join.ts
|
|
11
|
+
var DEFAULT_JOIN_MAX_ROWS = 5e4;
|
|
12
|
+
var JOIN_WARN_FRACTION = 0.8;
|
|
13
|
+
/**
 * Normalize a raw foreign-key value into a string lookup key.
 * Strings pass through, numbers/bigints are stringified, and anything
 * else (null, undefined, booleans, objects, ...) maps to null —
 * meaning "no usable reference".
 */
function coerceRefKey(value) {
  if (value == null) return null; // covers both null and undefined
  switch (typeof value) {
    case "string":
      return value;
    case "number":
    case "bigint":
      return String(value);
    default:
      return null;
  }
}
|
|
19
|
+
// Dedupe set for 'warn'-mode dangling-ref messages: one entry per
// unique field→target:refId triple that has already been reported.
var warnedDanglingKeys = /* @__PURE__ */ new Set();

/** Emit the dangling-ref warning at most once per unique triple. */
function warnOnceDangling(field, target, refId) {
  const dedupeKey = `${field}\u2192${target}:${refId}`;
  if (!warnedDanglingKeys.has(dedupeKey)) {
    warnedDanglingKeys.add(dedupeKey);
    console.warn(
      `[noy-db] .join() encountered dangling ref in 'warn' mode: field "${field}" \u2192 "${target}:${refId}" not found. Attaching null.`
    );
  }
}
|
|
28
|
+
// Dedupe set for ceiling warnings: one entry per target/side pair.
var warnedCeilingKeys = /* @__PURE__ */ new Set();

/**
 * Warn (once per target/side) that a join side is nearing the row
 * ceiling, reporting the percentage actually used.
 */
function warnCeilingApproaching(target, side, rows, maxRows) {
  const dedupeKey = `${target}:${side}`;
  if (warnedCeilingKeys.has(dedupeKey)) {
    return;
  }
  warnedCeilingKeys.add(dedupeKey);
  const pct = Math.round((rows / maxRows) * 100);
  console.warn(
    `[noy-db] .join() ${side} side is at ${pct}% of the ${maxRows}-row ceiling for target "${target}" (${rows} rows). Streaming joins over scan() are not yet supported for collections that need to exceed this.`
  );
}
|
|
38
|
+
/**
 * Apply every join leg, in order, to the narrowed left rows. Always
 * returns a fresh array (never the caller's `rows` instance), even
 * when there are no legs.
 */
function applyJoins(rows, joins, context) {
  return joins.reduce(
    (acc, leg) => applyOneJoin(acc, leg, context),
    [...rows]
  );
}
|
|
46
|
+
// Execute a single join leg against the already-narrowed left rows.
// Two paths: dictKey joins (right side is a dictionary snapshot keyed
// by each entry's string `key` field) and ref() joins (right side is
// another collection). The ref() path enforces the row ceiling on both
// sides before any per-row work happens, then picks a physical
// strategy (nested-loop vs hash).
function applyOneJoin(leftRows, leg, context) {
  if (leg.isDictJoin) {
    const dictSource = context.resolveDictSource?.(leg.field);
    if (!dictSource) {
      throw new Error(
        `.join() field "${leg.field}" on "${context.leftCollection}" is declared as a dictKey join but the dict source could not be resolved. Ensure the dictionary has at least one entry.`
      );
    }
    const out = [];
    const snapshot = dictSource.snapshot();
    // Index dict entries by their string `key` field for O(1) probes;
    // non-string keys are silently skipped.
    const dictMap = /* @__PURE__ */ new Map();
    for (const entry of snapshot) {
      const k = readPath(entry, "key");
      if (typeof k === "string") dictMap.set(k, entry);
    }
    for (const left of leftRows) {
      const rawId = readPath(left, leg.field);
      const key = coerceRefKey(rawId);
      const dictEntry = key === null ? void 0 : dictMap.get(key);
      // Dict joins always attach; a miss becomes null (no per-mode
      // dangling handling on this path).
      out.push({ ...left, [leg.as]: dictEntry ?? null });
    }
    return out;
  }
  const source = context.resolveSource(leg.target);
  if (!source) {
    throw new Error(
      `.join() cannot resolve target collection "${leg.target}" (referenced from field "${leg.field}" on "${context.leftCollection}"). Make sure the target collection has been opened via vault.collection() at least once before running the query.`
    );
  }
  const maxRows = leg.maxRows ?? DEFAULT_JOIN_MAX_ROWS;
  // Hard ceiling on the left side; the right side has not been
  // snapshotted yet, hence rightRows: -1.
  if (leftRows.length > maxRows) {
    throw new JoinTooLargeError({
      leftRows: leftRows.length,
      rightRows: -1,
      maxRows,
      side: "left",
      message: `.join() left side has ${leftRows.length} rows, exceeding the ${maxRows}-row ceiling for target "${leg.target}". Filter the left side further with where()/limit() before joining, or raise the ceiling via { maxRows }. Streaming joins over scan() are not yet supported.`
    });
  }
  // One-shot heads-up at JOIN_WARN_FRACTION (80%) of the ceiling.
  if (leftRows.length > maxRows * JOIN_WARN_FRACTION) {
    warnCeilingApproaching(leg.target, "left", leftRows.length, maxRows);
  }
  const rightSnapshot = source.snapshot();
  if (rightSnapshot.length > maxRows) {
    throw new JoinTooLargeError({
      leftRows: leftRows.length,
      rightRows: rightSnapshot.length,
      maxRows,
      side: "right",
      message: `.join() right side "${leg.target}" has ${rightSnapshot.length} rows, exceeding the ${maxRows}-row ceiling. Raise the ceiling via { maxRows } if the data genuinely fits in memory, or track for streaming joins.`
    });
  }
  if (rightSnapshot.length > maxRows * JOIN_WARN_FRACTION) {
    warnCeilingApproaching(leg.target, "right", rightSnapshot.length, maxRows);
  }
  // Prefer nested-loop when the source offers point lookups; otherwise
  // (or when leg.strategy forces it) hash-join over the full snapshot.
  const strategy = leg.strategy ?? (source.lookupById ? "nested" : "hash");
  if (strategy === "nested" && source.lookupById) {
    const lookup = (id) => source.lookupById?.(id);
    return nestedLoopJoin(leftRows, leg, lookup);
  }
  return hashJoin(leftRows, leg, rightSnapshot);
}
|
|
108
|
+
/**
 * Nested-loop strategy: probe the target's point-lookup function once
 * per left row. A null/undefined FK skips the lookup entirely.
 */
function nestedLoopJoin(leftRows, leg, lookupById) {
  return leftRows.map((left) => {
    const rawId = readPath(left, leg.field);
    const key = coerceRefKey(rawId);
    const right = key === null ? void 0 : lookupById(key);
    return attachJoin(left, leg, right, rawId);
  });
}
|
|
118
|
+
/**
 * Hash strategy: index the right snapshot by record id once, then
 * probe the index for every left row. Records whose id does not
 * coerce to a string key are left out of the index.
 */
function hashJoin(leftRows, leg, rightSnapshot) {
  // Build phase: id → record (later duplicates win, as with Map.set).
  const rightById = new Map();
  for (const record of rightSnapshot) {
    const indexKey = coerceRefKey(readPath(record, "id"));
    if (indexKey !== null) rightById.set(indexKey, record);
  }
  // Probe phase.
  return leftRows.map((left) => {
    const rawId = readPath(left, leg.field);
    const key = coerceRefKey(rawId);
    const right = key === null ? void 0 : rightById.get(key);
    return attachJoin(left, leg, right, rawId);
  });
}
|
|
136
|
+
/**
 * Merge one join result into a left row under `leg.as`. A missing
 * right side is handled per the leg's ref mode: 'strict' throws,
 * 'warn' attaches null with a one-shot warning, anything else
 * ('cascade') attaches null silently. A null/undefined FK is "no
 * reference at all" and never triggers dangling handling.
 */
function attachJoin(left, leg, right, rawId) {
  // Non-object left rows cannot carry an alias; pass them through.
  if (left === null || typeof left !== "object") {
    return left;
  }
  if (right !== void 0) {
    return { ...left, [leg.as]: right };
  }
  const refKey = coerceRefKey(rawId);
  if (refKey !== null) {
    if (leg.mode === "strict") {
      throw new DanglingReferenceError({
        field: leg.field,
        target: leg.target,
        refId: refKey,
        message: `.join() strict dangling: record references "${leg.target}:${refKey}" via field "${leg.field}", but no such record exists. Use ref() mode 'warn' or 'cascade' if dangling refs are acceptable, or run vault.checkIntegrity() to find and fix the orphans.`
      });
    }
    if (leg.mode === "warn") {
      warnOnceDangling(leg.field, leg.target, refKey);
    }
  }
  return { ...left, [leg.as]: null };
}
|
|
160
|
+
/** Clear both one-shot warning caches (test helper). */
function resetJoinWarnings() {
  for (const cache of [warnedDanglingKeys, warnedCeilingKeys]) {
    cache.clear();
  }
}
|
|
164
|
+
|
|
165
|
+
// src/query/live.ts

/**
 * Public factory for live queries: wraps `recompute` and the upstream
 * change sources in a LiveQueryImpl. The computation runs once eagerly
 * and again whenever any upstream notifies.
 */
function buildLiveQuery(recompute, upstreams) {
  return new LiveQueryImpl(recompute, upstreams);
}
|
|
169
|
+
/**
 * Reactive query result. Runs `recompute` eagerly at construction and
 * again on every upstream notification. Consumers read `.value` /
 * `.error` and may `subscribe()` for change pings; `stop()` detaches
 * from all upstreams and drops every listener. Errors thrown by
 * `recompute` (or by an upstream subscribe) are captured into `.error`
 * instead of propagating.
 */
var LiveQueryImpl = class {
  recompute;
  _value = [];
  _error = null;
  listeners = /* @__PURE__ */ new Set();
  unsubs = [];
  stopped = false;
  constructor(recompute, upstreams) {
    this.recompute = recompute;
    this.refresh();
    for (const upstream of upstreams) {
      try {
        this.unsubs.push(upstream.subscribe(this.onUpstreamChange));
      } catch (err) {
        this._error = err instanceof Error ? err : new Error(String(err));
      }
    }
  }
  /** Latest successful (or initial empty) computation result. */
  get value() {
    return this._value;
  }
  /** Last captured error, or null after a successful refresh. */
  get error() {
    return this._error;
  }
  // Bound via class field so upstreams can invoke it without losing
  // `this`; refreshes first, then fans out to listeners.
  onUpstreamChange = () => {
    this.refresh();
    for (const cb of this.listeners) {
      try {
        cb();
      } catch {
        // A failing listener never breaks the notification fan-out.
      }
    }
  };
  /** Re-run the computation, capturing failures into `_error`. */
  refresh() {
    if (this.stopped) return;
    try {
      this._value = this.recompute();
      this._error = null;
    } catch (err) {
      this._error = err instanceof Error ? err : new Error(String(err));
    }
  }
  /** Register a change listener; returns its unsubscribe function. */
  subscribe(cb) {
    if (this.stopped) {
      return () => {
      };
    }
    this.listeners.add(cb);
    return () => this.listeners.delete(cb);
  }
  /** Idempotent teardown: detach every upstream, drop all listeners. */
  stop() {
    if (this.stopped) return;
    this.stopped = true;
    for (const unsub of this.unsubs) {
      try {
        unsub();
      } catch {
        // Best-effort teardown; a throwing unsubscribe is ignored.
      }
    }
    this.unsubs.length = 0;
    this.listeners.clear();
  }
};
|
|
235
|
+
|
|
236
|
+
// src/aggregate/strategy.ts

// Single shared Error instance thrown by every disabled entry point.
var NOT_ENABLED = new Error(
  'Aggregate / groupBy is not enabled on this Noydb instance. Import `{ withAggregate }` from "@noy-db/hub/aggregate" and pass it to `createNoydb({ aggregateStrategy: withAggregate() })`.'
);

// Null-object aggregate strategy installed by default: every entry
// point fails loudly with the same actionable message.
var NO_AGGREGATE = {
  aggregate: () => {
    throw NOT_ENABLED;
  },
  groupBy: () => {
    throw NOT_ENABLED;
  },
  scanAggregate: () => {
    throw NOT_ENABLED;
  }
};
|
|
251
|
+
|
|
252
|
+
// src/query/builder.ts

// Shared starting plan for every fresh Query. Builder methods always
// copy ({ ...plan, ... }) rather than mutate, so sharing the arrays
// here across Query instances is safe.
var EMPTY_PLAN = {
  clauses: [],
  orderBy: [],
  limit: void 0,
  offset: 0,
  joins: []
};
|
|
260
|
+
var Query = class _Query {
|
|
261
|
+
  // Record source the plan executes against (collection or test stub).
  source;
  // Immutable query plan; every builder method copies it.
  plan;
  // Optional JoinContext; required for .join() legs. Set when the
  // Query is built via collection.query(), absent for raw construction.
  joinContext;
  // Aggregate executor; defaults to the throwing null-object strategy.
  aggregateStrategy;
  constructor(source, plan = EMPTY_PLAN, joinContext, aggregateStrategy = NO_AGGREGATE) {
    this.source = source;
    this.plan = plan;
    this.joinContext = joinContext;
    this.aggregateStrategy = aggregateStrategy;
  }
|
|
271
|
+
/** Add a field comparison. Multiple where() calls are AND-combined. */
|
|
272
|
+
where(field, op, value) {
|
|
273
|
+
const clause = { type: "field", field, op, value };
|
|
274
|
+
return new _Query(
|
|
275
|
+
this.source,
|
|
276
|
+
{ ...this.plan, clauses: [...this.plan.clauses, clause] },
|
|
277
|
+
this.joinContext,
|
|
278
|
+
this.aggregateStrategy
|
|
279
|
+
);
|
|
280
|
+
}
|
|
281
|
+
/**
|
|
282
|
+
* Logical OR group. Pass a callback that builds a sub-query.
|
|
283
|
+
* Each clause inside the callback is OR-combined; the group itself
|
|
284
|
+
* joins the parent plan with AND.
|
|
285
|
+
*/
|
|
286
|
+
or(builder) {
|
|
287
|
+
const sub = builder(
|
|
288
|
+
new _Query(this.source, EMPTY_PLAN, this.joinContext, this.aggregateStrategy)
|
|
289
|
+
);
|
|
290
|
+
const group = {
|
|
291
|
+
type: "group",
|
|
292
|
+
op: "or",
|
|
293
|
+
clauses: sub.plan.clauses
|
|
294
|
+
};
|
|
295
|
+
return new _Query(
|
|
296
|
+
this.source,
|
|
297
|
+
{ ...this.plan, clauses: [...this.plan.clauses, group] },
|
|
298
|
+
this.joinContext,
|
|
299
|
+
this.aggregateStrategy
|
|
300
|
+
);
|
|
301
|
+
}
|
|
302
|
+
/**
|
|
303
|
+
* Logical AND group. Same shape as `or()` but every clause inside the group
|
|
304
|
+
* must match. Useful for explicit grouping inside a larger OR.
|
|
305
|
+
*/
|
|
306
|
+
and(builder) {
|
|
307
|
+
const sub = builder(
|
|
308
|
+
new _Query(this.source, EMPTY_PLAN, this.joinContext, this.aggregateStrategy)
|
|
309
|
+
);
|
|
310
|
+
const group = {
|
|
311
|
+
type: "group",
|
|
312
|
+
op: "and",
|
|
313
|
+
clauses: sub.plan.clauses
|
|
314
|
+
};
|
|
315
|
+
return new _Query(
|
|
316
|
+
this.source,
|
|
317
|
+
{ ...this.plan, clauses: [...this.plan.clauses, group] },
|
|
318
|
+
this.joinContext,
|
|
319
|
+
this.aggregateStrategy
|
|
320
|
+
);
|
|
321
|
+
}
|
|
322
|
+
/** Escape hatch: add an arbitrary predicate function. Not serializable. */
|
|
323
|
+
filter(fn) {
|
|
324
|
+
const clause = {
|
|
325
|
+
type: "filter",
|
|
326
|
+
fn
|
|
327
|
+
};
|
|
328
|
+
return new _Query(
|
|
329
|
+
this.source,
|
|
330
|
+
{ ...this.plan, clauses: [...this.plan.clauses, clause] },
|
|
331
|
+
this.joinContext,
|
|
332
|
+
this.aggregateStrategy
|
|
333
|
+
);
|
|
334
|
+
}
|
|
335
|
+
/** Sort by a field. Subsequent calls are tie-breakers. */
|
|
336
|
+
orderBy(field, direction = "asc") {
|
|
337
|
+
return new _Query(
|
|
338
|
+
this.source,
|
|
339
|
+
{ ...this.plan, orderBy: [...this.plan.orderBy, { field, direction }] },
|
|
340
|
+
this.joinContext,
|
|
341
|
+
this.aggregateStrategy
|
|
342
|
+
);
|
|
343
|
+
}
|
|
344
|
+
/** Cap the result size. */
|
|
345
|
+
limit(n) {
|
|
346
|
+
return new _Query(
|
|
347
|
+
this.source,
|
|
348
|
+
{ ...this.plan, limit: n },
|
|
349
|
+
this.joinContext,
|
|
350
|
+
this.aggregateStrategy
|
|
351
|
+
);
|
|
352
|
+
}
|
|
353
|
+
/** Skip the first N matching records (after ordering). */
|
|
354
|
+
offset(n) {
|
|
355
|
+
return new _Query(
|
|
356
|
+
this.source,
|
|
357
|
+
{ ...this.plan, offset: n },
|
|
358
|
+
this.joinContext,
|
|
359
|
+
this.aggregateStrategy
|
|
360
|
+
);
|
|
361
|
+
}
|
|
362
|
+
  /**
   * Resolve a `ref()`-declared foreign key and attach the right-side
   * record under `opts.as` — eager, single-FK, intra-vault joins only
   * (cross-vault correlation goes through `Noydb.queryAcross`, not
   * `.join()`).
   *
   * Preconditions:
   * - the Query carries a joinContext (construct via
   *   `collection.query()`, not the raw Query constructor);
   * - `field` either has a matching `refs: { [field]: ref('<target>') }`
   *   declaration, or resolves as a dictKey join via
   *   `joinContext.resolveDictSource`.
   *
   * Strategy: nested-loop against `lookupById` when the target source
   * provides it; hash join otherwise, or when `{ strategy: 'hash' }`
   * is passed explicitly.
   *
   * Dangling-ref semantics follow the declared ref mode at execution
   * time: 'strict' throws DanglingReferenceError, 'warn' attaches null
   * with a one-shot warning, 'cascade' attaches null silently. A left
   * row whose FK is null/undefined is "no reference at all" and is
   * always allowed. Dict-join legs reuse the field name as the dict
   * name and are always strict.
   *
   * The joined field widens the row type under the `opts.as` alias;
   * supply the generic parameters for type-checked access, otherwise
   * the joined field is `unknown`.
   */
  join(field, opts) {
    if (!this.joinContext) {
      throw new Error(
        `Query.join() requires a join context. Use collection.query() to construct a join-capable Query instead of the Query constructor directly (the direct constructor is only used for tests with plain-object sources).`
      );
    }
    const descriptor = this.joinContext.resolveRef(field);
    // No ref() declaration — fall back to a dictKey join when a dict
    // source exists for this field name.
    const isDictJoinField = !descriptor && this.joinContext.resolveDictSource?.(field) != null;
    if (!descriptor && !isDictJoinField) {
      throw new Error(
        `Query.join(): no ref() declared for field "${field}" on collection "${this.joinContext.leftCollection}". Add refs: { ${field}: ref('<target-collection>') } to the collection options, then retry. See the ref() docs for the full list of modes.`
      );
    }
    const leg = descriptor ? {
      field,
      as: opts.as,
      target: descriptor.target,
      mode: descriptor.mode,
      strategy: opts.strategy,
      maxRows: opts.maxRows,
      // constraint #1 — always 'all' in. Do not remove.
      partitionScope: "all"
    } : {
      // Dict join leg
      field,
      as: opts.as,
      target: field,
      // dict name = field name for dictKey
      mode: "strict",
      strategy: opts.strategy,
      maxRows: opts.maxRows,
      partitionScope: "all",
      isDictJoin: true
    };
    return new _Query(
      this.source,
      { ...this.plan, joins: [...this.plan.joins, leg] },
      this.joinContext,
      this.aggregateStrategy
    );
  }
|
|
461
|
+
  /**
   * Execute the plan and return the matching records. Join legs are
   * applied after where/orderBy/limit/offset narrow the left set.
   * Throws when the plan carries joins but no JoinContext is attached
   * — i.e. the raw Query constructor was used with a join-bearing
   * plan.
   */
  toArray() {
    const base = executePlanWithSource(this.source, this.plan);
    if (this.plan.joins.length === 0) return base;
    if (!this.joinContext) {
      throw new Error(
        `Query.toArray(): plan carries ${this.plan.joins.length} join leg(s) but no JoinContext is attached. This usually means the Query was constructed via the raw Query constructor with a plan that had joins pre-populated. Use collection.query().join(...) instead.`
      );
    }
    return applyJoins(base, this.plan.joins, this.joinContext);
  }
|
|
477
|
+
/** Return the first matching record, or null. Joins are applied. */
|
|
478
|
+
first() {
|
|
479
|
+
const arr = this.limit(1).toArray();
|
|
480
|
+
return arr[0] ?? null;
|
|
481
|
+
}
|
|
482
|
+
  /**
   * Return the number of matching records (after where/filter, before
   * limit/offset). Joins are NOT applied: join legs are projection-only
   * (they attach an aliased field, never filter), so the left-side
   * cardinality is the count — and in strict mode running them could
   * throw DanglingReferenceError for a call whose intent is purely to
   * count.
   */
  count() {
    // candidateRecords / filterRecords come from the shared plan
    // executor elsewhere in this bundle; index-satisfied clauses are
    // consumed up front and only the remainder is filtered here.
    const { candidates, remainingClauses } = candidateRecords(this.source, this.plan.clauses);
    if (remainingClauses.length === 0) return candidates.length;
    return filterRecords(candidates, remainingClauses).length;
  }
|
|
496
|
+
  /**
   * Reduce the matching records through a named set of reducers — the
   * aggregation terminal.
   *
   * Returns an Aggregation wrapper with two terminals:
   * - `.run()` — synchronous one-shot reduction;
   * - `.live()` — reactive primitive that re-runs the reduction
   *   whenever the source notifies of a change. Always call
   *   `live.stop()` when finished.
   *
   * The reducer spec is bound here once and reused by both terminals,
   * which is why this returns a wrapper instead of a value.
   *
   * Joins are intentionally NOT applied — same rationale as `.count()`:
   * join legs are projection-only, so running them just to discard the
   * aliases would be wasteful. Throws via the NO_AGGREGATE null-object
   * strategy unless an aggregate strategy was installed on the Noydb
   * instance.
   */
  aggregate(spec) {
    const source = this.source;
    const clauses = this.plan.clauses;
    // Deferred executor: re-evaluated by the strategy on every run()
    // or live() refresh, so it always sees the current source state.
    const executeRecords = () => {
      const { candidates, remainingClauses } = candidateRecords(source, clauses);
      return remainingClauses.length === 0 ? candidates : filterRecords(candidates, remainingClauses);
    };
    // Forward the source's change feed (when it has one) so .live()
    // can subscribe.
    const upstreams = [];
    if (source.subscribe) {
      const subscribe = source.subscribe.bind(source);
      upstreams.push({ subscribe: (cb) => subscribe(cb) });
    }
    return this.aggregateStrategy.aggregate(executeRecords, spec, upstreams);
  }
|
|
550
|
+
  /**
   * Partition matching records into buckets keyed by a field, then
   * terminate with `.aggregate(spec)` to compute per-bucket
   * reducers.
   *
   * ```ts
   * const byClient = invoices.query()
   *   .where('status', '==', 'open')
   *   .groupBy('clientId')
   *   .aggregate({ total: sum('amount'), n: count() })
   *   .run()
   * // → [ { clientId: 'c1', total: 5250, n: 3 }, … ]
   * ```
   *
   * Result rows carry the group key value under the grouping field
   * name plus every reducer output from the spec. Buckets are
   * emitted in first-seen order — consumers who want a specific
   * ordering should `.sort()` downstream.
   *
   * **Cardinality caps:** a one-shot warning fires at 10_000
   * distinct groups; `GroupCardinalityError` throws at 100_000.
   * Grouping on a high-uniqueness field like `id` or `createdAt` is
   * almost always a query mistake — the error message names the
   * field and observed cardinality and suggests narrowing with
   * `.where()` first.
   *
   * **Null / undefined keys:** records with a missing or explicitly
   * `null` group field get their own buckets. `Map`-based
   * partitioning distinguishes `undefined` from `null`, so the two
   * cases do NOT merge. Consumers who want them merged should
   * coalesce upstream with `.filter()`.
   *
   * **Joins are not applied** — same rationale as `.count()` and
   * `.aggregate()`. Joined fields are projection-only, so
   * running a join inside a grouping pipeline would be wasteful and
   * could trigger `DanglingReferenceError` in strict mode for a
   * call whose intent is purely to bucket-and-reduce. Grouping by
   * a joined field is explicitly out of scope for v1 — file an
   * issue if a real consumer needs it.
   *
   * **Filter clauses (`.filter(fn)`):** grouped queries still
   * support filter clauses in the underlying plan — they run in
   * the same candidate/filter pipeline that `.aggregate()` uses.
   * The performance caveat is the same: filter clauses cost O(N)
   * per record and can't be index-accelerated.
   */
  groupBy(field) {
    const source = this.source;
    const clauses = this.plan.clauses;
    // Deferred executor shared with `.aggregate()`: candidate
    // selection plus residual filtering, re-run per (re)computation.
    const executeRecords = () => {
      const { candidates, remainingClauses } = candidateRecords(source, clauses);
      return remainingClauses.length === 0 ? candidates : filterRecords(candidates, remainingClauses);
    };
    const upstreams = [];
    if (source.subscribe) {
      const subscribe = source.subscribe.bind(source);
      upstreams.push({ subscribe: (cb) => subscribe(cb) });
    }
    const joinCtx = this.joinContext;
    // Optional label resolver: when the join context can resolve a
    // dictionary source for the grouping field, snapshot its
    // key→labels entries ONCE (at groupBy() call time) into a Map,
    // then hand the strategy an async (key, locale, fallback) →
    // label lookup. Entries with a non-string key or a non-object
    // labels payload are skipped.
    const dictLabelResolver = joinCtx?.resolveDictSource ? (() => {
      const dictSource = joinCtx.resolveDictSource(field);
      if (!dictSource) return void 0;
      const snapshot = dictSource.snapshot();
      const dictMap = /* @__PURE__ */ new Map();
      for (const entry of snapshot) {
        const k = entry["key"];
        const labels = entry["labels"];
        if (typeof k === "string" && labels && typeof labels === "object") {
          dictMap.set(k, labels);
        }
      }
      return async (key, locale, fallback) => {
        const labels = dictMap.get(key);
        if (!labels) return void 0;
        if (labels[locale] !== void 0) return labels[locale];
        // Normalize the fallback spec to an array: a single locale
        // string becomes [string]; null/undefined becomes [].
        const chain = Array.isArray(fallback) ? fallback : fallback ? [fallback] : [];
        for (const fb of chain) {
          if (fb === "any") {
            // 'any' picks the first available label in the entry's
            // own key order — order is not otherwise guaranteed.
            const any = Object.values(labels)[0];
            if (any !== void 0) return any;
          } else if (labels[fb] !== void 0) {
            return labels[fb];
          }
        }
        return void 0;
      };
    })() : void 0;
    return this.aggregateStrategy.groupBy(executeRecords, field, upstreams, dictLabelResolver);
  }
|
|
639
|
+
/**
|
|
640
|
+
* Re-run the query whenever the source notifies of changes.
|
|
641
|
+
* Returns an unsubscribe function. The callback receives the latest result.
|
|
642
|
+
* Throws if the source does not support subscriptions.
|
|
643
|
+
*
|
|
644
|
+
* **For joined queries, prefer `.live()`** — `subscribe()`
|
|
645
|
+
* only re-fires on LEFT-side changes, so joined data can be
|
|
646
|
+
* stale if the right side mutates between emissions. `.live()`
|
|
647
|
+
* merges change streams from every join target.
|
|
648
|
+
*/
|
|
649
|
+
subscribe(cb) {
|
|
650
|
+
if (!this.source.subscribe) {
|
|
651
|
+
throw new Error("Query source does not support subscriptions. Pass a source with a subscribe() method.");
|
|
652
|
+
}
|
|
653
|
+
cb(this.toArray());
|
|
654
|
+
return this.source.subscribe(() => cb(this.toArray()));
|
|
655
|
+
}
|
|
656
|
+
  /**
   * Reactive terminal — returns a `LiveQuery<T>` that re-runs the
   * query and updates its `value` whenever any source feeding it
   * mutates.
   *
   * For non-joined queries, `.live()` is a convenience over the
   * existing `.subscribe()` callback shape: a hand-rolled reactive
   * primitive with `value` / `error` fields and a `subscribe(cb)`
   * notification channel. Frame-agnostic — Vue / React / Solid
   * adapters wrap it in their own primitive.
   *
   * For joined queries, `.live()` additionally subscribes to every
   * join target's change stream. Mutations on a right-side
   * collection (insert / update / delete of a client referenced by
   * an invoice) re-fire the live query and re-evaluate every
   * dependent left row. Right-side targets are deduped by
   * collection name, so a chain that joins the same target twice
   * (e.g. billing client + shipping client → both 'clients') only
   * subscribes once.
   *
   * **Ref-mode behavior on right-side disappearance** — matches the
   * eager `.toArray()` contract:
   * - `strict` → re-run throws `DanglingReferenceError`. The
   *   LiveQuery catches the throw, stores it in `live.error`, and
   *   notifies listeners (the throw does NOT propagate out of
   *   the source's change handler — that would tear down the
   *   emitter). Consumers check `live.error` after each
   *   notification and render an error state in the UI.
   * - `warn` → joined value flips to `null`; the existing
   *   warn-channel deduplication keeps repeated re-runs from
   *   spamming the console.
   * - `cascade` → no special handling needed; the cascade-
   *   delete mechanism propagates the right-side delete into the
   *   left collection on the next tick, and the live query
   *   naturally re-fires with the orphaned left rows gone.
   *
   * Always call `live.stop()` when finished — it tears down every
   * upstream subscription. The Vue layer's `onUnmounted` hook
   * should call `stop()` automatically; raw consumers must do it
   * themselves.
   *
   * **Limitations:**
   * - No granular delta updates — the whole query re-runs on
   *   every change.
   * - No microtask batching — bursty changes produce one re-run
   *   per change.
   * - No re-planning under live mutations — the planner picks
   *   once at subscription time and reuses the same plan.
   * - Streaming live joins are deferred.
   */
  live() {
    const upstreams = [];
    // Left side: the query's own source, when it supports change
    // notifications at all.
    if (this.source.subscribe) {
      const leftSubscribe = this.source.subscribe.bind(this.source);
      upstreams.push({
        subscribe: (cb) => leftSubscribe(cb)
      });
    }
    // Right side(s): one subscription per DISTINCT join target —
    // deduped by target collection name via `subscribed`.
    if (this.plan.joins.length > 0 && this.joinContext) {
      const subscribed = /* @__PURE__ */ new Set();
      for (const leg of this.plan.joins) {
        if (subscribed.has(leg.target)) continue;
        subscribed.add(leg.target);
        const rightSource = this.joinContext.resolveSource(leg.target);
        // Targets that resolve but expose no subscribe() are
        // silently skipped — their changes will not re-fire.
        if (rightSource?.subscribe) {
          const rightSubscribe = rightSource.subscribe.bind(rightSource);
          upstreams.push({
            subscribe: (cb) => rightSubscribe(cb)
          });
        }
      }
    }
    return buildLiveQuery(() => this.toArray(), upstreams);
  }
|
|
730
|
+
/**
|
|
731
|
+
* Return the plan as a JSON-friendly object. FilterClause entries are
|
|
732
|
+
* stripped (their `fn` cannot be serialized) and replaced with
|
|
733
|
+
* { type: 'filter', fn: '[function]' } so devtools can still see them.
|
|
734
|
+
*/
|
|
735
|
+
toPlan() {
|
|
736
|
+
return serializePlan(this.plan);
|
|
737
|
+
}
|
|
738
|
+
};
|
|
739
|
+
/**
 * Execute a query plan against a live source: index-assisted
 * candidate selection, residual per-record filtering, then ordering
 * and offset/limit pagination. Always returns a fresh array — the
 * source snapshot is never mutated.
 */
function executePlanWithSource(source, plan) {
  const { candidates, remainingClauses } = candidateRecords(source, plan.clauses);
  let rows;
  if (remainingClauses.length === 0) {
    // Copy so downstream sort/slice cannot alias the snapshot.
    rows = [...candidates];
  } else {
    rows = filterRecords(candidates, remainingClauses);
  }
  if (plan.orderBy.length > 0) rows = sortRecords(rows, plan.orderBy);
  if (plan.offset > 0) rows = rows.slice(plan.offset);
  if (plan.limit !== undefined) rows = rows.slice(0, plan.limit);
  return rows;
}
|
|
753
|
+
/**
 * Select candidate records for a clause list. When the source
 * exposes secondary indexes and an id lookup, the FIRST '==' or
 * (array-valued) 'in' field clause backed by an index is answered
 * from the index; all other clauses are returned as residuals for
 * per-record filtering. Falls back to a full snapshot whenever no
 * clause can be accelerated.
 */
function candidateRecords(source, clauses) {
  const indexes = source.getIndexes?.();
  // No indexes, no id lookup, or nothing to filter → full scan.
  if (!indexes || !source.lookupById || clauses.length === 0) {
    return { candidates: source.snapshot(), remainingClauses: clauses };
  }
  const lookupById = (id) => source.lookupById?.(id);
  for (let i = 0; i < clauses.length; i++) {
    const clause = clauses[i];
    if (clause.type !== "field" || !indexes.has(clause.field)) continue;
    let ids = null;
    if (clause.op === "==") {
      ids = indexes.lookupEqual(clause.field, clause.value);
    } else if (clause.op === "in" && Array.isArray(clause.value)) {
      ids = indexes.lookupIn(clause.field, clause.value);
    }
    if (ids === null) continue;
    // The accelerated clause is consumed; every other clause stays
    // as a residual.
    const remaining = clauses.filter((_, j) => j !== i);
    return {
      candidates: materializeIds(ids, lookupById),
      remainingClauses: remaining
    };
  }
  return { candidates: source.snapshot(), remainingClauses: clauses };
}
|
|
782
|
+
/**
 * Resolve ids to records via the supplied lookup, preserving order
 * and silently dropping ids that no longer resolve (index entries
 * can momentarily outlive their records).
 */
function materializeIds(ids, lookupById) {
  const records = [];
  for (const id of ids) {
    const hit = lookupById(id);
    if (hit === undefined) continue;
    records.push(hit);
  }
  return records;
}
|
|
790
|
+
/**
 * Execute a plan over an in-memory record array (no index
 * acceleration): filter by every clause, then order, then apply
 * offset/limit pagination. Never mutates `records`.
 */
function executePlan(records, plan) {
  let rows = filterRecords(records, plan.clauses);
  if (plan.orderBy.length > 0) rows = sortRecords(rows, plan.orderBy);
  const { offset, limit } = plan;
  if (offset > 0) rows = rows.slice(offset);
  if (limit !== undefined) rows = rows.slice(0, limit);
  return rows;
}
|
|
803
|
+
/**
 * Keep only records matching EVERY clause (AND semantics, with
 * short-circuit on the first failing clause). Always returns a
 * fresh array, even when there are no clauses.
 */
function filterRecords(records, clauses) {
  if (clauses.length === 0) return [...records];
  const matching = [];
  for (const record of records) {
    if (clauses.every((clause) => evaluateClause(record, clause))) {
      matching.push(record);
    }
  }
  return matching;
}
|
|
818
|
+
/**
 * Return a sorted copy ordered by each { field, direction } key in
 * turn; later keys break ties left by earlier ones. The input
 * array is never mutated.
 */
function sortRecords(records, orderBy) {
  const copy = [...records];
  copy.sort((left, right) => {
    for (const { field, direction } of orderBy) {
      const ordering = compareValues(readField(left, field), readField(right, field));
      if (ordering === 0) continue;
      return direction === "asc" ? ordering : -ordering;
    }
    return 0;
  });
  return copy;
}
|
|
829
|
+
/**
 * Read a (possibly dot-separated) field path from a record.
 * Yields undefined when the record itself or any intermediate
 * segment along the path is null or undefined.
 */
function readField(record, field) {
  if (record == null) return undefined;
  if (!field.includes(".")) return record[field];
  let value = record;
  for (const segment of field.split(".")) {
    if (value == null) return undefined;
    value = value[segment];
  }
  return value;
}
|
|
842
|
+
/**
 * Three-way comparator used by orderBy. Null/undefined sort LAST
 * in ascending order; numbers, strings, and Dates compare
 * naturally; mismatched or unsupported types compare equal
 * (comparator returns 0).
 */
function compareValues(a, b) {
  const aNil = a === undefined || a === null;
  const bNil = b === undefined || b === null;
  if (aNil) return bNil ? 0 : 1;
  if (bNil) return -1;
  if (typeof a === "number" && typeof b === "number") return a - b;
  if (typeof a === "string" && typeof b === "string") {
    if (a < b) return -1;
    if (a > b) return 1;
    return 0;
  }
  if (a instanceof Date && b instanceof Date) return a.getTime() - b.getTime();
  return 0;
}
|
|
850
|
+
/**
 * JSON-friendly snapshot of a plan. Only clauses need
 * transformation (their functions cannot serialize); every other
 * plan field passes through unchanged.
 */
function serializePlan(plan) {
  const { orderBy, limit, offset, joins } = plan;
  return {
    clauses: plan.clauses.map(serializeClause),
    orderBy,
    limit,
    offset,
    joins
  };
}
|
|
859
|
+
/**
 * JSON-friendly form of a single clause. Filter clauses swap their
 * function for the '[function]' placeholder; group clauses recurse
 * into their children; field clauses pass through as-is.
 */
function serializeClause(clause) {
  switch (clause.type) {
    case "filter":
      return { type: "filter", fn: "[function]" };
    case "group":
      return {
        type: "group",
        op: clause.op,
        clauses: clause.clauses.map(serializeClause)
      };
    default:
      return clause;
  }
}
|
|
872
|
+
|
|
873
|
+
// src/query/scan-builder.ts

// Default number of records fetched per adapter page when a scan
// is constructed without an explicit pageSize.
var DEFAULT_SCAN_PAGE_SIZE = 100;
|
|
875
|
+
/**
 * Immutable streaming query builder over an adapter page provider.
 * Every chainable call (`where` / `filter` / `join`) returns a NEW
 * ScanBuilder; terminals are async iteration and `.aggregate()`.
 * The left side streams page-by-page and is never materialized.
 */
var ScanBuilder = class _ScanBuilder {
  // Adapter-backed paginator: { listPage({ cursor?, limit }) }.
  pageProvider;
  // Records requested per page; defaults to DEFAULT_SCAN_PAGE_SIZE.
  pageSize;
  // AND-combined clause list applied per record as the stream flows.
  clauses;
  /**
   * Zero-or-more join legs to apply per record as the stream flows.
   * Each leg attaches the resolved right-side record (or null) under
   * its alias (streaming joins).
   *
   * Joins are evaluated AFTER clauses, so a `where()` filtered-out
   * record never triggers a right-side lookup. This is the same
   * ordering as `Query.toArray()` (clauses first, joins after) and
   * keeps the streaming path from doing wasted work.
   */
  joins;
  /**
   * Join resolution context. Required for `.join()` to translate a
   * field name into a target collection + ref mode and to resolve
   * the right-side `JoinableSource`. Optional because tests
   * construct ScanBuilder directly with synthetic page providers
   * that don't know about ref() — calling `.join()` without a
   * context throws with an actionable error.
   */
  joinContext;
  constructor(pageProvider, pageSize = DEFAULT_SCAN_PAGE_SIZE, clauses = [], joins = [], joinContext) {
    this.pageProvider = pageProvider;
    this.pageSize = pageSize;
    this.clauses = clauses;
    this.joins = joins;
    this.joinContext = joinContext;
  }
  /**
   * Add a field comparison. Runs per record as the scan stream
   * flows through, so non-matching records are dropped before they
   * reach `.aggregate()` or the iteration consumer. Multiple
   * `.where()` calls are AND-combined — same semantics as
   * `Query.where()`.
   *
   * Clauses cannot use the secondary-index fast path here because
   * the scan sources records from the adapter's paginator, not from
   * the in-memory cache where indexes live. Index-accelerated scans
   * are a future optimization — the current implementation
   * evaluates clauses per record in O(1) per clause.
   */
  where(field, op, value) {
    const clause = { type: "field", field, op, value };
    return new _ScanBuilder(
      this.pageProvider,
      this.pageSize,
      [...this.clauses, clause],
      this.joins,
      this.joinContext
    );
  }
  /**
   * Escape hatch: add an arbitrary predicate function. Same
   * non-serializable caveat as `Query.filter()` — filter clauses
   * don't round-trip through `toPlan()`. Prefer `.where()` when
   * possible.
   */
  filter(fn) {
    const clause = {
      type: "filter",
      fn
    };
    return new _ScanBuilder(
      this.pageProvider,
      this.pageSize,
      [...this.clauses, clause],
      this.joins,
      this.joinContext
    );
  }
  /**
   * Resolve a `ref()`-declared foreign key per record as the scan
   * stream flows, attaching the right-side record (or null) under
   * `opts.as` (streaming joins over `scan()`).
   *
   * ```ts
   * for await (const inv of invoices.scan().join('clientId', { as: 'client' })) {
   *   await processInvoice(inv) // inv.client is attached
   * }
   *
   * // Or terminate with .aggregate() for streaming joined aggregation
   * const { total } = await invoices.scan()
   *   .where('status', '==', 'open')
   *   .join('clientId', { as: 'client' })
   *   .aggregate({ total: sum('amount') })
   * ```
   *
   * **The key difference from eager `.join()`:** the LEFT
   * side streams page-by-page from the adapter and is never
   * materialized. Memory ceiling on the left is O(pageSize), not
   * O(rowCount). This is what makes streaming joins suitable for
   * collections that exceed the eager join's 50_000-row ceiling.
   *
   * **Right-side strategy** is auto-selected per leg:
   * - **Indexed** — right source exposes `lookupById`, so each
   *   left row costs O(1). This is the common path for
   *   Collection right sides, which back `lookupById` with a Map
   *   lookup over the in-memory cache. The right collection must
   *   be in eager mode (the same constraint as eager join's
   *   `querySourceForJoin`).
   * - **Hash** — right source has only `snapshot()`. Build a
   *   `Map<id, record>` once at iteration start, probe per left
   *   row. Same correctness, same per-row cost as the indexed
   *   path; the difference is the upfront cost of materializing
   *   the right side once.
   *
   * Both strategies hold the right side in memory for the duration
   * of the iteration. The "streaming" property applies to the LEFT
   * side only — true left-and-right streaming joins (where neither
   * side fits in memory) require a sort-merge join planner that's
   * out of scope for v1.
   *
   * **Ref-mode semantics** match eager `.join()` exactly:
   * - `strict` → throws `DanglingReferenceError` mid-stream
   *   when a left record points at a non-existent right id.
   *   The throw aborts the async iterator — consumers should
   *   wrap the `for await` in try/catch if they want to recover.
   * - `warn` → attaches `null` and emits a one-shot warning
   *   per unique dangling pair (deduped via the same warn
   *   channel as eager join).
   * - `cascade` → attaches `null` silently. A delete-time mode;
   *   dangling refs at read time are mid-flight or pre-existing
   *   orphans, not a DSL error.
   *
   * Left records with null/undefined FK values attach `null`
   * regardless of mode — same "no reference at all" policy as
   * eager join and write-time `enforceRefsOnPut`.
   *
   * **Multi-FK chaining** is supported via repeated `.join()`
   * calls: each leg resolves an independent ref. Each leg
   * independently picks its right-side strategy and applies its
   * own ref mode.
   *
   * **Joins ARE applied before a `.aggregate()` terminal.** Unlike
   * `Query.aggregate()` (which skips joins entirely as a
   * projection-only short-circuit), the streaming aggregation can't
   * know whether the spec touches a joined field, so it always
   * applies joins. Consumers who want unjoined streaming
   * aggregation should leave `.join()` off the chain — the chain is
   * composable for a reason.
   *
   * constraint #1 — every JoinLeg carries `partitionScope:
   * 'all'` plumbed through but never read by the streaming
   * executor. Same seam as eager join.
   */
  join(field, opts) {
    if (!this.joinContext) {
      throw new Error(
        `ScanBuilder.join() requires a join context. Use collection.scan() to construct a join-capable scan instead of the ScanBuilder constructor directly (the direct constructor is only used for tests with synthetic page providers).`
      );
    }
    const descriptor = this.joinContext.resolveRef(field);
    if (!descriptor) {
      throw new Error(
        `ScanBuilder.join(): no ref() declared for field "${field}" on collection "${this.joinContext.leftCollection}". Add refs: { ${field}: ref('<target-collection>') } to the collection options, then retry.`
      );
    }
    const leg = {
      field,
      as: opts.as,
      target: descriptor.target,
      mode: descriptor.mode,
      strategy: void 0,
      maxRows: void 0,
      // constraint #1 — always 'all'; never read by
      // the streaming executor. partition-aware scan joins
      // will populate this from where() predicates without
      // changing the planner shape.
      partitionScope: "all"
    };
    return new _ScanBuilder(
      this.pageProvider,
      this.pageSize,
      this.clauses,
      [...this.joins, leg],
      this.joinContext
    );
  }
  /**
   * Iterate the scan as an async iterable. Walks the page
   * provider's cursors forward until exhaustion, applying every
   * clause per record — only matching records are yielded.
   *
   * Backward-compatible with the previous async-generator `scan()`
   * return type for `for await … of` consumers.
   */
  async *[Symbol.asyncIterator]() {
    // Right-side resolvers are built ONCE per iteration and reused
    // for every left record.
    const joinResolvers = this.joins.length === 0 ? null : this.buildJoinResolvers();
    let page = await this.pageProvider.listPage({ limit: this.pageSize });
    while (true) {
      for (const record of page.items) {
        // Clauses first — filtered-out records never trigger a
        // right-side lookup.
        if (!this.recordMatches(record)) continue;
        if (joinResolvers === null) {
          yield record;
        } else {
          let attached = record;
          for (const resolver of joinResolvers) {
            attached = this.applyOneJoinStreaming(attached, resolver);
          }
          yield attached;
        }
      }
      if (page.nextCursor === null) return;
      page = await this.pageProvider.listPage({
        cursor: page.nextCursor,
        limit: this.pageSize
      });
    }
  }
  /**
   * Per-leg right-side resolution state. Built once at iteration
   * start and reused for every left record. Two strategies:
   *
   * - `lookupById`: present when the right source exposes the
   *   hook directly (typical Collection right side). Per-row
   *   cost is O(1).
   * - `hashByPrimaryKey`: built from `snapshot()` when no
   *   lookupById. Per-row cost is O(1) after the upfront O(N)
   *   materialization. Same as eager join's hash strategy.
   *
   * `warnedKeys` is the per-leg dedup set for ref-mode 'warn'. We
   * key on `field→target:refId` so the same dangling pair only
   * warns once per iteration. The dedup is per-iteration, not
   * per-process — a long-running scan that re-iterates would warn
   * again, which is the desired behavior (the data may have
   * changed between iterations).
   */
  buildJoinResolvers() {
    if (!this.joinContext) {
      throw new Error(
        `ScanBuilder iterator: ${this.joins.length} join leg(s) present but no JoinContext attached. Use collection.scan() to construct a join-capable scan.`
      );
    }
    const resolvers = [];
    for (const leg of this.joins) {
      const source = this.joinContext.resolveSource(leg.target);
      if (!source) {
        throw new Error(
          `ScanBuilder.join() cannot resolve target collection "${leg.target}" (referenced from field "${leg.field}" on "${this.joinContext.leftCollection}"). Make sure the target collection has been opened via vault.collection() at least once before iterating the scan.`
        );
      }
      let lookupById = null;
      let hashByPrimaryKey = null;
      if (source.lookupById) {
        // Indexed strategy: delegate straight to the source.
        const fn = source.lookupById.bind(source);
        lookupById = (id) => fn(id);
      } else {
        // Hash strategy: materialize the right side once, keyed by
        // the coerced primary key. Records with non-coercible ids
        // are unreachable by any ref and are skipped.
        const map = /* @__PURE__ */ new Map();
        for (const record of source.snapshot()) {
          const rawId = readPath(record, "id");
          const key = coerceRefKey2(rawId);
          if (key !== null) map.set(key, record);
        }
        hashByPrimaryKey = map;
      }
      resolvers.push({
        leg,
        source,
        lookupById,
        hashByPrimaryKey,
        warnedKeys: /* @__PURE__ */ new Set()
      });
    }
    return resolvers;
  }
  /**
   * Resolve a single join leg for one left record and return the
   * left record with the joined field attached under
   * `leg.as`. Pure function over `(left, resolver)`; never
   * mutates the input.
   *
   * Ref-mode dispatch matches eager `applyJoins`:
   * - null/undefined FK → attach null silently (always allowed)
   * - dangling FK + strict → throw `DanglingReferenceError`
   * - dangling FK + warn → attach null, warn-once per pair
   * - dangling FK + cascade → attach null silently
   */
  applyOneJoinStreaming(left, resolver) {
    // Non-object rows cannot carry an attached alias; pass through.
    if (left === null || typeof left !== "object") {
      return left;
    }
    const { leg } = resolver;
    const rawId = readPath(left, leg.field);
    const refKey = coerceRefKey2(rawId);
    let right = void 0;
    if (refKey !== null) {
      if (resolver.lookupById !== null) {
        right = resolver.lookupById(refKey);
      } else if (resolver.hashByPrimaryKey !== null) {
        right = resolver.hashByPrimaryKey.get(refKey);
      }
    }
    // Shallow copy — the caller's record is never mutated.
    const merged = {
      ...left
    };
    if (right === void 0) {
      if (refKey !== null && leg.mode === "strict") {
        throw new DanglingReferenceError({
          field: leg.field,
          target: leg.target,
          refId: refKey,
          message: `ScanBuilder.join() strict dangling: record references "${leg.target}:${refKey}" via field "${leg.field}", but no such record exists. Use ref() mode 'warn' or 'cascade' if dangling refs are acceptable, or run vault.checkIntegrity() to find and fix the orphans.`
        });
      }
      if (refKey !== null && leg.mode === "warn") {
        const dedupKey = `${leg.field}\u2192${leg.target}:${refKey}`;
        if (!resolver.warnedKeys.has(dedupKey)) {
          resolver.warnedKeys.add(dedupKey);
          console.warn(
            `[noy-db] ScanBuilder.join() encountered dangling ref in 'warn' mode: field "${leg.field}" \u2192 "${leg.target}:${refKey}" not found. Attaching null.`
          );
        }
      }
      merged[leg.as] = null;
    } else {
      merged[leg.as] = right;
    }
    return merged;
  }
  /**
   * Reduce the scan stream through a named set of reducers and
   * return the final aggregated shape.
   *
   * Memory is O(reducers): one mutable state slot per spec key.
   * Records flow through the pipeline one at a time via
   * `for await` and are discarded after their `step()` is applied
   * — never collected into an array. This is the distinguishing
   * property from `Query.aggregate()`, which materializes the full
   * match set first.
   *
   * Reuses the same reducer protocol as `Query.aggregate()`,
   * so `count()`, `sum(field)`, `avg(field)`, `min(field)`,
   * `max(field)` all work unchanged. The `{ seed }` parameter
   * plumbing from constraint #2 is honored transparently — the
   * factories ignore it and the scan executor never
   * touches the per-reducer state construction.
   *
   * **Returns a Promise**, unlike `Query.aggregate().run()` which
   * is synchronous. The scan is inherently async because it walks
   * adapter pages, so the terminal has to be too. Consumers
   * destructure with await:
   *
   * ```ts
   * const { total, n } = await invoices.scan()
   *   .where('year', '==', 2025)
   *   .aggregate({ total: sum('amount'), n: count() })
   * ```
   *
   * **No `.live()` on scans.** `scan().aggregate().live()` would
   * require reconciling an unbounded streaming iteration with a
   * change-stream subscription — a design problem, not just a code
   * one. Consumers with huge collections and live needs should
   * narrow with `.where()` enough to fit in the 50k `query()`
   * limit and use `query().aggregate().live()` instead.
   */
  async aggregate(spec) {
    const keys = Object.keys(spec);
    const state = {};
    for (const key of keys) {
      state[key] = spec[key].init();
    }
    for await (const record of this) {
      for (const key of keys) {
        state[key] = spec[key].step(state[key], record);
      }
    }
    const result = {};
    for (const key of keys) {
      result[key] = spec[key].finalize(state[key]);
    }
    return result;
  }
  /**
   * Evaluate the clause list against a single record. Linear in
   * the clause count; short-circuits on first false. Clauses on a
   * scan are always re-evaluated per record — no index-accelerated
   * path, because the stream sources records from the adapter
   * paginator, not from the in-memory cache where indexes live.
   */
  recordMatches(record) {
    if (this.clauses.length === 0) return true;
    for (const clause of this.clauses) {
      if (!evaluateClause(record, clause)) return false;
    }
    return true;
  }
};
|
|
1268
|
+
/**
 * Normalize a foreign-key value to its canonical string form.
 * Strings pass through unchanged; numbers and bigints are
 * stringified; every other value — including null/undefined,
 * booleans, and objects — yields null, meaning "no usable
 * reference".
 */
function coerceRefKey2(value) {
  if (value == null) return null;
  switch (typeof value) {
    case "string":
      return value;
    case "number":
    case "bigint":
      return String(value);
    default:
      return null;
  }
}
|
|
1274
|
+
|
|
1275
|
+
export {
|
|
1276
|
+
DEFAULT_JOIN_MAX_ROWS,
|
|
1277
|
+
applyJoins,
|
|
1278
|
+
resetJoinWarnings,
|
|
1279
|
+
buildLiveQuery,
|
|
1280
|
+
NO_AGGREGATE,
|
|
1281
|
+
Query,
|
|
1282
|
+
executePlan,
|
|
1283
|
+
ScanBuilder
|
|
1284
|
+
};
|
|
1285
|
+
//# sourceMappingURL=chunk-GOUT6DND.js.map
|