@vellumai/assistant 0.5.3 → 0.5.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/docs/architecture/memory.md +105 -0
- package/package.json +1 -1
- package/src/__tests__/archive-recall.test.ts +560 -0
- package/src/__tests__/conversation-clear-safety.test.ts +259 -0
- package/src/__tests__/conversation-switch-memory-reduction.test.ts +474 -0
- package/src/__tests__/db-schedule-syntax-migration.test.ts +3 -0
- package/src/__tests__/memory-reducer-job.test.ts +538 -0
- package/src/__tests__/memory-reducer-scheduling.test.ts +473 -0
- package/src/__tests__/memory-reducer-types.test.ts +12 -4
- package/src/__tests__/memory-reducer.test.ts +7 -1
- package/src/__tests__/memory-regressions.test.ts +24 -4
- package/src/__tests__/memory-simplified-config.test.ts +4 -4
- package/src/__tests__/simplified-memory-e2e.test.ts +666 -0
- package/src/__tests__/simplified-memory-runtime.test.ts +616 -0
- package/src/cli/commands/conversations.ts +18 -0
- package/src/config/bundled-skills/schedule/TOOLS.json +8 -0
- package/src/config/loader.ts +0 -1
- package/src/config/schemas/memory-simplified.ts +1 -1
- package/src/daemon/conversation-memory.ts +117 -0
- package/src/daemon/conversation-runtime-assembly.ts +1 -0
- package/src/daemon/handlers/conversations.ts +11 -0
- package/src/daemon/lifecycle.ts +44 -1
- package/src/memory/archive-recall.ts +516 -0
- package/src/memory/brief-time.ts +5 -4
- package/src/memory/conversation-crud.ts +210 -0
- package/src/memory/conversation-key-store.ts +33 -4
- package/src/memory/db-init.ts +4 -0
- package/src/memory/job-handlers/backfill-simplified-memory.ts +462 -0
- package/src/memory/job-handlers/conversation-starters.ts +9 -3
- package/src/memory/job-handlers/reduce-conversation-memory.ts +229 -0
- package/src/memory/jobs-store.ts +2 -0
- package/src/memory/jobs-worker.ts +8 -0
- package/src/memory/migrations/036-normalize-phone-identities.ts +49 -14
- package/src/memory/migrations/135-backfill-contact-interaction-stats.ts +9 -1
- package/src/memory/migrations/141-rename-verification-table.ts +8 -0
- package/src/memory/migrations/142-rename-verification-session-id-column.ts +7 -2
- package/src/memory/migrations/174-rename-thread-starters-table.ts +8 -0
- package/src/memory/migrations/188-schedule-quiet-flag.ts +13 -0
- package/src/memory/migrations/index.ts +1 -0
- package/src/memory/reducer-scheduler.ts +242 -0
- package/src/memory/reducer-types.ts +9 -2
- package/src/memory/reducer.ts +25 -11
- package/src/memory/schema/infrastructure.ts +1 -0
- package/src/runtime/auth/route-policy.ts +10 -1
- package/src/runtime/routes/conversation-management-routes.ts +88 -2
- package/src/runtime/routes/guardian-bootstrap-routes.ts +19 -7
- package/src/runtime/routes/secret-routes.ts +1 -0
- package/src/schedule/schedule-store.ts +7 -0
- package/src/schedule/scheduler.ts +6 -2
- package/src/telemetry/usage-telemetry-reporter.ts +1 -1
- package/src/tools/filesystem/edit.ts +6 -1
- package/src/tools/filesystem/read.ts +6 -1
- package/src/tools/filesystem/write.ts +6 -1
- package/src/tools/memory/handlers.ts +129 -1
- package/src/tools/schedule/create.ts +3 -0
- package/src/tools/schedule/list.ts +5 -1
- package/src/tools/schedule/update.ts +6 -0
|
@@ -0,0 +1,516 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Archive recall: retrieval layer over the simplified memory archive tables
|
|
3
|
+
* (memory_observations, memory_chunks, memory_episodes).
|
|
4
|
+
*
|
|
5
|
+
* Two retrieval paths:
|
|
6
|
+
*
|
|
7
|
+
* 1. **Prefetch** — lightweight query run on every turn. Fetches recent
|
|
8
|
+
* episodes and observations to detect whether the user's turn references
|
|
9
|
+
* past context that the archive can answer.
|
|
10
|
+
*
|
|
11
|
+
* 2. **Deeper recall** — triggered when the prefetch surfaces strong hits,
|
|
12
|
+
* or when the user's turn contains explicit past-reference or
|
|
13
|
+
* analogy/debugging-shaped language. Queries all three archive tables
|
|
14
|
+
* and returns up to 3 source-linked bullets wrapped in
|
|
15
|
+
* `<supporting_recall>`.
|
|
16
|
+
*
|
|
17
|
+
* Empty results produce no output (no `<supporting_recall>` tag).
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
import { and, desc, eq, like, or, sql } from "drizzle-orm";
|
|
21
|
+
|
|
22
|
+
import { getLogger } from "../util/logger.js";
|
|
23
|
+
import { getDb } from "./db.js";
|
|
24
|
+
import { memoryChunks, memoryEpisodes, memoryObservations } from "./schema.js";
|
|
25
|
+
|
|
26
|
+
const log = getLogger("memory-archive-recall");
|
|
27
|
+
|
|
28
|
+
// ── Pattern matchers ────────────────────────────────────────────────
|
|
29
|
+
|
|
30
|
+
/**
 * Phrases that signal the user is explicitly referencing a past
 * interaction, artifact, or fact the assistant should recall.
 *
 * Case-insensitive; tested against the raw user turn by
 * `classifyRecallTrigger`. NOTE(review): broad single words such as
 * "before", "earlier", and "said" will also fire on turns that are not
 * recall requests — confirm the false-positive rate is acceptable.
 */
const PAST_REFERENCE_PATTERNS = [
  /\b(?:remember|recall|mentioned|talked about|discussed|said|told you|last time|earlier|before|previously)\b/i,
  /\bwhat (?:did|was|were)\b.*\b(?:we|i|you)\b/i,
  /\bdo you (?:know|remember)\b/i,
];
|
|
39
|
+
|
|
40
|
+
/**
 * Phrases that signal an analogy or debugging-shaped query where
 * historical context would be especially valuable.
 *
 * Case-insensitive. NOTE(review): the bare alternative `again` in the
 * third pattern matches ANY turn containing the word "again", which
 * makes this trigger very broad — verify that is intentional.
 */
const ANALOGY_DEBUG_PATTERNS = [
  /\b(?:similar to|like when|same (?:issue|problem|error|bug)|happened before|recurring|déjà vu)\b/i,
  /\b(?:last time.*(?:fix|solve|debug|resolve))\b/i,
  /\b(?:keep (?:getting|seeing|hitting)|again|keeps happening)\b/i,
];
|
|
49
|
+
|
|
50
|
+
// ── Turn classification ─────────────────────────────────────────────
|
|
51
|
+
|
|
52
|
+
/**
 * Why deeper archive recall was (or was not) activated for a turn.
 * Checked in priority order by `classifyRecallTrigger`:
 * - "explicit_past_reference": turn matched PAST_REFERENCE_PATTERNS.
 * - "analogy_debug": turn matched ANALOGY_DEBUG_PATTERNS.
 * - "strong_prefetch": the prefetch produced 2 or more hits.
 * - "none": no trigger; deeper recall is skipped entirely.
 */
export type RecallTrigger =
  | "explicit_past_reference"
  | "analogy_debug"
  | "strong_prefetch"
  | "none";
|
|
57
|
+
|
|
58
|
+
/**
|
|
59
|
+
* Classify whether a user turn warrants deeper archive recall.
|
|
60
|
+
*/
|
|
61
|
+
export function classifyRecallTrigger(
|
|
62
|
+
userText: string,
|
|
63
|
+
prefetchHitCount: number,
|
|
64
|
+
): RecallTrigger {
|
|
65
|
+
if (PAST_REFERENCE_PATTERNS.some((p) => p.test(userText))) {
|
|
66
|
+
return "explicit_past_reference";
|
|
67
|
+
}
|
|
68
|
+
if (ANALOGY_DEBUG_PATTERNS.some((p) => p.test(userText))) {
|
|
69
|
+
return "analogy_debug";
|
|
70
|
+
}
|
|
71
|
+
if (prefetchHitCount >= 2) {
|
|
72
|
+
return "strong_prefetch";
|
|
73
|
+
}
|
|
74
|
+
return "none";
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
// ── Prefetch ────────────────────────────────────────────────────────
|
|
78
|
+
|
|
79
|
+
/** A lightweight prefetch hit from the archive tables. */
export interface PrefetchHit {
  /** Which archive table produced this hit. */
  source: "episode" | "observation" | "chunk";
  /** Row ID in the source table. */
  id: string;
  /** Matched text; for episodes this is rendered as "title: summary". */
  content: string;
  /** Row creation timestamp (epoch value — presumably ms per the schema; TODO confirm). */
  createdAt: number;
  /** Originating conversation, when the source row records one. */
  conversationId?: string | null;
}
|
|
87
|
+
|
|
88
|
+
/**
|
|
89
|
+
* Lightweight prefetch over recent episodes and observations for the
|
|
90
|
+
* given scope. Returns up to `limit` hits ordered by recency. This is
|
|
91
|
+
* cheap enough to run on every turn.
|
|
92
|
+
*/
|
|
93
|
+
export function prefetchArchive(
|
|
94
|
+
scopeId: string,
|
|
95
|
+
userText: string,
|
|
96
|
+
limit: number = 10,
|
|
97
|
+
): PrefetchHit[] {
|
|
98
|
+
const db = getDb();
|
|
99
|
+
const hits: PrefetchHit[] = [];
|
|
100
|
+
|
|
101
|
+
// Extract meaningful keywords from user text (words >= 4 chars)
|
|
102
|
+
const keywords = extractKeywords(userText);
|
|
103
|
+
if (keywords.length === 0) return hits;
|
|
104
|
+
|
|
105
|
+
try {
|
|
106
|
+
// Query recent episodes whose title or summary contain any keyword
|
|
107
|
+
const episodeConditions = keywords.map((kw) =>
|
|
108
|
+
or(
|
|
109
|
+
like(memoryEpisodes.title, `%${kw}%`),
|
|
110
|
+
like(memoryEpisodes.summary, `%${kw}%`),
|
|
111
|
+
),
|
|
112
|
+
);
|
|
113
|
+
|
|
114
|
+
const episodes = db
|
|
115
|
+
.select({
|
|
116
|
+
id: memoryEpisodes.id,
|
|
117
|
+
title: memoryEpisodes.title,
|
|
118
|
+
summary: memoryEpisodes.summary,
|
|
119
|
+
createdAt: memoryEpisodes.createdAt,
|
|
120
|
+
conversationId: memoryEpisodes.conversationId,
|
|
121
|
+
})
|
|
122
|
+
.from(memoryEpisodes)
|
|
123
|
+
.where(and(eq(memoryEpisodes.scopeId, scopeId), or(...episodeConditions)))
|
|
124
|
+
.orderBy(desc(memoryEpisodes.createdAt))
|
|
125
|
+
.limit(limit)
|
|
126
|
+
.all();
|
|
127
|
+
|
|
128
|
+
for (const ep of episodes) {
|
|
129
|
+
hits.push({
|
|
130
|
+
source: "episode",
|
|
131
|
+
id: ep.id,
|
|
132
|
+
content: `${ep.title}: ${ep.summary}`,
|
|
133
|
+
createdAt: ep.createdAt,
|
|
134
|
+
conversationId: ep.conversationId,
|
|
135
|
+
});
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
// Query recent observations whose content matches any keyword
|
|
139
|
+
const observationConditions = keywords.map((kw) =>
|
|
140
|
+
like(memoryObservations.content, `%${kw}%`),
|
|
141
|
+
);
|
|
142
|
+
|
|
143
|
+
const observations = db
|
|
144
|
+
.select({
|
|
145
|
+
id: memoryObservations.id,
|
|
146
|
+
content: memoryObservations.content,
|
|
147
|
+
createdAt: memoryObservations.createdAt,
|
|
148
|
+
conversationId: memoryObservations.conversationId,
|
|
149
|
+
})
|
|
150
|
+
.from(memoryObservations)
|
|
151
|
+
.where(
|
|
152
|
+
and(
|
|
153
|
+
eq(memoryObservations.scopeId, scopeId),
|
|
154
|
+
or(...observationConditions),
|
|
155
|
+
),
|
|
156
|
+
)
|
|
157
|
+
.orderBy(desc(memoryObservations.createdAt))
|
|
158
|
+
.limit(limit)
|
|
159
|
+
.all();
|
|
160
|
+
|
|
161
|
+
for (const obs of observations) {
|
|
162
|
+
hits.push({
|
|
163
|
+
source: "observation",
|
|
164
|
+
id: obs.id,
|
|
165
|
+
content: obs.content,
|
|
166
|
+
createdAt: obs.createdAt,
|
|
167
|
+
conversationId: obs.conversationId,
|
|
168
|
+
});
|
|
169
|
+
}
|
|
170
|
+
} catch (err) {
|
|
171
|
+
log.warn({ err }, "Archive prefetch failed");
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
// Sort all hits by recency and cap at limit
|
|
175
|
+
hits.sort((a, b) => b.createdAt - a.createdAt);
|
|
176
|
+
return hits.slice(0, limit);
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
// ── Deeper recall ───────────────────────────────────────────────────
|
|
180
|
+
|
|
181
|
+
/** A source-linked recall bullet for injection. */
export interface RecallBullet {
  /** Human-readable one-line summary (truncated to ~200 chars). */
  text: string;
  /** Which archive table sourced this bullet. */
  source: "episode" | "observation" | "chunk";
  /** Row ID in the source table. */
  sourceId: string;
  /** Optional conversation title for provenance; absent on bullets promoted directly from prefetch hits. */
  conversationTitle?: string | null;
}
|
|
192
|
+
|
|
193
|
+
/**
 * Outcome of running archive recall for one user turn.
 * When `trigger` is "none", `bullets` is empty and `text` is "".
 */
export interface ArchiveRecallResult {
  /** The recall trigger that activated deeper recall (or "none"). */
  trigger: RecallTrigger;
  /** Up to 3 source-linked bullets. Empty when no relevant results. */
  bullets: RecallBullet[];
  /** Rendered `<supporting_recall>` block, or empty string. */
  text: string;
  /** Number of prefetch hits examined. */
  prefetchHitCount: number;
}
|
|
203
|
+
|
|
204
|
+
/**
|
|
205
|
+
* Run archive recall for a user turn.
|
|
206
|
+
*
|
|
207
|
+
* 1. Runs a lightweight prefetch over episodes and observations.
|
|
208
|
+
* 2. Classifies whether deeper recall is warranted.
|
|
209
|
+
* 3. If triggered, queries all three archive tables and assembles
|
|
210
|
+
* up to 3 source-linked bullets.
|
|
211
|
+
* 4. Returns rendered `<supporting_recall>` or empty string.
|
|
212
|
+
*/
|
|
213
|
+
export function buildArchiveRecall(
|
|
214
|
+
scopeId: string,
|
|
215
|
+
userText: string,
|
|
216
|
+
): ArchiveRecallResult {
|
|
217
|
+
// Step 1: prefetch
|
|
218
|
+
const prefetchHits = prefetchArchive(scopeId, userText);
|
|
219
|
+
const prefetchHitCount = prefetchHits.length;
|
|
220
|
+
|
|
221
|
+
// Step 2: classify
|
|
222
|
+
const trigger = classifyRecallTrigger(userText, prefetchHitCount);
|
|
223
|
+
|
|
224
|
+
if (trigger === "none") {
|
|
225
|
+
return {
|
|
226
|
+
trigger,
|
|
227
|
+
bullets: [],
|
|
228
|
+
text: "",
|
|
229
|
+
prefetchHitCount,
|
|
230
|
+
};
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
// Step 3: deeper recall
|
|
234
|
+
const bullets = deeperRecall(scopeId, userText, prefetchHits);
|
|
235
|
+
|
|
236
|
+
// Step 4: render
|
|
237
|
+
const text = renderSupportingRecall(bullets);
|
|
238
|
+
|
|
239
|
+
log.debug(
|
|
240
|
+
{
|
|
241
|
+
trigger,
|
|
242
|
+
prefetchHitCount,
|
|
243
|
+
bulletCount: bullets.length,
|
|
244
|
+
},
|
|
245
|
+
"Archive recall completed",
|
|
246
|
+
);
|
|
247
|
+
|
|
248
|
+
return {
|
|
249
|
+
trigger,
|
|
250
|
+
bullets,
|
|
251
|
+
text,
|
|
252
|
+
prefetchHitCount,
|
|
253
|
+
};
|
|
254
|
+
}
|
|
255
|
+
|
|
256
|
+
// ── Deeper recall implementation ────────────────────────────────────
|
|
257
|
+
|
|
258
|
+
/**
 * Query all three archive tables for the user's text and assemble
 * up to 3 source-linked bullets. Prioritizes episodes (narrative
 * summaries) over observations (raw facts) over chunks (indexed text).
 *
 * Order matters: each stage only runs while fewer than 3 bullets are
 * held, and a shared normalized-content set dedupes across stages.
 * After the DB stages, leftover prefetch hits fill any remaining slots.
 *
 * @param scopeId - Memory scope to search within.
 * @param userText - Raw user turn; mined for keywords.
 * @param prefetchHits - Hits already gathered by `prefetchArchive`.
 */
function deeperRecall(
  scopeId: string,
  userText: string,
  prefetchHits: PrefetchHit[],
): RecallBullet[] {
  const db = getDb();
  const keywords = extractKeywords(userText);
  if (keywords.length === 0) return [];

  const bullets: RecallBullet[] = [];
  // Normalized content already emitted, shared across all stages.
  const seenContent = new Set<string>();
  const MAX_BULLETS = 3;

  try {
    // --- Episodes: highest signal (narrative summaries) ---
    const episodeConditions = keywords.map((kw) =>
      or(
        like(memoryEpisodes.title, `%${kw}%`),
        like(memoryEpisodes.summary, `%${kw}%`),
      ),
    );

    const episodes = db
      .select({
        id: memoryEpisodes.id,
        title: memoryEpisodes.title,
        summary: memoryEpisodes.summary,
        conversationId: memoryEpisodes.conversationId,
      })
      .from(memoryEpisodes)
      .where(and(eq(memoryEpisodes.scopeId, scopeId), or(...episodeConditions)))
      .orderBy(desc(memoryEpisodes.createdAt))
      .limit(MAX_BULLETS)
      .all();

    for (const ep of episodes) {
      if (bullets.length >= MAX_BULLETS) break;
      // Dedup key is the summary only (the bullet text adds the title).
      const normalized = normalizeForDedup(ep.summary);
      if (seenContent.has(normalized)) continue;
      seenContent.add(normalized);

      const convTitle = lookupConversationTitle(db, ep.conversationId);
      bullets.push({
        text: `${ep.title} — ${truncate(ep.summary, 200)}`,
        source: "episode",
        sourceId: ep.id,
        conversationTitle: convTitle,
      });
    }

    // --- Observations: raw factual statements ---
    if (bullets.length < MAX_BULLETS) {
      const observationConditions = keywords.map((kw) =>
        like(memoryObservations.content, `%${kw}%`),
      );

      const observations = db
        .select({
          id: memoryObservations.id,
          content: memoryObservations.content,
          conversationId: memoryObservations.conversationId,
        })
        .from(memoryObservations)
        .where(
          and(
            eq(memoryObservations.scopeId, scopeId),
            or(...observationConditions),
          ),
        )
        .orderBy(desc(memoryObservations.createdAt))
        .limit(MAX_BULLETS)
        .all();

      for (const obs of observations) {
        if (bullets.length >= MAX_BULLETS) break;
        const normalized = normalizeForDedup(obs.content);
        if (seenContent.has(normalized)) continue;
        seenContent.add(normalized);

        const convTitle = lookupConversationTitle(db, obs.conversationId);
        bullets.push({
          text: truncate(obs.content, 200),
          source: "observation",
          sourceId: obs.id,
          conversationTitle: convTitle,
        });
      }
    }

    // --- Chunks: indexed text fragments ---
    if (bullets.length < MAX_BULLETS) {
      const chunkConditions = keywords.map((kw) =>
        like(memoryChunks.content, `%${kw}%`),
      );

      const chunks = db
        .select({
          id: memoryChunks.id,
          content: memoryChunks.content,
          observationId: memoryChunks.observationId,
        })
        .from(memoryChunks)
        .where(and(eq(memoryChunks.scopeId, scopeId), or(...chunkConditions)))
        .orderBy(desc(memoryChunks.createdAt))
        .limit(MAX_BULLETS)
        .all();

      for (const chunk of chunks) {
        if (bullets.length >= MAX_BULLETS) break;
        const normalized = normalizeForDedup(chunk.content);
        if (seenContent.has(normalized)) continue;
        seenContent.add(normalized);

        // Look up the observation's conversationId for provenance
        // NOTE(review): per-row lookup (N+1, bounded by MAX_BULLETS=3);
        // acceptable at this cap, revisit if the cap grows.
        const obs = db
          .select({ conversationId: memoryObservations.conversationId })
          .from(memoryObservations)
          .where(eq(memoryObservations.id, chunk.observationId))
          .get();

        const convTitle = obs
          ? lookupConversationTitle(db, obs.conversationId)
          : null;

        bullets.push({
          text: truncate(chunk.content, 200),
          source: "chunk",
          sourceId: chunk.id,
          conversationTitle: convTitle,
        });
      }
    }
  } catch (err) {
    log.warn({ err }, "Deeper archive recall failed");
  }

  // Also incorporate prefetch hits that weren't already captured
  // NOTE(review): bullets promoted here carry no conversationTitle even
  // though hit.conversationId is available — confirm whether provenance
  // was intentionally skipped for the fallback path.
  for (const hit of prefetchHits) {
    if (bullets.length >= MAX_BULLETS) break;
    const normalized = normalizeForDedup(hit.content);
    if (seenContent.has(normalized)) continue;
    seenContent.add(normalized);

    bullets.push({
      text: truncate(hit.content, 200),
      source: hit.source,
      sourceId: hit.id,
    });
  }

  // Loop guards already cap the count; slice is a final safety net.
  return bullets.slice(0, MAX_BULLETS);
}
|
|
415
|
+
|
|
416
|
+
// ── Rendering ───────────────────────────────────────────────────────
|
|
417
|
+
|
|
418
|
+
/**
|
|
419
|
+
* Render recall bullets into `<supporting_recall>` XML block.
|
|
420
|
+
* Returns empty string when there are no bullets.
|
|
421
|
+
*/
|
|
422
|
+
export function renderSupportingRecall(bullets: RecallBullet[]): string {
|
|
423
|
+
if (bullets.length === 0) return "";
|
|
424
|
+
|
|
425
|
+
const lines = bullets.map((b) => {
|
|
426
|
+
const provenance = b.conversationTitle
|
|
427
|
+
? ` (from: ${b.conversationTitle})`
|
|
428
|
+
: "";
|
|
429
|
+
return `- ${b.text}${provenance}`;
|
|
430
|
+
});
|
|
431
|
+
|
|
432
|
+
return `<supporting_recall>\n${lines.join("\n")}\n</supporting_recall>`;
|
|
433
|
+
}
|
|
434
|
+
|
|
435
|
+
// ── Helpers ─────────────────────────────────────────────────────────
|
|
436
|
+
|
|
437
|
+
/**
|
|
438
|
+
* Extract meaningful keywords from user text for LIKE-based matching.
|
|
439
|
+
* Filters out short words (< 4 chars) and common stop words.
|
|
440
|
+
*/
|
|
441
|
+
export function extractKeywords(text: string): string[] {
|
|
442
|
+
const STOP_WORDS = new Set([
|
|
443
|
+
"about",
|
|
444
|
+
"also",
|
|
445
|
+
"been",
|
|
446
|
+
"could",
|
|
447
|
+
"does",
|
|
448
|
+
"from",
|
|
449
|
+
"have",
|
|
450
|
+
"into",
|
|
451
|
+
"just",
|
|
452
|
+
"know",
|
|
453
|
+
"like",
|
|
454
|
+
"make",
|
|
455
|
+
"more",
|
|
456
|
+
"much",
|
|
457
|
+
"only",
|
|
458
|
+
"over",
|
|
459
|
+
"said",
|
|
460
|
+
"some",
|
|
461
|
+
"than",
|
|
462
|
+
"that",
|
|
463
|
+
"them",
|
|
464
|
+
"then",
|
|
465
|
+
"they",
|
|
466
|
+
"this",
|
|
467
|
+
"very",
|
|
468
|
+
"want",
|
|
469
|
+
"were",
|
|
470
|
+
"what",
|
|
471
|
+
"when",
|
|
472
|
+
"will",
|
|
473
|
+
"with",
|
|
474
|
+
"your",
|
|
475
|
+
]);
|
|
476
|
+
|
|
477
|
+
const words = text
|
|
478
|
+
.toLowerCase()
|
|
479
|
+
.replace(/[^\w\s]/g, " ")
|
|
480
|
+
.split(/\s+/)
|
|
481
|
+
.filter((w) => w.length >= 4 && !STOP_WORDS.has(w));
|
|
482
|
+
|
|
483
|
+
// Deduplicate while preserving order
|
|
484
|
+
return [...new Set(words)];
|
|
485
|
+
}
|
|
486
|
+
|
|
487
|
+
/**
|
|
488
|
+
* Look up a conversation's title for provenance display.
|
|
489
|
+
*/
|
|
490
|
+
function lookupConversationTitle(
|
|
491
|
+
db: ReturnType<typeof getDb>,
|
|
492
|
+
conversationId: string,
|
|
493
|
+
): string | null {
|
|
494
|
+
try {
|
|
495
|
+
const row = db
|
|
496
|
+
.select({ title: sql<string | null>`title` })
|
|
497
|
+
.from(sql`conversations`)
|
|
498
|
+
.where(sql`id = ${conversationId}`)
|
|
499
|
+
.get();
|
|
500
|
+
return row?.title ?? null;
|
|
501
|
+
} catch {
|
|
502
|
+
return null;
|
|
503
|
+
}
|
|
504
|
+
}
|
|
505
|
+
|
|
506
|
+
function truncate(text: string, max: number): string {
|
|
507
|
+
if (text.length <= max) return text;
|
|
508
|
+
return `${text.slice(0, max - 3)}...`;
|
|
509
|
+
}
|
|
510
|
+
|
|
511
|
+
/**
|
|
512
|
+
* Normalize text for content deduplication across sources.
|
|
513
|
+
*/
|
|
514
|
+
function normalizeForDedup(text: string): string {
|
|
515
|
+
return text.toLowerCase().replace(/\s+/g, " ").trim();
|
|
516
|
+
}
|
package/src/memory/brief-time.ts
CHANGED
|
@@ -4,7 +4,7 @@
|
|
|
4
4
|
* schedule jobs, sorts them by urgency bucket, and caps the output.
|
|
5
5
|
*/
|
|
6
6
|
|
|
7
|
-
import { and, gte, lte } from "drizzle-orm";
|
|
7
|
+
import { and, eq, gte, lte } from "drizzle-orm";
|
|
8
8
|
|
|
9
9
|
import { getDueSoonSchedules } from "../schedule/schedule-store.js";
|
|
10
10
|
import type { BriefEntry } from "./brief-formatting.js";
|
|
@@ -69,18 +69,19 @@ function collectTimeContexts(
|
|
|
69
69
|
now: number,
|
|
70
70
|
out: Candidate[],
|
|
71
71
|
): void {
|
|
72
|
-
// Active time contexts: activeFrom <= now AND activeUntil >= now
|
|
72
|
+
// Active time contexts: scopeId match AND activeFrom <= now AND activeUntil >= now
|
|
73
|
+
// Uses idx_time_contexts_scope_active_until composite index
|
|
73
74
|
const rows = db
|
|
74
75
|
.select()
|
|
75
76
|
.from(timeContexts)
|
|
76
77
|
.where(
|
|
77
78
|
and(
|
|
79
|
+
eq(timeContexts.scopeId, scopeId),
|
|
78
80
|
lte(timeContexts.activeFrom, now),
|
|
79
81
|
gte(timeContexts.activeUntil, now),
|
|
80
82
|
),
|
|
81
83
|
)
|
|
82
|
-
.all()
|
|
83
|
-
.filter((r) => r.scopeId === scopeId);
|
|
84
|
+
.all();
|
|
84
85
|
|
|
85
86
|
for (const row of rows) {
|
|
86
87
|
const remaining = row.activeUntil - now;
|