@yeaft/webchat-agent 0.1.408 → 0.1.409
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/unify/cli.js +214 -16
- package/unify/config.js +13 -0
- package/unify/conversation/persist.js +436 -0
- package/unify/conversation/search.js +65 -0
- package/unify/engine.js +210 -18
- package/unify/index.js +6 -0
- package/unify/memory/consolidate.js +187 -0
- package/unify/memory/extract.js +97 -0
- package/unify/memory/recall.js +243 -0
- package/unify/memory/store.js +507 -0
- package/unify/prompts.js +51 -3
package/unify/engine.js
CHANGED
|
@@ -2,22 +2,33 @@
|
|
|
2
2
|
* engine.js — Yeaft query loop
|
|
3
3
|
*
|
|
4
4
|
* The engine is the core orchestrator:
|
|
5
|
-
* 1.
|
|
6
|
-
* 2.
|
|
7
|
-
* 3.
|
|
8
|
-
* 4.
|
|
9
|
-
* 5. If
|
|
10
|
-
* 6. If
|
|
5
|
+
* 1. Before first turn: recall memories → inject into system prompt
|
|
6
|
+
* 2. Build messages array (with compact summary if available)
|
|
7
|
+
* 3. Call adapter.stream()
|
|
8
|
+
* 4. Collect text + tool_calls from stream events
|
|
9
|
+
* 5. If tool_calls → execute tools → append results → goto 3
|
|
10
|
+
* 6. If end_turn → persist messages → check consolidation → done
|
|
11
|
+
* 7. If max_tokens → auto-continue (up to maxContinueTurns)
|
|
12
|
+
* 8. On LLMContextError → force compact → retry
|
|
13
|
+
* 9. On retryable error with fallbackModel → switch model → retry
|
|
11
14
|
*
|
|
12
15
|
* Pattern derived from Claude Code's query loop (src/query.ts).
|
|
16
|
+
*
|
|
17
|
+
* Reference: yeaft-unify-implementation-plan.md §3.1, §4 (Phase 2)
|
|
13
18
|
*/
|
|
14
19
|
|
|
15
20
|
import { randomUUID } from 'crypto';
|
|
16
21
|
import { buildSystemPrompt } from './prompts.js';
|
|
22
|
+
import { LLMContextError } from './llm/adapter.js';
|
|
23
|
+
import { recall } from './memory/recall.js';
|
|
24
|
+
import { shouldConsolidate, consolidate } from './memory/consolidate.js';
|
|
17
25
|
|
|
18
26
|
/** Maximum number of turns before the engine stops to prevent infinite loops. */
|
|
19
27
|
const MAX_TURNS = 25;
|
|
20
28
|
|
|
29
|
+
/** Maximum auto-continue turns when stopReason is 'max_tokens'. */
|
|
30
|
+
const MAX_CONTINUE_TURNS = 3;
|
|
31
|
+
|
|
21
32
|
// ─── Engine Events (superset of adapter events) ──────────────────
|
|
22
33
|
|
|
23
34
|
/**
|
|
@@ -25,8 +36,11 @@ const MAX_TURNS = 25;
|
|
|
25
36
|
* @typedef {{ type: 'turn_end', turnNumber: number, stopReason: string }} TurnEndEvent
|
|
26
37
|
* @typedef {{ type: 'tool_start', id: string, name: string, input: object }} ToolStartEvent
|
|
27
38
|
* @typedef {{ type: 'tool_end', id: string, name: string, output: string, isError: boolean }} ToolEndEvent
|
|
39
|
+
* @typedef {{ type: 'consolidate', archivedCount: number, extractedCount: number }} ConsolidateEvent
|
|
40
|
+
* @typedef {{ type: 'recall', entryCount: number, cached: boolean }} RecallEvent
|
|
41
|
+
* @typedef {{ type: 'fallback', from: string, to: string, reason: string }} FallbackEvent
|
|
28
42
|
*
|
|
29
|
-
* @typedef {import('./llm/adapter.js').StreamEvent | TurnStartEvent | TurnEndEvent | ToolStartEvent | ToolEndEvent} EngineEvent
|
|
43
|
+
* @typedef {import('./llm/adapter.js').StreamEvent | TurnStartEvent | TurnEndEvent | ToolStartEvent | ToolEndEvent | ConsolidateEvent | RecallEvent | FallbackEvent} EngineEvent
|
|
30
44
|
*/
|
|
31
45
|
|
|
32
46
|
// ─── Engine ──────────────────────────────────────────────────────
|
|
@@ -47,15 +61,29 @@ export class Engine {
|
|
|
47
61
|
/** @type {string} */
|
|
48
62
|
#traceId;
|
|
49
63
|
|
|
64
|
+
/** @type {import('./conversation/persist.js').ConversationStore|null} */
|
|
65
|
+
#conversationStore;
|
|
66
|
+
|
|
67
|
+
/** @type {import('./memory/store.js').MemoryStore|null} */
|
|
68
|
+
#memoryStore;
|
|
69
|
+
|
|
50
70
|
/**
|
|
51
|
-
* @param {{
|
|
71
|
+
* @param {{
|
|
72
|
+
* adapter: import('./llm/adapter.js').LLMAdapter,
|
|
73
|
+
* trace: object,
|
|
74
|
+
* config: object,
|
|
75
|
+
* conversationStore?: import('./conversation/persist.js').ConversationStore,
|
|
76
|
+
* memoryStore?: import('./memory/store.js').MemoryStore
|
|
77
|
+
* }} params
|
|
52
78
|
*/
|
|
53
|
-
constructor({ adapter, trace, config }) {
|
|
79
|
+
constructor({ adapter, trace, config, conversationStore, memoryStore }) {
|
|
54
80
|
this.#adapter = adapter;
|
|
55
81
|
this.#trace = trace;
|
|
56
82
|
this.#config = config;
|
|
57
83
|
this.#tools = new Map();
|
|
58
84
|
this.#traceId = randomUUID();
|
|
85
|
+
this.#conversationStore = conversationStore || null;
|
|
86
|
+
this.#memoryStore = memoryStore || null;
|
|
59
87
|
}
|
|
60
88
|
|
|
61
89
|
/**
|
|
@@ -94,19 +122,120 @@ export class Engine {
|
|
|
94
122
|
}
|
|
95
123
|
|
|
96
124
|
/**
|
|
97
|
-
* Build the system prompt.
|
|
125
|
+
* Build the system prompt with memory and compact summary.
|
|
98
126
|
*
|
|
99
|
-
* @param {string} mode
|
|
127
|
+
* @param {string} mode
|
|
128
|
+
* @param {{ profile?: string, entries?: object[] }} [memory]
|
|
129
|
+
* @param {string} [compactSummary]
|
|
100
130
|
* @returns {string}
|
|
101
131
|
*/
|
|
102
|
-
#buildSystemPrompt(mode) {
|
|
132
|
+
#buildSystemPrompt(mode, memory, compactSummary) {
|
|
103
133
|
return buildSystemPrompt({
|
|
104
134
|
language: this.#config.language || 'en',
|
|
105
135
|
mode,
|
|
106
136
|
toolNames: Array.from(this.#tools.keys()),
|
|
137
|
+
memory,
|
|
138
|
+
compactSummary,
|
|
107
139
|
});
|
|
108
140
|
}
|
|
109
141
|
|
|
142
|
+
/**
|
|
143
|
+
* Perform memory recall for a given prompt.
|
|
144
|
+
*
|
|
145
|
+
* @param {string} prompt
|
|
146
|
+
* @returns {Promise<{ profile: string, entries: object[] }|null>}
|
|
147
|
+
*/
|
|
148
|
+
async #recallMemory(prompt) {
|
|
149
|
+
if (!this.#memoryStore) return null;
|
|
150
|
+
|
|
151
|
+
const memory = { profile: '', entries: [] };
|
|
152
|
+
|
|
153
|
+
// Read user profile
|
|
154
|
+
memory.profile = this.#memoryStore.readProfile();
|
|
155
|
+
|
|
156
|
+
// Recall relevant entries
|
|
157
|
+
try {
|
|
158
|
+
const result = await recall({
|
|
159
|
+
prompt,
|
|
160
|
+
adapter: this.#adapter,
|
|
161
|
+
config: this.#config,
|
|
162
|
+
memoryStore: this.#memoryStore,
|
|
163
|
+
});
|
|
164
|
+
memory.entries = result.entries;
|
|
165
|
+
} catch {
|
|
166
|
+
// Recall failure is non-critical
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
return memory;
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
/**
|
|
173
|
+
* Read compact summary from conversation store.
|
|
174
|
+
*
|
|
175
|
+
* @returns {string}
|
|
176
|
+
*/
|
|
177
|
+
#getCompactSummary() {
|
|
178
|
+
if (!this.#conversationStore) return '';
|
|
179
|
+
return this.#conversationStore.readCompactSummary();
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
/**
|
|
183
|
+
* Persist user message and assistant response to conversation store.
|
|
184
|
+
*
|
|
185
|
+
* @param {string} userContent
|
|
186
|
+
* @param {string} assistantContent
|
|
187
|
+
* @param {string} mode
|
|
188
|
+
* @param {object[]} [toolCalls]
|
|
189
|
+
*/
|
|
190
|
+
#persistMessages(userContent, assistantContent, mode, toolCalls) {
|
|
191
|
+
if (!this.#conversationStore) return;
|
|
192
|
+
|
|
193
|
+
// Persist user message
|
|
194
|
+
this.#conversationStore.append({
|
|
195
|
+
role: 'user',
|
|
196
|
+
content: userContent,
|
|
197
|
+
mode,
|
|
198
|
+
});
|
|
199
|
+
|
|
200
|
+
// Persist assistant message
|
|
201
|
+
const assistantMsg = {
|
|
202
|
+
role: 'assistant',
|
|
203
|
+
content: assistantContent,
|
|
204
|
+
mode,
|
|
205
|
+
model: this.#config.model,
|
|
206
|
+
};
|
|
207
|
+
if (toolCalls && toolCalls.length > 0) {
|
|
208
|
+
assistantMsg.toolCalls = toolCalls;
|
|
209
|
+
}
|
|
210
|
+
this.#conversationStore.append(assistantMsg);
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
/**
|
|
214
|
+
* Check and trigger consolidation if needed.
|
|
215
|
+
*
|
|
216
|
+
* @returns {Promise<{ archivedCount: number, extractedCount: number }|null>}
|
|
217
|
+
*/
|
|
218
|
+
async #maybeConsolidate() {
|
|
219
|
+
if (!this.#conversationStore || !this.#memoryStore) return null;
|
|
220
|
+
|
|
221
|
+
const budget = this.#config.messageTokenBudget || 8192;
|
|
222
|
+
if (!shouldConsolidate(this.#conversationStore, budget)) return null;
|
|
223
|
+
|
|
224
|
+
try {
|
|
225
|
+
const result = await consolidate({
|
|
226
|
+
conversationStore: this.#conversationStore,
|
|
227
|
+
memoryStore: this.#memoryStore,
|
|
228
|
+
adapter: this.#adapter,
|
|
229
|
+
config: this.#config,
|
|
230
|
+
budget,
|
|
231
|
+
});
|
|
232
|
+
return { archivedCount: result.archivedCount, extractedCount: result.extractedEntries.length };
|
|
233
|
+
} catch {
|
|
234
|
+
// Consolidation failure is non-critical
|
|
235
|
+
return null;
|
|
236
|
+
}
|
|
237
|
+
}
|
|
238
|
+
|
|
110
239
|
/**
|
|
111
240
|
* Run a query — the main loop.
|
|
112
241
|
*
|
|
@@ -126,7 +255,14 @@ export class Engine {
|
|
|
126
255
|
return;
|
|
127
256
|
}
|
|
128
257
|
|
|
129
|
-
|
|
258
|
+
// ─── Pre-query: Recall + Compact Summary ────────────────
|
|
259
|
+
const memory = await this.#recallMemory(prompt);
|
|
260
|
+
if (memory && memory.entries.length > 0) {
|
|
261
|
+
yield { type: 'recall', entryCount: memory.entries.length, cached: false };
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
const compactSummary = this.#getCompactSummary();
|
|
265
|
+
const systemPrompt = this.#buildSystemPrompt(mode, memory, compactSummary);
|
|
130
266
|
|
|
131
267
|
// Build conversation: existing messages + new user message
|
|
132
268
|
const conversationMessages = [
|
|
@@ -136,6 +272,9 @@ export class Engine {
|
|
|
136
272
|
|
|
137
273
|
const toolDefs = this.#getToolDefs();
|
|
138
274
|
let turnNumber = 0;
|
|
275
|
+
let continueTurns = 0; // auto-continue counter
|
|
276
|
+
let fullResponseText = '';
|
|
277
|
+
let currentModel = this.#config.model;
|
|
139
278
|
|
|
140
279
|
while (true) {
|
|
141
280
|
turnNumber++;
|
|
@@ -166,9 +305,8 @@ export class Engine {
|
|
|
166
305
|
|
|
167
306
|
try {
|
|
168
307
|
// Stream from adapter
|
|
169
|
-
// Note: pass a snapshot of messages so later mutations don't affect the adapter
|
|
170
308
|
for await (const event of this.#adapter.stream({
|
|
171
|
-
model:
|
|
309
|
+
model: currentModel,
|
|
172
310
|
system: systemPrompt,
|
|
173
311
|
messages: [...conversationMessages],
|
|
174
312
|
tools: toolDefs.length > 0 ? toolDefs : undefined,
|
|
@@ -202,10 +340,9 @@ export class Engine {
|
|
|
202
340
|
}
|
|
203
341
|
}
|
|
204
342
|
} catch (err) {
|
|
205
|
-
// Adapter threw an exception (network, auth, etc.)
|
|
206
343
|
const latencyMs = Date.now() - startTime;
|
|
207
344
|
this.#trace.endTurn(turnId, {
|
|
208
|
-
model:
|
|
345
|
+
model: currentModel,
|
|
209
346
|
inputTokens: totalUsage.inputTokens,
|
|
210
347
|
outputTokens: totalUsage.outputTokens,
|
|
211
348
|
stopReason: 'error',
|
|
@@ -213,6 +350,26 @@ export class Engine {
|
|
|
213
350
|
responseText,
|
|
214
351
|
});
|
|
215
352
|
|
|
353
|
+
// ─── LLMContextError → force compact → retry ──────
|
|
354
|
+
if (err instanceof LLMContextError && this.#conversationStore && this.#memoryStore) {
|
|
355
|
+
const consolidated = await this.#maybeConsolidate();
|
|
356
|
+
if (consolidated && consolidated.archivedCount > 0) {
|
|
357
|
+
yield { type: 'consolidate', archivedCount: consolidated.archivedCount, extractedCount: consolidated.extractedCount };
|
|
358
|
+
yield { type: 'turn_end', turnNumber, stopReason: 'context_overflow_retry' };
|
|
359
|
+
continue; // retry with fewer messages
|
|
360
|
+
}
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
// ─── Fallback model ──────────────────────────────
|
|
364
|
+
const fallbackModel = this.#config.fallbackModel;
|
|
365
|
+
if (fallbackModel && fallbackModel !== currentModel &&
|
|
366
|
+
(err.name === 'LLMRateLimitError' || err.name === 'LLMServerError')) {
|
|
367
|
+
yield { type: 'fallback', from: currentModel, to: fallbackModel, reason: err.message };
|
|
368
|
+
currentModel = fallbackModel;
|
|
369
|
+
yield { type: 'turn_end', turnNumber, stopReason: 'fallback_retry' };
|
|
370
|
+
continue; // retry with fallback model
|
|
371
|
+
}
|
|
372
|
+
|
|
216
373
|
yield {
|
|
217
374
|
type: 'error',
|
|
218
375
|
error: err,
|
|
@@ -226,7 +383,7 @@ export class Engine {
|
|
|
226
383
|
|
|
227
384
|
// Record turn in debug trace
|
|
228
385
|
this.#trace.endTurn(turnId, {
|
|
229
|
-
model:
|
|
386
|
+
model: currentModel,
|
|
230
387
|
inputTokens: totalUsage.inputTokens,
|
|
231
388
|
outputTokens: totalUsage.outputTokens,
|
|
232
389
|
stopReason,
|
|
@@ -244,10 +401,29 @@ export class Engine {
|
|
|
244
401
|
}));
|
|
245
402
|
}
|
|
246
403
|
conversationMessages.push(assistantMsg);
|
|
404
|
+
fullResponseText += responseText;
|
|
405
|
+
|
|
406
|
+
// ─── Handle max_tokens → auto-continue ────────────
|
|
407
|
+
if (stopReason === 'max_tokens' && continueTurns < MAX_CONTINUE_TURNS) {
|
|
408
|
+
continueTurns++;
|
|
409
|
+
// Append a "Continue" user message
|
|
410
|
+
conversationMessages.push({ role: 'user', content: 'Continue' });
|
|
411
|
+
yield { type: 'turn_end', turnNumber, stopReason: 'max_tokens_continue' };
|
|
412
|
+
continue; // loop back to call adapter again
|
|
413
|
+
}
|
|
247
414
|
|
|
248
415
|
// If no tool calls, we're done
|
|
249
416
|
if (stopReason !== 'tool_use' || toolCalls.length === 0) {
|
|
250
417
|
yield { type: 'turn_end', turnNumber, stopReason };
|
|
418
|
+
|
|
419
|
+
// ─── Post-query: Persist + Consolidate ────────────
|
|
420
|
+
this.#persistMessages(prompt, fullResponseText, mode, assistantMsg.toolCalls);
|
|
421
|
+
|
|
422
|
+
const consolidated = await this.#maybeConsolidate();
|
|
423
|
+
if (consolidated && consolidated.archivedCount > 0) {
|
|
424
|
+
yield { type: 'consolidate', archivedCount: consolidated.archivedCount, extractedCount: consolidated.extractedCount };
|
|
425
|
+
}
|
|
426
|
+
|
|
251
427
|
break;
|
|
252
428
|
}
|
|
253
429
|
|
|
@@ -316,4 +492,20 @@ export class Engine {
|
|
|
316
492
|
get toolNames() {
|
|
317
493
|
return Array.from(this.#tools.keys());
|
|
318
494
|
}
|
|
495
|
+
|
|
496
|
+
/**
|
|
497
|
+
* Get the conversation store (for external access, e.g., CLI commands).
|
|
498
|
+
* @returns {import('./conversation/persist.js').ConversationStore|null}
|
|
499
|
+
*/
|
|
500
|
+
get conversationStore() {
|
|
501
|
+
return this.#conversationStore;
|
|
502
|
+
}
|
|
503
|
+
|
|
504
|
+
/**
|
|
505
|
+
* Get the memory store (for external access, e.g., CLI commands).
|
|
506
|
+
* @returns {import('./memory/store.js').MemoryStore|null}
|
|
507
|
+
*/
|
|
508
|
+
get memoryStore() {
|
|
509
|
+
return this.#memoryStore;
|
|
510
|
+
}
|
|
319
511
|
}
|
package/unify/index.js
CHANGED
|
@@ -19,3 +19,9 @@ export {
|
|
|
19
19
|
export { MODEL_REGISTRY, resolveModel, listModels, isKnownModel } from './models.js';
|
|
20
20
|
export { buildSystemPrompt, SUPPORTED_LANGUAGES } from './prompts.js';
|
|
21
21
|
export { Engine } from './engine.js';
|
|
22
|
+
export { ConversationStore, parseMessage, estimateTokens } from './conversation/persist.js';
|
|
23
|
+
export { searchMessages } from './conversation/search.js';
|
|
24
|
+
export { MemoryStore, parseEntry, serializeEntry, MEMORY_KINDS } from './memory/store.js';
|
|
25
|
+
export { recall, extractKeywords, computeFingerprint, clearRecallCache } from './memory/recall.js';
|
|
26
|
+
export { extractMemories } from './memory/extract.js';
|
|
27
|
+
export { consolidate, shouldConsolidate } from './memory/consolidate.js';
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* consolidate.js — Consolidate = compact + extract (one LLM call)
|
|
3
|
+
*
|
|
4
|
+
* Triggered when hot_tokens > MESSAGE_TOKEN_BUDGET.
|
|
5
|
+
* One LLM call does two things simultaneously:
|
|
6
|
+
* 1. Generate compact summary → append to compact.md ("short-term memory")
|
|
7
|
+
* 2. Extract memory entries → write to entries/ ("long-term memory")
|
|
8
|
+
*
|
|
9
|
+
* After consolidation:
|
|
10
|
+
* - Processed messages moved from messages/ to cold/
|
|
11
|
+
* - index.md + scopes.md updated
|
|
12
|
+
*
|
|
13
|
+
* Reference: yeaft-unify-core-systems.md §3.1, §4.2
|
|
14
|
+
* yeaft-unify-design.md §6.1
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import { extractMemories } from './extract.js';
|
|
18
|
+
|
|
19
|
+
// ─── Constants ──────────────────────────────────────────────────

/** Default MESSAGE_TOKEN_BUDGET (context * 4%, default ~8192). */
export const DEFAULT_MESSAGE_TOKEN_BUDGET = 8192;

/** After compact, keep this fraction of the budget. */
export const COMPACT_KEEP_RATIO = 0.4;

/** Minimum messages to keep hot (newest). */
const MIN_KEEP_MESSAGES = 3;

// ─── Consolidate ────────────────────────────────────────────────

/**
 * Check if consolidation should be triggered.
 *
 * Consolidation is due once the hot (non-archived) messages exceed the
 * token budget.
 *
 * @param {import('../conversation/persist.js').ConversationStore} conversationStore
 * @param {number} [budget] — MESSAGE_TOKEN_BUDGET
 * @returns {boolean}
 */
export function shouldConsolidate(conversationStore, budget = DEFAULT_MESSAGE_TOKEN_BUDGET) {
  return conversationStore.hotTokens() > budget;
}

/**
 * Determine which messages to archive (move to cold).
 * Strategy: from oldest, accumulate tokens until remaining ≤ budget * 40%.
 * Always keep at least MIN_KEEP_MESSAGES.
 *
 * @param {object[]} messages — all hot messages, sorted chronologically
 * @param {number} budget — MESSAGE_TOKEN_BUDGET
 * @returns {{ toArchive: object[], toKeep: object[] }}
 */
export function partitionMessages(messages, budget = DEFAULT_MESSAGE_TOKEN_BUDGET) {
  // Too few messages to be worth splitting — keep everything hot.
  if (messages.length <= MIN_KEEP_MESSAGES) {
    return { toArchive: [], toKeep: messages };
  }

  const keepBudget = Math.floor(budget * COMPACT_KEEP_RATIO);

  // Walk backwards from the newest message, accumulating estimated token
  // counts. Stop once adding the next-older message would overflow
  // keepBudget, provided enough messages are already retained.
  let accumulated = 0;
  let cut = messages.length;
  let i = messages.length - 1;
  while (i >= 0) {
    const tokens = messages[i].tokens_est || 0;
    if (accumulated + tokens > keepBudget && messages.length - i >= MIN_KEEP_MESSAGES) {
      cut = i + 1;
      break;
    }
    accumulated += tokens;
    if (i === 0) cut = 0;
    i -= 1;
  }

  // Clamp: never keep fewer than MIN_KEEP_MESSAGES, never cut below 0.
  cut = Math.max(0, Math.min(cut, messages.length - MIN_KEEP_MESSAGES));

  return {
    toArchive: messages.slice(0, cut),
    toKeep: messages.slice(cut),
  };
}
|
|
83
|
+
|
|
84
|
+
/**
 * Generate a compact summary of messages.
 *
 * Renders the messages as a "[Role]: content" transcript (content capped
 * at 500 chars per message), then asks the LLM for a concise summary.
 * On any adapter failure, falls back to stitching first/last snippets so
 * consolidation can still proceed.
 *
 * @param {object[]} messages — messages to summarize
 * @param {object} adapter — LLM adapter with .call()
 * @param {object} config — { model }
 * @returns {Promise<string>} — compact summary text
 */
async function generateSummary(messages, adapter, config) {
  const lines = [];
  for (const m of messages) {
    let prefix = m.role;
    if (m.role === 'user') prefix = 'User';
    else if (m.role === 'assistant') prefix = 'Assistant';
    lines.push(`[${prefix}]: ${(m.content || '').slice(0, 500)}`);
  }
  const conversation = lines.join('\n\n');

  const system = 'You are a conversation summarizer. Summarize the conversation concisely in 2-3 paragraphs, preserving key decisions, facts, and context. Write in the same language as the conversation.';

  try {
    const response = await adapter.call({
      model: config.model,
      system,
      messages: [{ role: 'user', content: `Summarize this conversation:\n\n${conversation}` }],
      maxTokens: 1024,
    });
    return response.text.trim();
  } catch {
    // Best-effort fallback: simple concatenation of first/last messages.
    const first = messages[0]?.content?.slice(0, 200) || '';
    const last = messages[messages.length - 1]?.content?.slice(0, 200) || '';
    return `[Auto-summary failed] Started with: ${first}... Ended with: ${last}`;
  }
}
|
|
115
|
+
|
|
116
|
+
/**
 * Run the full Consolidate pipeline.
 *
 * 1. Partition messages (what to archive vs keep)
 * 2. Generate compact summary (LLM call)
 * 3. Extract memory entries (LLM call)
 * 4. Move archived messages to cold/
 * 5. Update compact.md, index.md, scopes.md
 *
 * Steps 2 and 3 are independent LLM calls over the same archived slice
 * and neither rejects (both have internal catch-and-fallback), so they
 * run concurrently instead of back-to-back.
 *
 * @param {{
 *   conversationStore: import('../conversation/persist.js').ConversationStore,
 *   memoryStore: import('./store.js').MemoryStore,
 *   adapter: object,
 *   config: object,
 *   budget?: number
 * }} params
 * @returns {Promise<{ compactSummary: string, extractedEntries: string[], archivedCount: number }>}
 */
export async function consolidate({ conversationStore, memoryStore, adapter, config, budget = DEFAULT_MESSAGE_TOKEN_BUDGET }) {
  // Load all hot messages
  const messages = conversationStore.loadAll();

  // Nothing to do when the history is already minimal.
  if (messages.length <= MIN_KEEP_MESSAGES) {
    return { compactSummary: '', extractedEntries: [], archivedCount: 0 };
  }

  // Step 1: Partition
  const { toArchive, toKeep } = partitionMessages(messages, budget);

  if (toArchive.length === 0) {
    return { compactSummary: '', extractedEntries: [], archivedCount: 0 };
  }

  // Steps 2 + 3: summary and extraction are independent — run them in
  // parallel to cut the wall-clock latency of consolidation.
  const [compactSummary, extracted] = await Promise.all([
    generateSummary(toArchive, adapter, config),
    extractMemories({ messages: toArchive, adapter, config }),
  ]);

  // Step 4: Move archived messages to cold. Messages without an id are
  // skipped — they cannot be addressed in the store.
  const archiveIds = toArchive.map(m => m.id).filter(Boolean);
  conversationStore.moveToColdBatch(archiveIds);

  // Step 5a: Update compact.md ("short-term memory")
  if (compactSummary) {
    conversationStore.updateCompactSummary(compactSummary);
  }

  // Step 5b: Write extracted memory entries ("long-term memory")
  const entryNames = [];
  for (const entry of extracted) {
    entryNames.push(memoryStore.writeEntry(entry));
  }

  // Step 5c: Update index.md to point at the newest surviving hot message.
  const lastMsg = toKeep[toKeep.length - 1];
  conversationStore.updateIndex({
    lastMessageId: lastMsg?.id || null,
  });

  // Step 5d: Rebuild scopes.md only when new entries were written.
  if (entryNames.length > 0) {
    memoryStore.rebuildScopes();
  }

  return {
    compactSummary,
    extractedEntries: entryNames,
    archivedCount: archiveIds.length,
  };
}
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* extract.js — Extract memory-worthy entries from conversation
|
|
3
|
+
*
|
|
4
|
+
* Called by consolidate.js during the Consolidate lifecycle.
|
|
5
|
+
* Uses a single LLM call to identify facts, preferences, skills,
|
|
6
|
+
* lessons, contexts, and relations from conversation messages.
|
|
7
|
+
*
|
|
8
|
+
* Reference: yeaft-unify-core-systems.md §3.1, yeaft-unify-design.md §6.1
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { MEMORY_KINDS } from './store.js';
|
|
12
|
+
|
|
13
|
+
/**
 * Build the extraction prompt.
 *
 * Renders the conversation as "[Role]: content" lines (any role other
 * than user/assistant is labeled "System"), then embeds the transcript
 * in an instruction template asking the LLM for a JSON array of memory
 * objects shaped per MEMORY_KINDS.
 *
 * @param {object[]} messages — conversation messages to analyze
 * @returns {string} — complete prompt text for the extraction call
 */
function buildExtractionPrompt(messages) {
  // Transcript of the conversation. Note: content is NOT truncated here
  // (unlike the summarizer), so long conversations yield long prompts.
  const conversation = messages.map(m => {
    const prefix = m.role === 'user' ? 'User' : m.role === 'assistant' ? 'Assistant' : 'System';
    return `[${prefix}]: ${m.content}`;
  }).join('\n\n');

  // The template below is runtime prompt text sent to the LLM — it is
  // behavior, not documentation; edit wording with care.
  return `Analyze the following conversation and extract any memorable information worth saving to long-term memory.

For each memory, provide:
- **name**: A short slug-friendly name (e.g., "user-prefers-typescript", "project-uses-vue3")
- **kind**: One of: ${MEMORY_KINDS.join(', ')}
- **scope**: A tree path (e.g., "global", "tech/typescript", "work/project-name")
- **tags**: Relevant keywords as an array
- **importance**: "high", "normal", or "low"
- **content**: 1-3 sentences describing the memory

Memory kinds explained:
- fact: Objective facts (project structure, tech stack)
- preference: User preferences (coding style, tools)
- skill: How to do something (patterns, techniques)
- lesson: Lessons learned (bugs, pitfalls)
- context: Temporal context (current OKR, progress)
- relation: People and relationships (teammates, roles)

Do NOT extract:
- Specific code snippets (too large, will become stale)
- Temporary debugging information
- Trivial greetings or small talk

Return a JSON array of memory objects. If nothing is worth remembering, return an empty array [].

Conversation:
${conversation}`;
}
|
|
52
|
+
|
|
53
|
+
/**
 * Extract memory entries from a set of conversation messages.
 *
 * Sends the extraction prompt to the LLM, pulls the JSON array out of the
 * response text, and normalizes each entry (name clamped to 80 chars,
 * kind/importance validated against the allowed sets, fields stringified).
 * Any failure yields an empty list — extraction is non-critical.
 *
 * @param {{ messages: object[], adapter: object, config: object }} params
 * @returns {Promise<object[]>} — extracted memory entries
 */
export async function extractMemories({ messages, adapter, config }) {
  if (!messages || messages.length === 0) return [];

  const system = 'You are a memory extraction assistant. Analyze conversations and extract important facts, preferences, and lessons. Return ONLY a valid JSON array, no other text.';

  const extractionPrompt = buildExtractionPrompt(messages);

  try {
    const response = await adapter.call({
      model: config.model,
      system,
      messages: [{ role: 'user', content: extractionPrompt }],
      maxTokens: 2048,
    });

    // Locate the JSON array within the (possibly chatty) model output.
    const match = response.text.trim().match(/\[[\s\S]*\]/);
    if (!match) return [];

    const parsed = JSON.parse(match[0]);

    // Validate and normalize each candidate entry.
    const normalized = [];
    for (const candidate of parsed) {
      if (!candidate || typeof candidate !== 'object') continue;
      if (!candidate.name || !candidate.content) continue;
      normalized.push({
        name: String(candidate.name).slice(0, 80),
        kind: MEMORY_KINDS.includes(candidate.kind) ? candidate.kind : 'fact',
        scope: String(candidate.scope || 'global'),
        tags: Array.isArray(candidate.tags) ? candidate.tags.map(String) : [],
        importance: ['high', 'normal', 'low'].includes(candidate.importance) ? candidate.importance : 'normal',
        content: String(candidate.content),
      });
    }
    return normalized;
  } catch {
    // LLM failure — return empty (non-critical operation)
    return [];
  }
}
|