@memtensor/memos-local-openclaw-plugin 0.1.2 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +13 -5
- package/README.md +180 -68
- package/dist/capture/index.d.ts +5 -7
- package/dist/capture/index.d.ts.map +1 -1
- package/dist/capture/index.js +72 -43
- package/dist/capture/index.js.map +1 -1
- package/dist/ingest/providers/anthropic.d.ts +2 -0
- package/dist/ingest/providers/anthropic.d.ts.map +1 -1
- package/dist/ingest/providers/anthropic.js +110 -1
- package/dist/ingest/providers/anthropic.js.map +1 -1
- package/dist/ingest/providers/bedrock.d.ts +2 -5
- package/dist/ingest/providers/bedrock.d.ts.map +1 -1
- package/dist/ingest/providers/bedrock.js +110 -6
- package/dist/ingest/providers/bedrock.js.map +1 -1
- package/dist/ingest/providers/gemini.d.ts +2 -0
- package/dist/ingest/providers/gemini.d.ts.map +1 -1
- package/dist/ingest/providers/gemini.js +106 -1
- package/dist/ingest/providers/gemini.js.map +1 -1
- package/dist/ingest/providers/index.d.ts +9 -0
- package/dist/ingest/providers/index.d.ts.map +1 -1
- package/dist/ingest/providers/index.js +66 -4
- package/dist/ingest/providers/index.js.map +1 -1
- package/dist/ingest/providers/openai.d.ts +2 -0
- package/dist/ingest/providers/openai.d.ts.map +1 -1
- package/dist/ingest/providers/openai.js +112 -1
- package/dist/ingest/providers/openai.js.map +1 -1
- package/dist/ingest/task-processor.d.ts +63 -0
- package/dist/ingest/task-processor.d.ts.map +1 -0
- package/dist/ingest/task-processor.js +339 -0
- package/dist/ingest/task-processor.js.map +1 -0
- package/dist/ingest/worker.d.ts +1 -1
- package/dist/ingest/worker.d.ts.map +1 -1
- package/dist/ingest/worker.js +18 -13
- package/dist/ingest/worker.js.map +1 -1
- package/dist/recall/engine.d.ts +1 -0
- package/dist/recall/engine.d.ts.map +1 -1
- package/dist/recall/engine.js +21 -11
- package/dist/recall/engine.js.map +1 -1
- package/dist/recall/mmr.d.ts.map +1 -1
- package/dist/recall/mmr.js +3 -1
- package/dist/recall/mmr.js.map +1 -1
- package/dist/storage/sqlite.d.ts +67 -1
- package/dist/storage/sqlite.d.ts.map +1 -1
- package/dist/storage/sqlite.js +251 -5
- package/dist/storage/sqlite.js.map +1 -1
- package/dist/types.d.ts +15 -0
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js +2 -0
- package/dist/types.js.map +1 -1
- package/dist/viewer/html.d.ts +1 -1
- package/dist/viewer/html.d.ts.map +1 -1
- package/dist/viewer/html.js +955 -115
- package/dist/viewer/html.js.map +1 -1
- package/dist/viewer/server.d.ts +3 -0
- package/dist/viewer/server.d.ts.map +1 -1
- package/dist/viewer/server.js +59 -1
- package/dist/viewer/server.js.map +1 -1
- package/index.ts +221 -45
- package/openclaw.plugin.json +20 -45
- package/package.json +3 -4
- package/skill/SKILL.md +59 -0
- package/src/capture/index.ts +85 -45
- package/src/ingest/providers/anthropic.ts +128 -1
- package/src/ingest/providers/bedrock.ts +130 -6
- package/src/ingest/providers/gemini.ts +128 -1
- package/src/ingest/providers/index.ts +74 -8
- package/src/ingest/providers/openai.ts +130 -1
- package/src/ingest/task-processor.ts +380 -0
- package/src/ingest/worker.ts +21 -15
- package/src/recall/engine.ts +22 -12
- package/src/recall/mmr.ts +3 -1
- package/src/storage/sqlite.ts +298 -5
- package/src/types.ts +19 -0
- package/src/viewer/html.ts +955 -115
- package/src/viewer/server.ts +63 -1
- package/SKILL.md +0 -43
- package/www/index.html +0 -606
|
@@ -0,0 +1,380 @@
|
|
|
1
|
+
import { v4 as uuid } from "uuid";
|
|
2
|
+
import type { SqliteStore } from "../storage/sqlite";
|
|
3
|
+
import type { PluginContext, Task, Chunk } from "../types";
|
|
4
|
+
import { DEFAULTS } from "../types";
|
|
5
|
+
import { Summarizer } from "./providers";
|
|
6
|
+
|
|
7
|
+
// Patterns marking a line as trivial/test content (English and Chinese).
// Callers lowercase the text before matching (see looksLikeTrivialContent),
// so these patterns only need to cover lowercase forms.
const TRIVIAL_PATTERNS = [
  // Common greetings / acknowledgements ("test", "ok", "谢谢", …),
  // optionally followed by ASCII or full-width sentence punctuation.
  /^(test|testing|hello|hi|hey|ok|okay|yes|no|yeah|nope|sure|thanks|thank you|thx|ping|pong|哈哈|好的|嗯|是的|不是|谢谢|你好|测试)\s*[.!?。!?]*$/,
  // Keyboard mashing and filler: repeated letters/digits, "asdf", "lol", "hmm", …
  /^(aaa+|bbb+|xxx+|zzz+|123+|asdf+|qwer+|haha+|lol+|hmm+)\s*$/,
  // Lines consisting solely of whitespace, punctuation, or symbols
  // (Unicode property escapes, hence the `u` flag).
  /^[\s\p{P}\p{S}]*$/u,
];
|
|
12
|
+
|
|
13
|
+
// Canned human-readable (Chinese) explanations stored as the task summary
// when a task is finalized without generating an LLM summary.
const SKIP_REASONS = {
  // "This task has no conversation content; it was skipped automatically."
  noChunks: "该任务没有对话内容,已自动跳过。",
} as const;
|
|
16
|
+
|
|
17
|
+
/**
|
|
18
|
+
* Asynchronous task-level processor.
|
|
19
|
+
*
|
|
20
|
+
* After each ingestion batch, checks whether the current conversation
|
|
21
|
+
* constitutes a "new task" compared to the previous one. If so:
|
|
22
|
+
* 1. Finalizes the previous task (generates a detailed summary).
|
|
23
|
+
* 2. Creates a new active task for incoming chunks.
|
|
24
|
+
*
|
|
25
|
+
* Task boundary detection:
|
|
26
|
+
* - Session change → always new task
|
|
27
|
+
* - Time gap > 2h → always new task
|
|
28
|
+
* - LLM judges whether new user message starts a different topic
|
|
29
|
+
*/
|
|
30
|
+
export class TaskProcessor {
|
|
31
|
+
private summarizer: Summarizer;
|
|
32
|
+
private processing = false;
|
|
33
|
+
|
|
34
|
+
constructor(
|
|
35
|
+
private store: SqliteStore,
|
|
36
|
+
private ctx: PluginContext,
|
|
37
|
+
) {
|
|
38
|
+
this.summarizer = new Summarizer(ctx.config.summarizer, ctx.log);
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
/**
|
|
42
|
+
* Called after new chunks are ingested.
|
|
43
|
+
* Determines if a new task boundary was crossed and handles transition.
|
|
44
|
+
*/
|
|
45
|
+
async onChunksIngested(sessionKey: string, latestTimestamp: number): Promise<void> {
|
|
46
|
+
this.ctx.log.debug(`TaskProcessor.onChunksIngested called session=${sessionKey} ts=${latestTimestamp} processing=${this.processing}`);
|
|
47
|
+
if (this.processing) {
|
|
48
|
+
this.ctx.log.debug("TaskProcessor.onChunksIngested skipped — already processing");
|
|
49
|
+
return;
|
|
50
|
+
}
|
|
51
|
+
this.processing = true;
|
|
52
|
+
try {
|
|
53
|
+
await this.detectAndProcess(sessionKey, latestTimestamp);
|
|
54
|
+
} catch (err) {
|
|
55
|
+
this.ctx.log.error(`TaskProcessor error: ${err}`);
|
|
56
|
+
} finally {
|
|
57
|
+
this.processing = false;
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
private async detectAndProcess(sessionKey: string, latestTimestamp: number): Promise<void> {
|
|
62
|
+
this.ctx.log.debug(`TaskProcessor.detectAndProcess session=${sessionKey}`);
|
|
63
|
+
|
|
64
|
+
// Finalize any active tasks from OTHER sessions (session change = task boundary)
|
|
65
|
+
const allActive = this.store.getAllActiveTasks();
|
|
66
|
+
for (const t of allActive) {
|
|
67
|
+
if (t.sessionKey !== sessionKey) {
|
|
68
|
+
this.ctx.log.info(`Session changed: finalizing task=${t.id} from session=${t.sessionKey}`);
|
|
69
|
+
await this.finalizeTask(t);
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
const activeTask = this.store.getActiveTask(sessionKey);
|
|
74
|
+
this.ctx.log.debug(`TaskProcessor.detectAndProcess activeTask=${activeTask?.id ?? "none"}`);
|
|
75
|
+
|
|
76
|
+
if (!activeTask) {
|
|
77
|
+
await this.createNewTask(sessionKey, latestTimestamp);
|
|
78
|
+
return;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
const isNewTask = await this.isTaskBoundary(activeTask, sessionKey, latestTimestamp);
|
|
82
|
+
|
|
83
|
+
if (isNewTask) {
|
|
84
|
+
await this.finalizeTask(activeTask);
|
|
85
|
+
await this.createNewTask(sessionKey, latestTimestamp);
|
|
86
|
+
} else {
|
|
87
|
+
this.assignUnassignedChunks(sessionKey, activeTask.id);
|
|
88
|
+
this.store.updateTask(activeTask.id, { endedAt: undefined });
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
private async isTaskBoundary(activeTask: Task, sessionKey: string, latestTimestamp: number): Promise<boolean> {
|
|
93
|
+
if (activeTask.sessionKey !== sessionKey) return true;
|
|
94
|
+
|
|
95
|
+
const chunks = this.store.getChunksByTask(activeTask.id);
|
|
96
|
+
if (chunks.length === 0) return false;
|
|
97
|
+
|
|
98
|
+
const lastChunkTs = Math.max(...chunks.map((c) => c.createdAt));
|
|
99
|
+
const gap = latestTimestamp - lastChunkTs;
|
|
100
|
+
|
|
101
|
+
// Hard timeout: always split after 2h regardless of topic
|
|
102
|
+
if (gap > DEFAULTS.taskIdleTimeoutMs) {
|
|
103
|
+
this.ctx.log.info(
|
|
104
|
+
`Task boundary: time gap ${Math.round(gap / 60000)}min > ${Math.round(DEFAULTS.taskIdleTimeoutMs / 60000)}min`,
|
|
105
|
+
);
|
|
106
|
+
return true;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// LLM topic judgment: build context from existing task and compare with new message
|
|
110
|
+
const newUserChunks = this.store.getUnassignedChunks(sessionKey).filter((c) => c.role === "user");
|
|
111
|
+
if (newUserChunks.length === 0) return false;
|
|
112
|
+
|
|
113
|
+
const existingUserChunks = chunks.filter((c) => c.role === "user");
|
|
114
|
+
if (existingUserChunks.length === 0) return false;
|
|
115
|
+
|
|
116
|
+
const currentContext = this.buildContextSummary(chunks);
|
|
117
|
+
const newMessage = newUserChunks.map((c) => c.content).join("\n");
|
|
118
|
+
|
|
119
|
+
const isNew = await this.summarizer.judgeNewTopic(currentContext, newMessage);
|
|
120
|
+
|
|
121
|
+
if (isNew === null) {
|
|
122
|
+
this.ctx.log.debug("Topic judge unavailable (no LLM configured), keeping current task");
|
|
123
|
+
return false;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
if (isNew) {
|
|
127
|
+
this.ctx.log.info(`Task boundary: LLM judged new topic. New message: "${newMessage.slice(0, 80)}..."`);
|
|
128
|
+
} else {
|
|
129
|
+
this.ctx.log.debug(`LLM judged SAME topic, continuing task=${activeTask.id}`);
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
return isNew;
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
/**
|
|
136
|
+
* Build a concise context string from existing task chunks for the LLM topic judge.
|
|
137
|
+
* Takes recent user/assistant summaries to keep token usage low.
|
|
138
|
+
*/
|
|
139
|
+
private buildContextSummary(chunks: Chunk[]): string {
|
|
140
|
+
const relevant = chunks
|
|
141
|
+
.filter((c) => c.role === "user" || c.role === "assistant")
|
|
142
|
+
.slice(-6);
|
|
143
|
+
|
|
144
|
+
return relevant
|
|
145
|
+
.map((c) => `[${c.role === "user" ? "User" : "Assistant"}]: ${c.summary || c.content.slice(0, 150)}`)
|
|
146
|
+
.join("\n");
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
private async createNewTask(sessionKey: string, timestamp: number): Promise<void> {
|
|
150
|
+
const taskId = uuid();
|
|
151
|
+
const task: Task = {
|
|
152
|
+
id: taskId,
|
|
153
|
+
sessionKey,
|
|
154
|
+
title: "",
|
|
155
|
+
summary: "",
|
|
156
|
+
status: "active",
|
|
157
|
+
startedAt: timestamp,
|
|
158
|
+
endedAt: null,
|
|
159
|
+
updatedAt: timestamp,
|
|
160
|
+
};
|
|
161
|
+
this.store.insertTask(task);
|
|
162
|
+
this.assignUnassignedChunks(sessionKey, taskId);
|
|
163
|
+
this.ctx.log.info(`Created new task=${taskId} session=${sessionKey}`);
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
private assignUnassignedChunks(sessionKey: string, taskId: string): void {
|
|
167
|
+
const unassigned = this.store.getUnassignedChunks(sessionKey);
|
|
168
|
+
for (const chunk of unassigned) {
|
|
169
|
+
this.store.setChunkTaskId(chunk.id, taskId);
|
|
170
|
+
}
|
|
171
|
+
if (unassigned.length > 0) {
|
|
172
|
+
this.ctx.log.debug(`Assigned ${unassigned.length} chunks to task=${taskId}`);
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
async finalizeTask(task: Task): Promise<void> {
|
|
177
|
+
const chunks = this.store.getChunksByTask(task.id);
|
|
178
|
+
const fallbackTitle = chunks.length > 0 ? this.extractTitle(chunks) : "";
|
|
179
|
+
|
|
180
|
+
if (chunks.length === 0) {
|
|
181
|
+
this.ctx.log.info(`Task ${task.id} skipped: no chunks`);
|
|
182
|
+
this.store.updateTask(task.id, { title: fallbackTitle, summary: SKIP_REASONS.noChunks, status: "skipped", endedAt: Date.now() });
|
|
183
|
+
return;
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
const skipReason = this.shouldSkipSummary(chunks);
|
|
187
|
+
|
|
188
|
+
if (skipReason) {
|
|
189
|
+
this.ctx.log.info(`Task ${task.id} skipped: ${skipReason} (chunks=${chunks.length}, title="${fallbackTitle}")`);
|
|
190
|
+
const reason = this.humanReadableSkipReason(skipReason, chunks);
|
|
191
|
+
this.store.updateTask(task.id, { title: fallbackTitle, summary: reason, status: "skipped", endedAt: Date.now() });
|
|
192
|
+
return;
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
const conversationText = this.buildConversationText(chunks);
|
|
196
|
+
let summary: string;
|
|
197
|
+
try {
|
|
198
|
+
summary = await this.summarizer.summarizeTask(conversationText);
|
|
199
|
+
} catch (err) {
|
|
200
|
+
this.ctx.log.warn(`Task summary generation failed for task=${task.id}: ${err}`);
|
|
201
|
+
summary = this.fallbackSummary(chunks);
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
const { title: llmTitle, body } = this.parseTitleFromSummary(summary);
|
|
205
|
+
const title = llmTitle || fallbackTitle;
|
|
206
|
+
|
|
207
|
+
this.store.updateTask(task.id, {
|
|
208
|
+
title,
|
|
209
|
+
summary: body,
|
|
210
|
+
status: "completed",
|
|
211
|
+
endedAt: Date.now(),
|
|
212
|
+
});
|
|
213
|
+
|
|
214
|
+
this.ctx.log.info(
|
|
215
|
+
`Finalized task=${task.id} title="${title}" chunks=${chunks.length} summaryLen=${body.length}`,
|
|
216
|
+
);
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
/**
|
|
220
|
+
* Determine if a task is too trivial to warrant an LLM summary call.
|
|
221
|
+
* Returns a skip reason string, or null if summary should proceed.
|
|
222
|
+
*
|
|
223
|
+
* Skip conditions (any one triggers skip):
|
|
224
|
+
* 1. Total chunks < 4 — too few messages to form a meaningful task
|
|
225
|
+
* 2. Real conversation turns < 2 — no back-and-forth dialogue
|
|
226
|
+
* 3. No user messages — purely system/tool generated, no user intent
|
|
227
|
+
* 4. Total content < 200 chars — not enough substance
|
|
228
|
+
* 5. User content is trivial/test data — "hello", "test", "ok" etc.
|
|
229
|
+
* 6. All messages are tool results — automated output, no conversation
|
|
230
|
+
* 7. High content repetition — user repeated the same thing (debug loops)
|
|
231
|
+
*/
|
|
232
|
+
private shouldSkipSummary(chunks: Chunk[]): string | null {
|
|
233
|
+
const userChunks = chunks.filter((c) => c.role === "user");
|
|
234
|
+
const assistantChunks = chunks.filter((c) => c.role === "assistant");
|
|
235
|
+
const toolChunks = chunks.filter((c) => c.role === "tool");
|
|
236
|
+
|
|
237
|
+
// 1. Too few chunks
|
|
238
|
+
if (chunks.length < 4) {
|
|
239
|
+
return `too few chunks (${chunks.length} < 4 minimum)`;
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
// 2. Not enough real conversation turns (need at least 2 user-assistant exchanges)
|
|
243
|
+
const turns = Math.min(userChunks.length, assistantChunks.length);
|
|
244
|
+
if (turns < 2) {
|
|
245
|
+
return `too few conversation turns (${turns} < 2 minimum)`;
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
// 3. No user messages at all — purely automated
|
|
249
|
+
if (userChunks.length === 0) {
|
|
250
|
+
return "no user messages — task appears to be automated/system-generated";
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
// 4. Total content too short
|
|
254
|
+
// CJK characters carry more info per char, so use a lower threshold
|
|
255
|
+
const totalContentLen = chunks.reduce((sum, c) => sum + c.content.length, 0);
|
|
256
|
+
const hasCJK = /[\u4e00-\u9fff\u3040-\u30ff\uac00-\ud7af]/.test(
|
|
257
|
+
userChunks[0]?.content ?? "",
|
|
258
|
+
);
|
|
259
|
+
const minContentLen = hasCJK ? 80 : 200;
|
|
260
|
+
if (totalContentLen < minContentLen) {
|
|
261
|
+
return `content too short (${totalContentLen} chars < ${minContentLen} minimum)`;
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
// 5. User content is trivial/test data
|
|
265
|
+
const userContent = userChunks.map((c) => c.content).join("\n");
|
|
266
|
+
if (this.looksLikeTrivialContent(userContent)) {
|
|
267
|
+
return "user content appears to be test/trivial data";
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
// 6. Assistant content is also trivial (both sides are low-value)
|
|
271
|
+
const assistantContent = assistantChunks.map((c) => c.content).join("\n");
|
|
272
|
+
if (this.looksLikeTrivialContent(userContent + "\n" + assistantContent)) {
|
|
273
|
+
return "conversation content (both user and assistant) appears trivial";
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
// 7. Almost all messages are tool results with minimal user interaction
|
|
277
|
+
if (toolChunks.length > 0 && toolChunks.length >= chunks.length * 0.7 && userChunks.length <= 1) {
|
|
278
|
+
return `dominated by tool results (${toolChunks.length}/${chunks.length} chunks) with minimal user input`;
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
// 8. High repetition — user keeps saying the same thing
|
|
282
|
+
if (userChunks.length >= 3) {
|
|
283
|
+
const uniqueUserMsgs = new Set(userChunks.map((c) => c.content.trim().toLowerCase()));
|
|
284
|
+
const uniqueRatio = uniqueUserMsgs.size / userChunks.length;
|
|
285
|
+
if (uniqueRatio < 0.4) {
|
|
286
|
+
return `high content repetition (${uniqueUserMsgs.size} unique out of ${userChunks.length} user messages)`;
|
|
287
|
+
}
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
return null;
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
private looksLikeTrivialContent(text: string): boolean {
|
|
294
|
+
const lines = text.toLowerCase().split(/\n/).map((l) => l.trim()).filter(Boolean);
|
|
295
|
+
if (lines.length === 0) return true;
|
|
296
|
+
|
|
297
|
+
const trivialCount = lines.filter((line) => {
|
|
298
|
+
if (line.length < 5) return true;
|
|
299
|
+
if (TRIVIAL_PATTERNS.some((p) => p.test(line))) return true;
|
|
300
|
+
return false;
|
|
301
|
+
}).length;
|
|
302
|
+
|
|
303
|
+
return trivialCount / lines.length > 0.7;
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
private buildConversationText(chunks: Chunk[]): string {
|
|
307
|
+
const lines: string[] = [];
|
|
308
|
+
for (const c of chunks) {
|
|
309
|
+
const roleLabel = c.role === "user" ? "User" : c.role === "assistant" ? "Assistant" : c.role;
|
|
310
|
+
lines.push(`[${roleLabel}]: ${c.content}`);
|
|
311
|
+
}
|
|
312
|
+
return lines.join("\n\n");
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
/**
|
|
316
|
+
* Extract the LLM-generated title from the summary output.
|
|
317
|
+
* The LLM is prompted to output "📌 Title\n<title text>" as the first section.
|
|
318
|
+
* Returns the title and the remaining body (with the title section stripped).
|
|
319
|
+
*/
|
|
320
|
+
private parseTitleFromSummary(summary: string): { title: string; body: string } {
|
|
321
|
+
const titleMatch = summary.match(/📌\s*(?:Title|标题)\s*\n(.+)/);
|
|
322
|
+
if (titleMatch) {
|
|
323
|
+
const title = titleMatch[1].trim().slice(0, 80);
|
|
324
|
+
const body = summary.replace(/📌\s*(?:Title|标题)\s*\n.+\n?/, "").trim();
|
|
325
|
+
return { title, body };
|
|
326
|
+
}
|
|
327
|
+
return { title: "", body: summary };
|
|
328
|
+
}
|
|
329
|
+
|
|
330
|
+
private extractTitle(chunks: Chunk[]): string {
|
|
331
|
+
const firstUser = chunks.find((c) => c.role === "user");
|
|
332
|
+
if (!firstUser) return "Untitled Task";
|
|
333
|
+
const text = firstUser.content.trim();
|
|
334
|
+
if (text.length <= 60) return text;
|
|
335
|
+
return text.slice(0, 57) + "...";
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
private humanReadableSkipReason(reason: string, chunks: Chunk[]): string {
|
|
339
|
+
const userCount = chunks.filter((c) => c.role === "user").length;
|
|
340
|
+
const assistantCount = chunks.filter((c) => c.role === "assistant").length;
|
|
341
|
+
|
|
342
|
+
if (reason.includes("too few chunks")) {
|
|
343
|
+
return `对话内容过少(${chunks.length} 条消息),不足以生成有效摘要。至少需要 4 条消息。`;
|
|
344
|
+
}
|
|
345
|
+
if (reason.includes("too few conversation turns")) {
|
|
346
|
+
return `对话轮次不足(${Math.min(userCount, assistantCount)} 轮),需要至少 2 轮完整的问答交互才能生成摘要。`;
|
|
347
|
+
}
|
|
348
|
+
if (reason.includes("no user messages")) {
|
|
349
|
+
return "该任务没有用户消息,仅包含系统或工具自动生成的内容。";
|
|
350
|
+
}
|
|
351
|
+
if (reason.includes("content too short")) {
|
|
352
|
+
return "对话内容过短,信息量不足以生成有意义的摘要。";
|
|
353
|
+
}
|
|
354
|
+
if (reason.includes("trivial")) {
|
|
355
|
+
return "对话内容为简单问候或测试数据(如 hello、test、ok),无需生成摘要。";
|
|
356
|
+
}
|
|
357
|
+
if (reason.includes("tool results")) {
|
|
358
|
+
return "该任务主要由工具执行结果组成,缺少足够的用户交互内容。";
|
|
359
|
+
}
|
|
360
|
+
if (reason.includes("repetition")) {
|
|
361
|
+
return "对话中存在大量重复内容,无法提取有效信息生成摘要。";
|
|
362
|
+
}
|
|
363
|
+
return `对话未达到生成摘要的条件:${reason}`;
|
|
364
|
+
}
|
|
365
|
+
|
|
366
|
+
private fallbackSummary(chunks: Chunk[]): string {
|
|
367
|
+
const title = this.extractTitle(chunks);
|
|
368
|
+
const summaries = chunks
|
|
369
|
+
.filter((c) => c.summary)
|
|
370
|
+
.map((c) => `- ${c.summary}`);
|
|
371
|
+
const lines = [
|
|
372
|
+
`🎯 Goal`,
|
|
373
|
+
title,
|
|
374
|
+
``,
|
|
375
|
+
`📋 Key Steps`,
|
|
376
|
+
...summaries.slice(0, 20),
|
|
377
|
+
];
|
|
378
|
+
return lines.join("\n");
|
|
379
|
+
}
|
|
380
|
+
}
|
package/src/ingest/worker.ts
CHANGED
|
@@ -1,13 +1,15 @@
|
|
|
1
1
|
import { v4 as uuid } from "uuid";
|
|
2
|
+
import { createHash } from "crypto";
|
|
2
3
|
import type { ConversationMessage, Chunk, PluginContext } from "../types";
|
|
3
4
|
import type { SqliteStore } from "../storage/sqlite";
|
|
4
5
|
import type { Embedder } from "../embedding";
|
|
5
6
|
import { Summarizer } from "./providers";
|
|
6
|
-
import { chunkText } from "./chunker";
|
|
7
7
|
import { findDuplicate } from "./dedup";
|
|
8
|
+
import { TaskProcessor } from "./task-processor";
|
|
8
9
|
|
|
9
10
|
export class IngestWorker {
|
|
10
11
|
private summarizer: Summarizer;
|
|
12
|
+
private taskProcessor: TaskProcessor;
|
|
11
13
|
private queue: ConversationMessage[] = [];
|
|
12
14
|
private processing = false;
|
|
13
15
|
private flushResolvers: Array<() => void> = [];
|
|
@@ -18,6 +20,7 @@ export class IngestWorker {
|
|
|
18
20
|
private ctx: PluginContext,
|
|
19
21
|
) {
|
|
20
22
|
this.summarizer = new Summarizer(ctx.config.summarizer, ctx.log);
|
|
23
|
+
this.taskProcessor = new TaskProcessor(store, ctx);
|
|
21
24
|
}
|
|
22
25
|
|
|
23
26
|
enqueue(messages: ConversationMessage[]): void {
|
|
@@ -41,38 +44,40 @@ export class IngestWorker {
|
|
|
41
44
|
private async processQueue(): Promise<void> {
|
|
42
45
|
this.processing = true;
|
|
43
46
|
|
|
47
|
+
let lastSessionKey: string | undefined;
|
|
48
|
+
let lastTimestamp = 0;
|
|
49
|
+
|
|
44
50
|
while (this.queue.length > 0) {
|
|
45
51
|
const msg = this.queue.shift()!;
|
|
46
52
|
try {
|
|
47
53
|
await this.ingestMessage(msg);
|
|
54
|
+
lastSessionKey = msg.sessionKey;
|
|
55
|
+
lastTimestamp = Math.max(lastTimestamp, msg.timestamp);
|
|
48
56
|
} catch (err) {
|
|
49
57
|
this.ctx.log.error(`Failed to ingest message turn=${msg.turnId}: ${err}`);
|
|
50
58
|
}
|
|
51
59
|
}
|
|
52
60
|
|
|
61
|
+
if (lastSessionKey) {
|
|
62
|
+
this.ctx.log.debug(`Calling TaskProcessor.onChunksIngested session=${lastSessionKey} ts=${lastTimestamp}`);
|
|
63
|
+
this.taskProcessor
|
|
64
|
+
.onChunksIngested(lastSessionKey, lastTimestamp)
|
|
65
|
+
.catch((err) => this.ctx.log.error(`TaskProcessor post-ingest error: ${err}`));
|
|
66
|
+
}
|
|
67
|
+
|
|
53
68
|
this.processing = false;
|
|
54
69
|
for (const resolve of this.flushResolvers) resolve();
|
|
55
70
|
this.flushResolvers = [];
|
|
56
71
|
}
|
|
57
72
|
|
|
58
73
|
private async ingestMessage(msg: ConversationMessage): Promise<void> {
|
|
59
|
-
if (msg.role
|
|
60
|
-
|
|
74
|
+
if (this.store.chunkExistsByContent(msg.sessionKey, msg.role, msg.content)) {
|
|
75
|
+
this.ctx.log.debug(`Skipping duplicate message: session=${msg.sessionKey} role=${msg.role} len=${msg.content.length}`);
|
|
61
76
|
return;
|
|
62
77
|
}
|
|
63
78
|
|
|
64
|
-
const
|
|
65
|
-
this.
|
|
66
|
-
|
|
67
|
-
for (let seq = 0; seq < rawChunks.length; seq++) {
|
|
68
|
-
const raw = rawChunks[seq];
|
|
69
|
-
await this.storeChunk(msg, raw.content, raw.kind, seq);
|
|
70
|
-
}
|
|
71
|
-
}
|
|
72
|
-
|
|
73
|
-
private async ingestToolResult(msg: ConversationMessage): Promise<void> {
|
|
74
|
-
this.ctx.log.debug(`Ingesting tool result turn=${msg.turnId} tool=${msg.toolName ?? "unknown"} len=${msg.content.length}`);
|
|
75
|
-
await this.storeChunk(msg, msg.content, "tool_result", 0);
|
|
79
|
+
const kind = msg.role === "tool" ? "tool_result" : "paragraph";
|
|
80
|
+
await this.storeChunk(msg, msg.content, kind, 0);
|
|
76
81
|
}
|
|
77
82
|
|
|
78
83
|
private async storeChunk(
|
|
@@ -117,6 +122,7 @@ export class IngestWorker {
|
|
|
117
122
|
kind,
|
|
118
123
|
summary,
|
|
119
124
|
embedding: null,
|
|
125
|
+
taskId: null,
|
|
120
126
|
createdAt: msg.timestamp,
|
|
121
127
|
updatedAt: msg.timestamp,
|
|
122
128
|
};
|
package/src/recall/engine.ts
CHANGED
|
@@ -10,6 +10,7 @@ export interface RecallOptions {
|
|
|
10
10
|
query?: string;
|
|
11
11
|
maxResults?: number;
|
|
12
12
|
minScore?: number;
|
|
13
|
+
role?: string;
|
|
13
14
|
}
|
|
14
15
|
|
|
15
16
|
const MAX_RECENT_QUERIES = 20;
|
|
@@ -31,6 +32,7 @@ export class RecallEngine {
|
|
|
31
32
|
);
|
|
32
33
|
const minScore = opts.minScore ?? recallCfg.minScoreDefault!;
|
|
33
34
|
const query = opts.query ?? "";
|
|
35
|
+
const roleFilter = opts.role;
|
|
34
36
|
|
|
35
37
|
const repeatNote = this.checkRepeat(query, maxResults, minScore);
|
|
36
38
|
const candidatePool = maxResults * 5;
|
|
@@ -82,24 +84,31 @@ export class RecallEngine {
|
|
|
82
84
|
});
|
|
83
85
|
const decayed = applyRecencyDecay(withTs, recallCfg.recencyHalfLifeDays);
|
|
84
86
|
|
|
85
|
-
// Step 5:
|
|
86
|
-
const
|
|
87
|
-
const
|
|
87
|
+
// Step 5: Apply relative threshold on raw scores, then normalize to [0,1]
|
|
88
|
+
const sorted = [...decayed].sort((a, b) => b.score - a.score);
|
|
89
|
+
const topScore = sorted.length > 0 ? sorted[0].score : 0;
|
|
90
|
+
|
|
91
|
+
const absoluteFloor = topScore * minScore * 0.3;
|
|
92
|
+
// When role filter is active, keep a larger pool before slicing so we don't
|
|
93
|
+
// discard target-role candidates that rank below non-target ones.
|
|
94
|
+
const preSliceLimit = roleFilter ? maxResults * 5 : maxResults;
|
|
95
|
+
const filtered = sorted
|
|
96
|
+
.filter((d) => d.score >= absoluteFloor)
|
|
97
|
+
.slice(0, preSliceLimit);
|
|
98
|
+
|
|
99
|
+
const displayMax = filtered.length > 0 ? filtered[0].score : 1;
|
|
100
|
+
const normalized = filtered.map((d) => ({
|
|
88
101
|
...d,
|
|
89
|
-
score: d.score /
|
|
102
|
+
score: d.score / displayMax,
|
|
90
103
|
}));
|
|
91
104
|
|
|
92
|
-
// Step 6:
|
|
93
|
-
const filtered = normalized
|
|
94
|
-
.filter((d) => d.score >= minScore)
|
|
95
|
-
.sort((a, b) => b.score - a.score)
|
|
96
|
-
.slice(0, maxResults);
|
|
97
|
-
|
|
98
|
-
// Step 7: Build hits
|
|
105
|
+
// Step 6: Build hits (with optional role filter), applying maxResults cap at the end
|
|
99
106
|
const hits: SearchHit[] = [];
|
|
100
|
-
for (const candidate of
|
|
107
|
+
for (const candidate of normalized) {
|
|
108
|
+
if (hits.length >= maxResults) break;
|
|
101
109
|
const chunk = this.store.getChunk(candidate.id);
|
|
102
110
|
if (!chunk) continue;
|
|
111
|
+
if (roleFilter && chunk.role !== roleFilter) continue;
|
|
103
112
|
|
|
104
113
|
hits.push({
|
|
105
114
|
summary: chunk.summary,
|
|
@@ -111,6 +120,7 @@ export class RecallEngine {
|
|
|
111
120
|
seq: chunk.seq,
|
|
112
121
|
},
|
|
113
122
|
score: Math.round(candidate.score * 1000) / 1000,
|
|
123
|
+
taskId: chunk.taskId,
|
|
114
124
|
source: {
|
|
115
125
|
ts: chunk.createdAt,
|
|
116
126
|
role: chunk.role,
|
package/src/recall/mmr.ts
CHANGED
|
@@ -53,7 +53,9 @@ export function mmrRerank(
|
|
|
53
53
|
}
|
|
54
54
|
|
|
55
55
|
const chosen = remaining.splice(bestIdx, 1)[0];
|
|
56
|
-
|
|
56
|
+
// Preserve original RRF score for downstream filtering;
|
|
57
|
+
// MMR only determines selection order, not the score value.
|
|
58
|
+
selected.push({ id: chosen.id, score: chosen.score });
|
|
57
59
|
}
|
|
58
60
|
|
|
59
61
|
return selected;
|