@memtensor/memos-local-openclaw-plugin 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/.env.example +13 -5
  2. package/README.md +180 -68
  3. package/dist/capture/index.d.ts +5 -7
  4. package/dist/capture/index.d.ts.map +1 -1
  5. package/dist/capture/index.js +72 -43
  6. package/dist/capture/index.js.map +1 -1
  7. package/dist/ingest/providers/anthropic.d.ts +2 -0
  8. package/dist/ingest/providers/anthropic.d.ts.map +1 -1
  9. package/dist/ingest/providers/anthropic.js +110 -1
  10. package/dist/ingest/providers/anthropic.js.map +1 -1
  11. package/dist/ingest/providers/bedrock.d.ts +2 -5
  12. package/dist/ingest/providers/bedrock.d.ts.map +1 -1
  13. package/dist/ingest/providers/bedrock.js +110 -6
  14. package/dist/ingest/providers/bedrock.js.map +1 -1
  15. package/dist/ingest/providers/gemini.d.ts +2 -0
  16. package/dist/ingest/providers/gemini.d.ts.map +1 -1
  17. package/dist/ingest/providers/gemini.js +106 -1
  18. package/dist/ingest/providers/gemini.js.map +1 -1
  19. package/dist/ingest/providers/index.d.ts +9 -0
  20. package/dist/ingest/providers/index.d.ts.map +1 -1
  21. package/dist/ingest/providers/index.js +66 -4
  22. package/dist/ingest/providers/index.js.map +1 -1
  23. package/dist/ingest/providers/openai.d.ts +2 -0
  24. package/dist/ingest/providers/openai.d.ts.map +1 -1
  25. package/dist/ingest/providers/openai.js +112 -1
  26. package/dist/ingest/providers/openai.js.map +1 -1
  27. package/dist/ingest/task-processor.d.ts +63 -0
  28. package/dist/ingest/task-processor.d.ts.map +1 -0
  29. package/dist/ingest/task-processor.js +339 -0
  30. package/dist/ingest/task-processor.js.map +1 -0
  31. package/dist/ingest/worker.d.ts +1 -1
  32. package/dist/ingest/worker.d.ts.map +1 -1
  33. package/dist/ingest/worker.js +18 -13
  34. package/dist/ingest/worker.js.map +1 -1
  35. package/dist/recall/engine.d.ts +1 -0
  36. package/dist/recall/engine.d.ts.map +1 -1
  37. package/dist/recall/engine.js +21 -11
  38. package/dist/recall/engine.js.map +1 -1
  39. package/dist/recall/mmr.d.ts.map +1 -1
  40. package/dist/recall/mmr.js +3 -1
  41. package/dist/recall/mmr.js.map +1 -1
  42. package/dist/storage/sqlite.d.ts +67 -1
  43. package/dist/storage/sqlite.d.ts.map +1 -1
  44. package/dist/storage/sqlite.js +251 -5
  45. package/dist/storage/sqlite.js.map +1 -1
  46. package/dist/types.d.ts +15 -0
  47. package/dist/types.d.ts.map +1 -1
  48. package/dist/types.js +2 -0
  49. package/dist/types.js.map +1 -1
  50. package/dist/viewer/html.d.ts +1 -1
  51. package/dist/viewer/html.d.ts.map +1 -1
  52. package/dist/viewer/html.js +955 -115
  53. package/dist/viewer/html.js.map +1 -1
  54. package/dist/viewer/server.d.ts +3 -0
  55. package/dist/viewer/server.d.ts.map +1 -1
  56. package/dist/viewer/server.js +59 -1
  57. package/dist/viewer/server.js.map +1 -1
  58. package/index.ts +221 -45
  59. package/openclaw.plugin.json +20 -45
  60. package/package.json +3 -4
  61. package/skill/SKILL.md +59 -0
  62. package/src/capture/index.ts +85 -45
  63. package/src/ingest/providers/anthropic.ts +128 -1
  64. package/src/ingest/providers/bedrock.ts +130 -6
  65. package/src/ingest/providers/gemini.ts +128 -1
  66. package/src/ingest/providers/index.ts +74 -8
  67. package/src/ingest/providers/openai.ts +130 -1
  68. package/src/ingest/task-processor.ts +380 -0
  69. package/src/ingest/worker.ts +21 -15
  70. package/src/recall/engine.ts +22 -12
  71. package/src/recall/mmr.ts +3 -1
  72. package/src/storage/sqlite.ts +298 -5
  73. package/src/types.ts +19 -0
  74. package/src/viewer/html.ts +955 -115
  75. package/src/viewer/server.ts +63 -1
  76. package/SKILL.md +0 -43
  77. package/www/index.html +0 -606
@@ -1,6 +1,133 @@
1
1
  import type { SummarizerConfig, Logger } from "../../types";
2
2
 
3
- const SYSTEM_PROMPT = `Summarize the text in ONE concise sentence (max 60 tokens). Preserve exact names, commands, error codes. No bullet points, no preamble — output only the sentence.`;
3
+ const SYSTEM_PROMPT = `Summarize the text in ONE concise sentence (max 120 characters). IMPORTANT: Use the SAME language as the input text — if the input is Chinese, write Chinese; if English, write English. Preserve exact names, commands, error codes. No bullet points, no preamble — output only the sentence.`;
4
+
5
+ const TASK_SUMMARY_PROMPT = `You create a DETAILED task summary from a multi-turn conversation. This summary will be the ONLY record of this conversation, so it must preserve ALL important information.
6
+
7
+ CRITICAL LANGUAGE RULE: You MUST write in the SAME language as the user's messages. Chinese input → Chinese output. English input → English output. NEVER mix languages.
8
+
9
+ Output EXACTLY this structure:
10
+
11
+ 📌 Title
12
+ A short, descriptive title (10-30 characters). Like a chat group name.
13
+
14
+ 🎯 Goal
15
+ One sentence: what the user wanted to accomplish.
16
+
17
+ 📋 Key Steps
18
+ - Describe each meaningful step in detail
19
+ - Include the ACTUAL content produced: code snippets, commands, config blocks, formulas, key paragraphs
20
+ - For code: include the function signature and core logic (up to ~30 lines per block), use fenced code blocks
21
+ - For configs: include the actual config values and structure
22
+ - For lists/instructions: include the actual items, not just "provided a list"
23
+ - Merge only truly trivial back-and-forth (like "ok" / "sure")
24
+ - Do NOT over-summarize: "provided a function" is BAD; show the actual function
25
+
26
+ ✅ Result
27
+ What was the final outcome? Include the final version of any code/config/content produced.
28
+
29
+ 💡 Key Details
30
+ - Decisions made, trade-offs discussed, caveats noted, alternative approaches mentioned
31
+ - Specific values: numbers, versions, thresholds, URLs, file paths, model names
32
+ - Omit this section only if there truly are no noteworthy details
33
+
34
+ RULES:
35
+ - This summary is a KNOWLEDGE BASE ENTRY, not a brief note. Be thorough.
36
+ - PRESERVE verbatim: code, commands, URLs, file paths, error messages, config values, version numbers, names, amounts
37
+ - DISCARD only: greetings, filler, the assistant explaining what it will do before doing it
38
+ - Replace secrets (API keys, tokens, passwords) with [REDACTED]
39
+ - Target length: 30-50% of the original conversation length. Longer conversations need longer summaries.
40
+ - Output summary only, no preamble.`;
41
+
42
+ export async function summarizeTaskGemini(
43
+ text: string,
44
+ cfg: SummarizerConfig,
45
+ log: Logger,
46
+ ): Promise<string> {
47
+ const model = cfg.model ?? "gemini-1.5-flash";
48
+ const endpoint =
49
+ cfg.endpoint ??
50
+ `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent`;
51
+
52
+ const url = `${endpoint}?key=${cfg.apiKey}`;
53
+ const headers: Record<string, string> = {
54
+ "Content-Type": "application/json",
55
+ ...cfg.headers,
56
+ };
57
+
58
+ const resp = await fetch(url, {
59
+ method: "POST",
60
+ headers,
61
+ body: JSON.stringify({
62
+ systemInstruction: { parts: [{ text: TASK_SUMMARY_PROMPT }] },
63
+ contents: [{ parts: [{ text }] }],
64
+ generationConfig: { temperature: cfg.temperature ?? 0.1, maxOutputTokens: 4096 },
65
+ }),
66
+ signal: AbortSignal.timeout(cfg.timeoutMs ?? 60_000),
67
+ });
68
+
69
+ if (!resp.ok) {
70
+ const body = await resp.text();
71
+ throw new Error(`Gemini task-summarize failed (${resp.status}): ${body}`);
72
+ }
73
+
74
+ const json = (await resp.json()) as { candidates: Array<{ content: { parts: Array<{ text: string }> } }> };
75
+ return json.candidates?.[0]?.content?.parts?.[0]?.text?.trim() ?? "";
76
+ }
77
+
78
+ const TOPIC_JUDGE_PROMPT = `You are a conversation topic boundary detector. Given a summary of the CURRENT conversation and a NEW user message, determine if the new message starts a DIFFERENT topic/task.
79
+
80
+ Answer ONLY "NEW" or "SAME".
81
+
82
+ Rules:
83
+ - "NEW" = the new message is about a completely different subject, project, or task
84
+ - "SAME" = the new message continues, follows up on, or is closely related to the current topic
85
+ - Follow-up questions, clarifications, refinements, bug fixes, or next steps on the same task = SAME
86
+ - Greetings or meta-questions like "你好" or "谢谢" without new substance = SAME
87
+ - A clearly unrelated request (e.g., current topic is deployment, new message asks about cooking) = NEW
88
+
89
+ Output exactly one word: NEW or SAME`;
90
+
91
+ export async function judgeNewTopicGemini(
92
+ currentContext: string,
93
+ newMessage: string,
94
+ cfg: SummarizerConfig,
95
+ log: Logger,
96
+ ): Promise<boolean> {
97
+ const model = cfg.model ?? "gemini-1.5-flash";
98
+ const endpoint =
99
+ cfg.endpoint ??
100
+ `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent`;
101
+
102
+ const url = `${endpoint}?key=${cfg.apiKey}`;
103
+ const headers: Record<string, string> = {
104
+ "Content-Type": "application/json",
105
+ ...cfg.headers,
106
+ };
107
+
108
+ const userContent = `CURRENT CONVERSATION SUMMARY:\n${currentContext}\n\nNEW USER MESSAGE:\n${newMessage}`;
109
+
110
+ const resp = await fetch(url, {
111
+ method: "POST",
112
+ headers,
113
+ body: JSON.stringify({
114
+ systemInstruction: { parts: [{ text: TOPIC_JUDGE_PROMPT }] },
115
+ contents: [{ parts: [{ text: userContent }] }],
116
+ generationConfig: { temperature: 0, maxOutputTokens: 10 },
117
+ }),
118
+ signal: AbortSignal.timeout(cfg.timeoutMs ?? 15_000),
119
+ });
120
+
121
+ if (!resp.ok) {
122
+ const body = await resp.text();
123
+ throw new Error(`Gemini topic-judge failed (${resp.status}): ${body}`);
124
+ }
125
+
126
+ const json = (await resp.json()) as { candidates: Array<{ content: { parts: Array<{ text: string }> } }> };
127
+ const answer = json.candidates?.[0]?.content?.parts?.[0]?.text?.trim().toUpperCase() ?? "";
128
+ log.debug(`Topic judge result: "${answer}"`);
129
+ return answer.startsWith("NEW");
130
+ }
4
131
 
5
132
  export async function summarizeGemini(
6
133
  text: string,
@@ -1,8 +1,8 @@
1
1
  import type { SummarizerConfig, Logger } from "../../types";
2
- import { summarizeOpenAI } from "./openai";
3
- import { summarizeAnthropic } from "./anthropic";
4
- import { summarizeGemini } from "./gemini";
5
- import { summarizeBedrock } from "./bedrock";
2
+ import { summarizeOpenAI, summarizeTaskOpenAI, judgeNewTopicOpenAI } from "./openai";
3
+ import { summarizeAnthropic, summarizeTaskAnthropic, judgeNewTopicAnthropic } from "./anthropic";
4
+ import { summarizeGemini, summarizeTaskGemini, judgeNewTopicGemini } from "./gemini";
5
+ import { summarizeBedrock, summarizeTaskBedrock, judgeNewTopicBedrock } from "./bedrock";
6
6
 
7
7
  export class Summarizer {
8
8
  constructor(
@@ -23,6 +23,19 @@ export class Summarizer {
23
23
  }
24
24
  }
25
25
 
26
+ async summarizeTask(text: string): Promise<string> {
27
+ if (!this.cfg) {
28
+ return taskFallback(text);
29
+ }
30
+
31
+ try {
32
+ return await this.callTaskProvider(text);
33
+ } catch (err) {
34
+ this.log.warn(`Task summarizer failed, using fallback: ${err}`);
35
+ return taskFallback(text);
36
+ }
37
+ }
38
+
26
39
  private async callProvider(text: string): Promise<string> {
27
40
  const cfg = this.cfg!;
28
41
  switch (cfg.provider) {
@@ -41,12 +54,65 @@ export class Summarizer {
41
54
  throw new Error(`Unknown summarizer provider: ${cfg.provider}`);
42
55
  }
43
56
  }
57
+
58
+ /**
59
+ * Ask the LLM whether the new message starts a different topic from the current conversation.
60
+ * Returns true if it's a new topic, false if it continues the current one.
61
+ * Returns null if no summarizer is configured (caller should fall back to heuristic).
62
+ */
63
+ async judgeNewTopic(currentContext: string, newMessage: string): Promise<boolean | null> {
64
+ if (!this.cfg) return null;
65
+
66
+ try {
67
+ return await this.callTopicJudge(currentContext, newMessage);
68
+ } catch (err) {
69
+ this.log.warn(`Topic judge failed: ${err}`);
70
+ return null;
71
+ }
72
+ }
73
+
74
+ private async callTopicJudge(currentContext: string, newMessage: string): Promise<boolean> {
75
+ const cfg = this.cfg!;
76
+ switch (cfg.provider) {
77
+ case "openai":
78
+ case "openai_compatible":
79
+ case "azure_openai":
80
+ return judgeNewTopicOpenAI(currentContext, newMessage, cfg, this.log);
81
+ case "anthropic":
82
+ return judgeNewTopicAnthropic(currentContext, newMessage, cfg, this.log);
83
+ case "gemini":
84
+ return judgeNewTopicGemini(currentContext, newMessage, cfg, this.log);
85
+ case "bedrock":
86
+ return judgeNewTopicBedrock(currentContext, newMessage, cfg, this.log);
87
+ default:
88
+ throw new Error(`Unknown summarizer provider: ${cfg.provider}`);
89
+ }
90
+ }
91
+
92
+ private async callTaskProvider(text: string): Promise<string> {
93
+ const cfg = this.cfg!;
94
+ switch (cfg.provider) {
95
+ case "openai":
96
+ case "openai_compatible":
97
+ case "azure_openai":
98
+ return summarizeTaskOpenAI(text, cfg, this.log);
99
+ case "anthropic":
100
+ return summarizeTaskAnthropic(text, cfg, this.log);
101
+ case "gemini":
102
+ return summarizeTaskGemini(text, cfg, this.log);
103
+ case "bedrock":
104
+ return summarizeTaskBedrock(text, cfg, this.log);
105
+ default:
106
+ throw new Error(`Unknown summarizer provider: ${cfg.provider}`);
107
+ }
108
+ }
109
+ }
110
+
111
+ function taskFallback(text: string): string {
112
+ const lines = text.split("\n").filter((l) => l.trim().length > 10);
113
+ return lines.slice(0, 30).join("\n").slice(0, 2000);
44
114
  }
45
115
 
46
- /**
47
- * Rule-based fallback: produce a single short sentence from the first
48
- * meaningful line, appending any key entities found in the text.
49
- */
50
116
  function ruleFallback(text: string): string {
51
117
  const lines = text.split("\n").filter((l) => l.trim().length > 10);
52
118
  const first = (lines[0] ?? text).trim();
@@ -1,6 +1,80 @@
1
1
  import type { SummarizerConfig, Logger } from "../../types";
2
2
 
3
- const SYSTEM_PROMPT = `Summarize the text in ONE concise sentence (max 60 tokens). Preserve exact names, commands, error codes. No bullet points, no preamble — output only the sentence.`;
3
+ const SYSTEM_PROMPT = `Summarize the text in ONE concise sentence (max 120 characters). IMPORTANT: Use the SAME language as the input text — if the input is Chinese, write Chinese; if English, write English. Preserve exact names, commands, error codes. No bullet points, no preamble — output only the sentence.`;
4
+
5
+ const TASK_SUMMARY_PROMPT = `You create a DETAILED task summary from a multi-turn conversation. This summary will be the ONLY record of this conversation, so it must preserve ALL important information.
6
+
7
+ CRITICAL LANGUAGE RULE: You MUST write in the SAME language as the user's messages. Chinese input → Chinese output. English input → English output. NEVER mix languages.
8
+
9
+ Output EXACTLY this structure:
10
+
11
+ 📌 Title
12
+ A short, descriptive title (10-30 characters). Like a chat group name.
13
+
14
+ 🎯 Goal
15
+ One sentence: what the user wanted to accomplish.
16
+
17
+ 📋 Key Steps
18
+ - Describe each meaningful step in detail
19
+ - Include the ACTUAL content produced: code snippets, commands, config blocks, formulas, key paragraphs
20
+ - For code: include the function signature and core logic (up to ~30 lines per block), use fenced code blocks
21
+ - For configs: include the actual config values and structure
22
+ - For lists/instructions: include the actual items, not just "provided a list"
23
+ - Merge only truly trivial back-and-forth (like "ok" / "sure")
24
+ - Do NOT over-summarize: "provided a function" is BAD; show the actual function
25
+
26
+ ✅ Result
27
+ What was the final outcome? Include the final version of any code/config/content produced.
28
+
29
+ 💡 Key Details
30
+ - Decisions made, trade-offs discussed, caveats noted, alternative approaches mentioned
31
+ - Specific values: numbers, versions, thresholds, URLs, file paths, model names
32
+ - Omit this section only if there truly are no noteworthy details
33
+
34
+ RULES:
35
+ - This summary is a KNOWLEDGE BASE ENTRY, not a brief note. Be thorough.
36
+ - PRESERVE verbatim: code, commands, URLs, file paths, error messages, config values, version numbers, names, amounts
37
+ - DISCARD only: greetings, filler, the assistant explaining what it will do before doing it
38
+ - Replace secrets (API keys, tokens, passwords) with [REDACTED]
39
+ - Target length: 30-50% of the original conversation length. Longer conversations need longer summaries.
40
+ - Output summary only, no preamble.`;
41
+
42
+ export async function summarizeTaskOpenAI(
43
+ text: string,
44
+ cfg: SummarizerConfig,
45
+ log: Logger,
46
+ ): Promise<string> {
47
+ const endpoint = normalizeChatEndpoint(cfg.endpoint ?? "https://api.openai.com/v1/chat/completions");
48
+ const model = cfg.model ?? "gpt-4o-mini";
49
+ const headers: Record<string, string> = {
50
+ "Content-Type": "application/json",
51
+ Authorization: `Bearer ${cfg.apiKey}`,
52
+ ...cfg.headers,
53
+ };
54
+
55
+ const resp = await fetch(endpoint, {
56
+ method: "POST",
57
+ headers,
58
+ body: JSON.stringify({
59
+ model,
60
+ temperature: cfg.temperature ?? 0.1,
61
+ max_tokens: 4096,
62
+ messages: [
63
+ { role: "system", content: TASK_SUMMARY_PROMPT },
64
+ { role: "user", content: text },
65
+ ],
66
+ }),
67
+ signal: AbortSignal.timeout(cfg.timeoutMs ?? 60_000),
68
+ });
69
+
70
+ if (!resp.ok) {
71
+ const body = await resp.text();
72
+ throw new Error(`OpenAI task-summarize failed (${resp.status}): ${body}`);
73
+ }
74
+
75
+ const json = (await resp.json()) as { choices: Array<{ message: { content: string } }> };
76
+ return json.choices[0]?.message?.content?.trim() ?? "";
77
+ }
4
78
 
5
79
  export async function summarizeOpenAI(
6
80
  text: string,
@@ -40,6 +114,61 @@ export async function summarizeOpenAI(
40
114
  return json.choices[0]?.message?.content?.trim() ?? "";
41
115
  }
42
116
 
117
+ const TOPIC_JUDGE_PROMPT = `You are a conversation topic boundary detector. Given a summary of the CURRENT conversation and a NEW user message, determine if the new message starts a DIFFERENT topic/task.
118
+
119
+ Answer ONLY "NEW" or "SAME".
120
+
121
+ Rules:
122
+ - "NEW" = the new message is about a completely different subject, project, or task
123
+ - "SAME" = the new message continues, follows up on, or is closely related to the current topic
124
+ - Follow-up questions, clarifications, refinements, bug fixes, or next steps on the same task = SAME
125
+ - Greetings or meta-questions like "你好" or "谢谢" without new substance = SAME
126
+ - A clearly unrelated request (e.g., current topic is deployment, new message asks about cooking) = NEW
127
+
128
+ Output exactly one word: NEW or SAME`;
129
+
130
+ export async function judgeNewTopicOpenAI(
131
+ currentContext: string,
132
+ newMessage: string,
133
+ cfg: SummarizerConfig,
134
+ log: Logger,
135
+ ): Promise<boolean> {
136
+ const endpoint = normalizeChatEndpoint(cfg.endpoint ?? "https://api.openai.com/v1/chat/completions");
137
+ const model = cfg.model ?? "gpt-4o-mini";
138
+ const headers: Record<string, string> = {
139
+ "Content-Type": "application/json",
140
+ Authorization: `Bearer ${cfg.apiKey}`,
141
+ ...cfg.headers,
142
+ };
143
+
144
+ const userContent = `CURRENT CONVERSATION SUMMARY:\n${currentContext}\n\nNEW USER MESSAGE:\n${newMessage}`;
145
+
146
+ const resp = await fetch(endpoint, {
147
+ method: "POST",
148
+ headers,
149
+ body: JSON.stringify({
150
+ model,
151
+ temperature: 0,
152
+ max_tokens: 10,
153
+ messages: [
154
+ { role: "system", content: TOPIC_JUDGE_PROMPT },
155
+ { role: "user", content: userContent },
156
+ ],
157
+ }),
158
+ signal: AbortSignal.timeout(cfg.timeoutMs ?? 15_000),
159
+ });
160
+
161
+ if (!resp.ok) {
162
+ const body = await resp.text();
163
+ throw new Error(`OpenAI topic-judge failed (${resp.status}): ${body}`);
164
+ }
165
+
166
+ const json = (await resp.json()) as { choices: Array<{ message: { content: string } }> };
167
+ const answer = json.choices[0]?.message?.content?.trim().toUpperCase() ?? "";
168
+ log.debug(`Topic judge result: "${answer}"`);
169
+ return answer.startsWith("NEW");
170
+ }
171
+
43
172
  function normalizeChatEndpoint(url: string): string {
44
173
  const stripped = url.replace(/\/+$/, "");
45
174
  if (stripped.endsWith("/chat/completions")) return stripped;