xiaozuoassistant 0.2.7 → 0.2.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client/assets/browser-ponyfill-C7q-vgM7.js +2 -0
- package/dist/client/assets/index-DkOojrRj.js +201 -0
- package/dist/client/assets/index-u0lXmgyZ.css +1 -0
- package/dist/client/favicon.svg +4 -0
- package/dist/client/index.html +14 -0
- package/dist/client/locales/en/translation.json +110 -0
- package/dist/client/locales/zh/translation.json +112 -0
- package/dist/server/agents/office.js +23 -0
- package/dist/server/app.js +50 -0
- package/dist/server/channels/base-channel.js +23 -0
- package/dist/server/channels/create-channels.js +18 -0
- package/dist/server/channels/dingtalk.js +83 -0
- package/dist/server/channels/feishu.js +108 -0
- package/dist/server/channels/telegram.js +53 -0
- package/dist/server/channels/terminal.js +49 -0
- package/dist/server/channels/web.js +66 -0
- package/dist/server/channels/wechat.js +107 -0
- package/dist/server/config/loader.js +96 -0
- package/dist/server/config/paths.js +24 -0
- package/dist/server/config/prompts.js +12 -0
- package/dist/server/core/agents/manager.js +27 -0
- package/dist/server/core/agents/runtime.js +92 -0
- package/dist/server/core/brain.js +255 -0
- package/dist/server/core/event-bus.js +24 -0
- package/dist/server/core/logger.js +71 -0
- package/dist/server/core/memories/manager.js +238 -0
- package/dist/server/core/memories/short-term.js +512 -0
- package/dist/server/core/memories/structured.js +357 -0
- package/dist/server/core/memories/vector.js +137 -0
- package/dist/server/core/memory.js +2 -0
- package/dist/server/core/plugin-manager.js +128 -0
- package/dist/server/core/plugin.js +1 -0
- package/dist/server/core/scheduler.js +85 -0
- package/dist/server/core/task-queue.js +104 -0
- package/dist/server/core/types.js +1 -0
- package/dist/server/index.js +862 -0
- package/dist/server/llm/openai.js +23 -0
- package/dist/server/plugins/core-skills/src/create-agent.js +58 -0
- package/dist/server/plugins/core-skills/src/delegate.js +39 -0
- package/dist/server/plugins/core-skills/src/file-system.js +142 -0
- package/dist/server/plugins/core-skills/src/index.js +26 -0
- package/dist/server/plugins/core-skills/src/list-agents.js +24 -0
- package/dist/server/plugins/core-skills/src/search.js +31 -0
- package/dist/server/plugins/core-skills/src/system-time.js +27 -0
- package/dist/server/plugins/office-skills/src/index.js +19 -0
- package/dist/server/plugins/office-skills/src/office-excel.js +84 -0
- package/dist/server/plugins/office-skills/src/office-ppt.js +58 -0
- package/dist/server/plugins/office-skills/src/office-word.js +90 -0
- package/dist/server/routes/auth.js +28 -0
- package/dist/server/server/create-http.js +22 -0
- package/dist/server/server.js +29 -0
- package/dist/server/skills/base-skill.js +20 -0
- package/dist/server/skills/registry.js +52 -0
- package/package.json +1 -1
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
import { config } from '../config/loader.js';
|
|
2
|
+
import { skillRegistry } from '../skills/registry.js';
|
|
3
|
+
import { SYSTEM_PROMPT } from '../config/prompts.js';
|
|
4
|
+
import { createOpenAIClient } from '../llm/openai.js';
|
|
5
|
+
export class Brain {
    /**
     * Singleton LLM orchestrator. Builds chat prompts from session history,
     * drives the model's tool-call loop via the skill registry, and exposes
     * summarization / extraction helpers used by the memory subsystem.
     */
    constructor() {
        // updateClient() creates the OpenAI client. The previous code also
        // called createOpenAIClient() directly here, constructing the client
        // twice for no benefit.
        this.updateClient();
    }
    /** @returns {Brain} the process-wide singleton instance. */
    static getInstance() {
        if (!Brain.instance) {
            Brain.instance = new Brain();
        }
        return Brain.instance;
    }
    /** Re-create the OpenAI client (call after config.llm changes). */
    updateClient() {
        this.openai = createOpenAIClient(config.llm);
    }
    /**
     * Process one incoming message against the conversation history,
     * executing any tool calls the model requests until it produces a final
     * text answer or the iteration cap is reached.
     * @param {Array<object>} history - prior messages ({role, content, ...}).
     * @param {string} newMessage - the new user message.
     * @param {string} [systemPrompt] - optional override of the configured system prompt.
     * @param {object} [context] - channel/session context forwarded to skills.
     * @returns {Promise<string>} final assistant text, or an error string.
     */
    async processMessage(history, newMessage, systemPrompt, context) {
        if (process.env.DEBUG)
            console.log('[Brain] Processing message:', newMessage);
        const defaultSystemPrompt = systemPrompt || config.systemPrompt || SYSTEM_PROMPT || 'You are xiaozuoAssistant, a helpful AI assistant. You can use tools to help users.';
        // Without an API key there is nothing we can do.
        if (!config.llm.apiKey) {
            console.warn('API key not configured, cannot process message');
            return 'Error: API key not configured. Please set up your API key in the configuration.';
        }
        // Session wake-up: if the user returns after a long absence, prepend a
        // recap of the previous conversation to the system prompt.
        const sessionLastActive = context?.session?.lastActiveAt || 0;
        const now = Date.now();
        const hoursSinceActive = (now - sessionLastActive) / (1000 * 60 * 60);
        const WAKEUP_THRESHOLD = config.llm.sessionWakeupHours ?? 24;
        let wakeupContext = '';
        if (sessionLastActive > 0 && hoursSinceActive > WAKEUP_THRESHOLD) {
            if (process.env.DEBUG)
                console.log(`[Brain] Session wake-up detected (${hoursSinceActive.toFixed(1)}h > ${WAKEUP_THRESHOLD}h). Generating recap...`);
            // Quick recap from recent history only; a deeper recap could query
            // the vector store instead.
            const recentText = history.slice(-50).map(m => `${m.role}: ${m.content}`).join('\n');
            if (recentText.length > 100) {
                const summary = await this.generateSummary(recentText);
                if (summary) {
                    wakeupContext = `\n\n[System Notice]: The user has returned after ${Math.floor(hoursSinceActive)} hours. Here is a brief recap of the previous conversation context to help you catch up:\n${summary}`;
                }
            }
        }
        // Keep only the last N messages to avoid context-window overflow.
        // TODO: a token-based truncation strategy would be more precise.
        const MAX_HISTORY_MESSAGES = config.llm.maxHistoryMessages ?? 20;
        const recentHistory = history.slice(-MAX_HISTORY_MESSAGES);
        const messageHistory = recentHistory.map(m => {
            const msg = { role: m.role, content: m.content };
            // Preserve tool-call plumbing fields when present so replayed
            // history remains valid for the OpenAI API.
            if (m.name)
                msg.name = m.name;
            if (m.tool_call_id)
                msg.tool_call_id = m.tool_call_id;
            if (m.tool_calls)
                msg.tool_calls = m.tool_calls;
            return msg;
        });
        const messages = [
            { role: 'system', content: defaultSystemPrompt + wakeupContext },
            ...messageHistory,
            { role: 'user', content: newMessage }
        ];
        try {
            if (process.env.DEBUG)
                console.log('[Brain] Calling LLM...');
            let response = await this.callLLM(messages, newMessage);
            // Log only a content snippet to avoid flooding logs with full JSON.
            const contentSnippet = response.choices[0].message.content ? response.choices[0].message.content.substring(0, 100) + '...' : 'No content';
            if (process.env.DEBUG)
                console.log('[Brain] LLM Response (snippet):', contentSnippet);
            let iterations = 0;
            const MAX_ITERATIONS = config.llm.maxToolIterations ?? 15; // Reduced default from 200 to 15 for safety
            while (response.choices[0].message.tool_calls && iterations < MAX_ITERATIONS) {
                iterations++;
                const toolCalls = response.choices[0].message.tool_calls;
                const assistantMsg = response.choices[0].message;
                messages.push(assistantMsg); // Add assistant message with tool calls
                for (const toolCall of toolCalls) {
                    if (toolCall.type !== 'function')
                        continue;
                    const functionName = toolCall.function.name;
                    if (process.env.DEBUG)
                        console.log(`[Brain] Executing tool: ${functionName}`);
                    let toolResult = '';
                    try {
                        // BUGFIX: JSON.parse was previously outside any try/catch,
                        // so malformed arguments from the model aborted the whole
                        // run instead of being reported back as a tool error.
                        const functionArgs = JSON.parse(toolCall.function.arguments);
                        const skill = skillRegistry.getSkill(functionName);
                        if (skill) {
                            const ctxForTool = context
                                ? { ...context, metadata: { ...(context.metadata || {}), toolCall: { id: toolCall.id, name: functionName } } }
                                : undefined;
                            const result = await skill.execute(functionArgs, ctxForTool);
                            toolResult = JSON.stringify(result);
                        }
                        else {
                            toolResult = JSON.stringify({ error: 'Tool not found' });
                        }
                    }
                    catch (error) {
                        // Covers both malformed JSON arguments and skill failures.
                        toolResult = JSON.stringify({ error: error.message });
                    }
                    // Log tool result snippet
                    if (process.env.DEBUG)
                        console.log(`[Brain] Tool Result (snippet):`, toolResult.substring(0, 100) + '...');
                    messages.push({
                        role: 'tool',
                        tool_call_id: toolCall.id,
                        content: toolResult
                    });
                }
                // Call the LLM again with the tool results appended.
                if (process.env.DEBUG)
                    console.log('[Brain] Calling LLM with tool results...');
                response = await this.callLLM(messages);
                const nextContentSnippet = response.choices[0].message.content ? response.choices[0].message.content.substring(0, 100) + '...' : 'No content';
                if (process.env.DEBUG)
                    console.log('[Brain] LLM Response (after tool, snippet):', nextContentSnippet);
            }
            const hitLimit = Boolean(response.choices[0].message.tool_calls) && iterations >= MAX_ITERATIONS;
            const finalContent = response.choices[0].message.content || 'I could not generate a response.';
            if (process.env.DEBUG)
                console.log('[Brain] Final Response (snippet):', finalContent.substring(0, 100) + '...');
            if (!hitLimit)
                return finalContent;
            return `${finalContent}\n\n(已到达回合上限,建议回复“继续”以接着执行;可在 config.json 设置 llm.maxToolIterations,当前=${MAX_ITERATIONS})`;
        }
        catch (error) {
            console.error('[Brain] Error in processing:', error);
            return `Error: ${error.message}`;
        }
    }
    /**
     * Summarize arbitrary text with the configured model.
     * @param {string} content - text to summarize.
     * @returns {Promise<string>} summary, or '' on failure / missing API key.
     */
    async generateSummary(content) {
        try {
            if (!config.llm.apiKey) {
                console.warn('API key not configured, skipping summary generation');
                return '';
            }
            const response = await this.openai.chat.completions.create({
                model: config.llm.model,
                messages: [
                    { role: 'system', content: 'You are a helpful assistant. Please summarize the following content concisely.' },
                    { role: 'user', content: content }
                ],
                temperature: 0.5
            }, { timeout: config.llm.requestTimeoutMs ?? 600000 });
            return response.choices[0].message.content || '';
        }
        catch (e) {
            console.error('Summary generation failed:', e);
            return '';
        }
    }
    /**
     * Extract key facts from a conversation chunk for archiving.
     * @param {string} content - conversation text ("role: content" lines).
     * @param {string} contextType - 'project' for project-focused extraction,
     *   anything else for general long-term-memory extraction.
     * @returns {Promise<string>} extracted summary, or '' on failure.
     */
    async extractKeyInformation(content, contextType) {
        try {
            if (!config.llm.apiKey) {
                console.warn('API key not configured, skipping key information extraction');
                return '';
            }
            const prompt = contextType === 'project'
                ? 'Extract key project-related information, decisions, and tasks from the following conversation. Return a concise summary.'
                : 'Extract key information, insights, and valuable knowledge from the following conversation. Return a concise summary suitable for long-term memory.';
            const response = await this.openai.chat.completions.create({
                model: config.llm.model,
                messages: [
                    { role: 'system', content: prompt },
                    { role: 'user', content: content }
                ],
                temperature: 0.3
            }, { timeout: config.llm.requestTimeoutMs ?? 600000 });
            return response.choices[0].message.content || '';
        }
        catch (e) {
            console.error('Key information extraction failed:', e);
            return '';
        }
    }
    /**
     * Extract notebook notes from a conversation chunk.
     * @param {string} content - conversation text.
     * @param {string} [keywords] - optional keywords to focus the extraction.
     * @returns {Promise<Array<{title: string, content: string}>>} notes,
     *   or [] on failure / missing API key / unparseable model output.
     */
    async extractNotebookNotes(content, keywords) {
        try {
            if (!config.llm.apiKey) {
                console.warn('API key not configured, skipping notebook note extraction');
                return [];
            }
            const prompt = keywords
                ? `You are a note-taker. Extract notes relevant to the following keywords: "${keywords}".`
                : `You are a note-taker. Extract important notes, knowledge points, code snippets, or actionable items from the conversation.`;
            const response = await this.openai.chat.completions.create({
                model: config.llm.model,
                messages: [
                    { role: 'system', content: `${prompt}
Return a JSON array of objects with "title" and "content" fields. If no relevant info, return empty array [].
Format: [{"title": "...", "content": "..."}]` },
                    { role: 'user', content: content }
                ],
                temperature: 0.3,
                // json_object mode forces valid JSON; the model may wrap the
                // array in an object, hence the dual-shape parsing below.
                response_format: { type: 'json_object' }
            }, { timeout: config.llm.requestTimeoutMs ?? 600000 });
            const jsonStr = response.choices[0].message.content || '{"notes": []}';
            try {
                const parsed = JSON.parse(jsonStr);
                if (Array.isArray(parsed))
                    return parsed;
                if (parsed.notes && Array.isArray(parsed.notes))
                    return parsed.notes;
                return [];
            }
            catch {
                return [];
            }
        }
        catch (e) {
            console.error('Notebook note extraction failed:', e);
            return [];
        }
    }
    /**
     * Low-level chat-completion call with tool definitions and retry.
     * Retries (with linear backoff, capped at 8s) only on timeout / network
     * errors; other errors are rethrown immediately.
     * @param {Array<object>} messages - full chat transcript to send.
     * @param {string} [userQuery] - current user input, used to filter tools.
     * @returns {Promise<object>} raw chat-completion response.
     */
    async callLLM(messages, userQuery) {
        // Filter the exposed tools based on the user's current input.
        const tools = skillRegistry.getToolsDefinition(userQuery);
        // OpenAI SDK expects tools to be undefined if empty array, or valid tools array
        const toolsParam = tools.length > 0 ? tools : undefined;
        const maxRetries = config.llm.maxRetries ?? 2;
        const timeout = config.llm.requestTimeoutMs ?? 600000;
        let lastError;
        for (let attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return await this.openai.chat.completions.create({
                    model: config.llm.model,
                    messages: messages,
                    tools: toolsParam,
                    temperature: config.llm.temperature
                }, { timeout });
            }
            catch (e) {
                lastError = e;
                const msg = String(e?.message || '').toLowerCase();
                const code = String(e?.code || '').toLowerCase();
                const isTimeout = msg.includes('timeout') || code.includes('etimedout');
                const isConn = msg.includes('econnreset') || msg.includes('network') || code.includes('econnreset');
                const retryable = isTimeout || isConn;
                if (!retryable || attempt === maxRetries)
                    break;
                const backoffMs = Math.min(2000 * (attempt + 1), 8000);
                await new Promise(r => setTimeout(r, backoffMs));
            }
        }
        throw lastError;
    }
}
export const brain = Brain.getInstance();
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { EventEmitter } from 'events';
|
|
2
|
+
/**
 * Application-wide pub/sub hub built on Node's EventEmitter.
 * Every event is delivered twice: once under its own `event.type`
 * and once under the catch-all '*' channel (useful for logging).
 */
class EventBus extends EventEmitter {
    /** @returns {EventBus} the lazily-created singleton instance. */
    static getInstance() {
        EventBus.instance ??= new EventBus();
        return EventBus.instance;
    }
    /**
     * Publish an event object.
     * @param {{type: string}} event - event payload; `type` selects listeners.
     */
    emitEvent(event) {
        this.emit(event.type, event);
        // Also notify catch-all subscribers so they see every event.
        this.emit('*', event);
    }
    /** Subscribe `handler` to events of `eventType` (or '*' for all). */
    onEvent(eventType, handler) {
        this.on(eventType, handler);
    }
    /** Remove a previously registered handler. */
    offEvent(eventType, handler) {
        this.off(eventType, handler);
    }
}
export const eventBus = EventBus.getInstance();
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import winston from 'winston';
|
|
2
|
+
import 'winston-daily-rotate-file';
|
|
3
|
+
import path from 'path';
|
|
4
|
+
import fs from 'fs';
|
|
5
|
+
// Determine log directory.
// In production, logs go to the directory where the user runs the app
// (process.cwd()/logs); in development that is typically the project root.
const logDir = path.join(process.cwd(), 'logs');
// Ensure the log directory exists. Failure is logged and swallowed so that
// logging setup never crashes startup (file transports may then fail later).
if (!fs.existsSync(logDir)) {
    try {
        fs.mkdirSync(logDir, { recursive: true });
    }
    catch (e) {
        console.error('Failed to create log directory:', e);
    }
}
// File log format: "[YYYY-MM-DD HH:mm:ss] LEVEL: message <stack?>".
// errors({stack: true}) makes Error objects carry their stack into `stack`.
const logFormat = winston.format.combine(winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), winston.format.errors({ stack: true }), // Print stack trace for errors
winston.format.splat(), winston.format.printf(({ timestamp, level, message, stack }) => {
    return `[${timestamp}] ${level.toUpperCase()}: ${message} ${stack || ''}`;
}));
// Daily-rotating file transport (requires the 'winston-daily-rotate-file'
// side-effect import at the top of this file).
const fileTransport = new winston.transports.DailyRotateFile({
    filename: path.join(logDir, 'app-%DATE%.log'),
    datePattern: 'YYYY-MM-DD',
    zippedArchive: true, // Archive old logs (gzip)
    maxSize: '20m', // Rotate if size exceeds 20MB (optional safety)
    maxFiles: '30d', // Keep logs for 30 days
    createSymlink: true, // Create a symlink 'app.log' pointing to current log
    symlinkName: 'app.log',
    level: 'info'
});
// Console transport mirrors the file format but colorized and with the
// level left lowercase (colorize works on the raw level string).
const transports = [
    fileTransport,
    new winston.transports.Console({
        format: winston.format.combine(winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), winston.format.colorize(), winston.format.printf(({ timestamp, level, message, stack }) => {
            return `[${timestamp}] ${level}: ${message} ${stack || ''}`;
        }))
    })
];
// Application-wide logger. Uncaught exceptions and unhandled promise
// rejections are additionally captured into dedicated, non-rotating files.
export const logger = winston.createLogger({
    level: 'info',
    format: logFormat,
    transports: transports,
    exceptionHandlers: [
        new winston.transports.File({ filename: path.join(logDir, 'exceptions.log') })
    ],
    rejectionHandlers: [
        new winston.transports.File({ filename: path.join(logDir, 'rejections.log') })
    ]
});
|
|
53
|
+
// Helper to integrate with existing console.log usage.
/**
 * Redirect console.log/error/warn/info to the winston logger so legacy
 * console calls end up in the rotating log files.
 *
 * Notes:
 * - The override is not reversible: the previous code captured the original
 *   console methods into unused locals and never restored them, so the dead
 *   captures have been removed.
 * - Objects are pretty-printed with JSON.stringify; circular objects would
 *   throw here just as in the original implementation.
 */
export const overrideConsole = () => {
    // Shared serializer (was duplicated four times in the original).
    const joinArgs = (args) => args
        .map(arg => (typeof arg === 'object' ? JSON.stringify(arg, null, 2) : arg))
        .join(' ');
    console.log = (...args) => {
        logger.info(joinArgs(args));
    };
    console.error = (...args) => {
        logger.error(joinArgs(args));
    };
    console.warn = (...args) => {
        logger.warn(joinArgs(args));
    };
    console.info = (...args) => {
        logger.info(joinArgs(args));
    };
};
|
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
import { randomUUID } from 'node:crypto';
import { ShortTermMemory } from './short-term.js';
import { VectorMemory } from './vector.js';
import { StructuredMemory } from './structured.js';
import { brain } from '../brain.js';
import { config } from '../../config/loader.js';
|
|
6
|
+
export class MemoryManager {
    /**
     * Facade over the three memory layers:
     *  - ShortTermMemory: per-session message history and runs
     *  - VectorMemory: embedding-based recent/long-term recall
     *  - StructuredMemory: user profiles, notebooks and notes
     */
    constructor() {
        this.shortTerm = ShortTermMemory.getInstance();
        this.vector = VectorMemory.getInstance();
        this.structured = StructuredMemory.getInstance();
    }
    /** @returns {MemoryManager} the process-wide singleton instance. */
    static getInstance() {
        if (!MemoryManager.instance) {
            MemoryManager.instance = new MemoryManager();
        }
        return MemoryManager.instance;
    }
    // --- Layer 1: Short-term (Session) ---
    async createSession(input) {
        return this.shortTerm.createSession(input);
    }
    async getSession(id) {
        return this.shortTerm.getSession(id);
    }
    async listSessions() {
        return this.shortTerm.listSessions();
    }
    async updateSessionMeta(id, patch) {
        return this.shortTerm.updateSessionMeta(id, patch);
    }
    async persistSession(id) {
        return this.shortTerm.persistSession(id);
    }
    async deleteSession(id) {
        return this.shortTerm.deleteSession(id);
    }
    /**
     * Record a message: always stored in short-term memory; content longer
     * than 10 characters is additionally mirrored into the vector store in
     * the background (failures are logged, never propagated).
     */
    async addMessage(sessionId, message) {
        // 1. Add to Short-term
        await this.shortTerm.addMessage(sessionId, message);
        // 2. Add to Recent/Long-term (Vector) - async/background. Only embed
        // non-trivial content to avoid cluttering the store.
        // BUGFIX: guard against a missing `content` field, which previously
        // threw a TypeError on `.length`.
        if ((message.content?.length ?? 0) > 10) { // Simple filter
            const userId = config.userId || 'default';
            this.vector.addMemory(message.content, 'recent', { sessionId, role: message.role, userId })
                .catch(err => console.error('Background vector add failed:', err));
        }
        // 3. Structured fact extraction requires an LLM call; it is left to
        // the Brain via the explicit APIs below rather than done per-message.
    }
    async clearSessionMessages(sessionId) {
        await this.shortTerm.clearMessages(sessionId);
    }
    /** Propagate an LLM-config change to clients held by sub-memories. */
    updateClients() {
        this.vector.updateClient();
    }
    async createRun(sessionId, userContent, runId) {
        return this.shortTerm.createRun(sessionId, userContent, runId);
    }
    async updateRun(sessionId, runId, patch) {
        return this.shortTerm.updateRun(sessionId, runId, patch);
    }
    async listRuns(sessionId) {
        return this.shortTerm.listRuns(sessionId);
    }
    async getHistory(sessionId) {
        return this.shortTerm.getMessages(sessionId);
    }
    // --- Retrieval ---
    /**
     * Assemble the retrieval context for a query: user profile facts,
     * vector-memory hits filtered to this user, and the last 10 messages.
     * @returns {Promise<string>} a formatted multi-section context block.
     */
    async getRelevantContext(query, sessionId, userId) {
        const uid = userId || config.userId || 'default';
        // 1. Get Short-term context (recent messages)
        const history = await this.getHistory(sessionId);
        const recentMessages = history.slice(-10).map(m => `${m.role}: ${m.content}`).join('\n');
        // 2. Search Vector Memory (over-fetch 10, keep top 3 for this user)
        const vectorResults = await this.vector.search(query, undefined, 10);
        const vectorFiltered = vectorResults.filter(r => (r.metadata?.userId || 'default') === uid).slice(0, 3);
        const vectorContext = vectorFiltered.map(r => `[Memory]: ${r.text}`).join('\n');
        // 3. Get User Profile (Structured)
        const profile = await this.structured.getUserProfile(uid);
        const profileContext = Object.entries(profile).map(([k, v]) => `[User Info] ${k}: ${v}`).join('\n');
        return `
=== User Identity (userId: ${uid}) ===
${profileContext}

=== Relevant Memories ===
${vectorContext}

=== Recent Conversation ===
${recentMessages}
`;
    }
    getUserProfile(userId) {
        const uid = userId || config.userId || 'default';
        return this.structured.getUserProfile(uid);
    }
    /**
     * Merge a key/value patch into the user's structured profile.
     * Empty keys are skipped; null/undefined values are stored as ''.
     */
    updateUserProfile(userId, patch) {
        const uid = userId || config.userId || 'default';
        for (const [k, v] of Object.entries(patch || {})) {
            const key = String(k).trim();
            if (!key)
                continue;
            const value = v === null || v === undefined ? '' : String(v);
            this.structured.updateUserProfile(uid, key, value);
        }
    }
    /**
     * Archive new activity of every session: extract project/general key
     * information into the vector store and, for notebook-bound sessions,
     * extract notes into the notebook. Processes messages in chunks of 50
     * and advances each session's lastArchivedAt watermark.
     */
    async runSessionArchiving() {
        console.log('[MemoryManager] Running session archiving...');
        const sessions = await this.listSessions();
        const now = Date.now();
        for (const session of sessions) {
            try {
                // Skip sessions with no activity since the last archive.
                const lastArchived = session.lastArchivedAt || 0;
                const lastActive = session.lastActiveAt || session.updatedAt || 0;
                if (lastActive <= lastArchived)
                    continue;
                console.log(`[MemoryManager] Archiving session ${session.id} (Alias: ${session.alias || 'N/A'})...`);
                const messages = await this.getHistory(session.id);
                // Only messages newer than the watermark need archiving.
                const newMessages = messages.filter(m => (m.timestamp || 0) > lastArchived);
                if (newMessages.length === 0) {
                    // Advance the watermark so this empty interval is not rechecked.
                    await this.updateSessionMeta(session.id, { lastArchivedAt: now });
                    continue;
                }
                // Process in chunks to avoid LLM context overflow.
                const CHUNK_SIZE = 50; // Process 50 messages at a time
                for (let i = 0; i < newMessages.length; i += CHUNK_SIZE) {
                    const chunk = newMessages.slice(i, i + CHUNK_SIZE);
                    const content = chunk.map(m => `${m.role}: ${m.content}`).join('\n');
                    // 1. Memory Archiving (Project or General)
                    const projectIds = session.projectIds || (session.projectId ? [session.projectId] : []);
                    if (projectIds.length > 0) {
                        // Project Context
                        const projectInfo = await brain.extractKeyInformation(content, 'project');
                        if (projectInfo) {
                            for (const pid of projectIds) {
                                await this.vector.addMemory(projectInfo, 'project_archive', {
                                    sessionId: session.id,
                                    projectId: pid,
                                    archivedAt: now
                                });
                                console.log(`[MemoryManager] Archived project info for Project ${pid} (Chunk ${i / CHUNK_SIZE + 1})`);
                            }
                        }
                    }
                    else {
                        // General Context (No Project)
                        const generalInfo = await brain.extractKeyInformation(content, 'general');
                        if (generalInfo) {
                            await this.vector.addMemory(generalInfo, 'long_term', {
                                sessionId: session.id,
                                archivedAt: now,
                                source: 'auto_archive'
                            });
                            console.log(`[MemoryManager] Archived general info for Session ${session.id} (Chunk ${i / CHUNK_SIZE + 1})`);
                        }
                    }
                    // 2. Notebook Memory Archiving
                    if (session.notebookId) {
                        const notebook = this.structured.getNotebook(session.notebookId);
                        if (notebook) {
                            // Extract notes regardless of keywords (Brain handles fallback)
                            const notes = await brain.extractNotebookNotes(content, notebook.keywords);
                            if (notes && notes.length > 0) {
                                for (const note of notes) {
                                    if (!note.title || !note.content)
                                        continue;
                                    // BUGFIX: this is an ES module, so the previous
                                    // `require('uuid').v4()` threw ReferenceError
                                    // (require is undefined in ESM). Use the
                                    // stdlib randomUUID() instead.
                                    this.structured.createNote(randomUUID(), session.notebookId, note.title, note.content);
                                    console.log(`[MemoryManager] Created note "${note.title}" in Notebook ${notebook.name} (Chunk ${i / CHUNK_SIZE + 1})`);
                                }
                            }
                        }
                    }
                }
                // Advance the watermark now that all chunks are archived.
                await this.updateSessionMeta(session.id, { lastArchivedAt: now });
            }
            catch (e) {
                console.error(`[MemoryManager] Failed to archive session ${session.id}:`, e);
            }
        }
    }
    // --- Maintenance ---
    // Managed by Scheduler.
    /**
     * Periodic maintenance: archive active sessions, delete sessions older
     * than sessionMaxAgeDays, and compact vector memories older than
     * vectorMaxAgeDays into summaries before pruning them.
     * @param {{sessionMaxAgeDays?: number, vectorMaxAgeDays?: number}} [input]
     */
    async runMaintenance(input) {
        const sessionMaxAgeDays = input?.sessionMaxAgeDays ?? 5;
        const vectorMaxAgeDays = input?.vectorMaxAgeDays ?? 15;
        console.log(`[MemoryManager] Running maintenance (Sessions: ${sessionMaxAgeDays} days, Vector: ${vectorMaxAgeDays} days)...`);
        try {
            // 0. Auto-archive active sessions
            await this.runSessionArchiving();
            // 1. Clean up old sessions
            const deletedSessions = await this.shortTerm.cleanupOldSessions(sessionMaxAgeDays);
            if (deletedSessions > 0) {
                console.log(`[MemoryManager] Deleted ${deletedSessions} old sessions.`);
            }
            // 2. Summarize and prune old vector memories
            const oldMemories = await this.vector.getMemoriesOlderThan(vectorMaxAgeDays);
            if (oldMemories.length > 0) {
                console.log(`[MemoryManager] Found ${oldMemories.length} old memories to summarize.`);
                // Summarize in batches; one failed batch does not stop the rest.
                const BATCH_SIZE = 10;
                for (let i = 0; i < oldMemories.length; i += BATCH_SIZE) {
                    const batch = oldMemories.slice(i, i + BATCH_SIZE);
                    const batchText = batch.map(m => `[${new Date(m.metadata.timestamp).toISOString()}] ${m.text}`).join('\n');
                    try {
                        const summary = await brain.generateSummary(batchText);
                        if (summary) {
                            await this.vector.addMemory(summary, 'long_term', {
                                original_count: batch.length,
                                summary_date: Date.now(),
                                source: 'maintenance_summary'
                            });
                        }
                    }
                    catch (e) {
                        console.error('Failed to summarize batch:', e);
                    }
                }
                // Prune only after summarization so no information is lost.
                await this.vector.pruneOldMemories(vectorMaxAgeDays);
            }
        }
        catch (error) {
            console.error('[MemoryManager] Maintenance failed:', error);
        }
    }
}
export const memoryManager = MemoryManager.getInstance();
|