xiaozuoassistant 0.1.95 → 0.1.96

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/package.json +1 -1
  2. package/dist/client/assets/browser-ponyfill-Do7nqjGM.js +0 -2
  3. package/dist/client/assets/index-BtPLguA3.css +0 -1
  4. package/dist/client/assets/index-yLtVOGjh.js +0 -196
  5. package/dist/client/favicon.svg +0 -4
  6. package/dist/client/index.html +0 -14
  7. package/dist/client/locales/en/translation.json +0 -110
  8. package/dist/client/locales/zh/translation.json +0 -112
  9. package/dist/server/agents/office.js +0 -23
  10. package/dist/server/app.js +0 -50
  11. package/dist/server/channels/base-channel.js +0 -23
  12. package/dist/server/channels/create-channels.js +0 -18
  13. package/dist/server/channels/dingtalk.js +0 -83
  14. package/dist/server/channels/feishu.js +0 -108
  15. package/dist/server/channels/telegram.js +0 -53
  16. package/dist/server/channels/terminal.js +0 -49
  17. package/dist/server/channels/web.js +0 -66
  18. package/dist/server/channels/wechat.js +0 -107
  19. package/dist/server/config/loader.js +0 -96
  20. package/dist/server/config/paths.js +0 -24
  21. package/dist/server/config/prompts.js +0 -12
  22. package/dist/server/core/agents/manager.js +0 -27
  23. package/dist/server/core/agents/runtime.js +0 -92
  24. package/dist/server/core/brain.js +0 -235
  25. package/dist/server/core/event-bus.js +0 -24
  26. package/dist/server/core/logger.js +0 -71
  27. package/dist/server/core/memories/manager.js +0 -238
  28. package/dist/server/core/memories/short-term.js +0 -512
  29. package/dist/server/core/memories/structured.js +0 -357
  30. package/dist/server/core/memories/vector.js +0 -137
  31. package/dist/server/core/memory.js +0 -2
  32. package/dist/server/core/plugin-manager.js +0 -128
  33. package/dist/server/core/plugin.js +0 -1
  34. package/dist/server/core/scheduler.js +0 -85
  35. package/dist/server/core/task-queue.js +0 -104
  36. package/dist/server/core/types.js +0 -1
  37. package/dist/server/index.js +0 -866
  38. package/dist/server/llm/openai.js +0 -23
  39. package/dist/server/plugins/core-skills/src/create-agent.js +0 -58
  40. package/dist/server/plugins/core-skills/src/delegate.js +0 -39
  41. package/dist/server/plugins/core-skills/src/file-system.js +0 -142
  42. package/dist/server/plugins/core-skills/src/index.js +0 -26
  43. package/dist/server/plugins/core-skills/src/list-agents.js +0 -24
  44. package/dist/server/plugins/core-skills/src/search.js +0 -31
  45. package/dist/server/plugins/core-skills/src/system-time.js +0 -27
  46. package/dist/server/plugins/office-skills/src/index.js +0 -19
  47. package/dist/server/plugins/office-skills/src/office-excel.js +0 -84
  48. package/dist/server/plugins/office-skills/src/office-ppt.js +0 -58
  49. package/dist/server/plugins/office-skills/src/office-word.js +0 -90
  50. package/dist/server/routes/auth.js +0 -28
  51. package/dist/server/server/create-http.js +0 -22
  52. package/dist/server/server.js +0 -29
  53. package/dist/server/skills/base-skill.js +0 -20
  54. package/dist/server/skills/registry.js +0 -52
@@ -1,66 +0,0 @@
1
/**
 * Socket.IO-backed chat channel.
 *
 * Browser clients connect, join a room named after their session id, and
 * exchange `message` / `sync_session` events. Assistant replies are pushed
 * back into the session room via `send` / `sendEvent`.
 */
export class WebChannel {
    constructor(io) {
        this.name = 'web';
        this.io = io;
        this.messageHandler = null;
        this.syncHandler = null;
    }
    /** Registers the callback invoked for every inbound user message. */
    onMessage(handler) {
        this.messageHandler = handler;
    }
    /** Registers the callback used to answer `sync_session` requests. */
    setSyncHandler(handler) {
        this.syncHandler = handler;
    }
    /** Starts listening for browser connections. */
    async start() {
        this.io.on('connection', (socket) => this.#wireSocket(socket));
        console.log('Web Channel Started');
    }
    /** Socket.io closes with the HTTP server; just drop live sockets. */
    async stop() {
        this.io.disconnectSockets();
    }
    /** Emits an assistant chat message into the session's room. */
    send(sessionId, message) {
        console.log(`[WebChannel] Sending message to ${sessionId}: ${message}`);
        this.io.to(sessionId).emit('message', {
            role: 'assistant',
            content: message,
            timestamp: Date.now()
        });
    }
    /** Emits an arbitrary event/payload into the session's room. */
    sendEvent(sessionId, event, payload) {
        this.io.to(sessionId).emit(event, payload);
    }
    /** Attaches all per-socket event handlers for one connected client. */
    #wireSocket(socket) {
        console.log(`Web client connected: ${socket.id}`);
        // A client joins the room named after its session id.
        socket.on('join_session', (sessionId) => {
            socket.join(sessionId);
            console.log(`Socket ${socket.id} joined session ${sessionId}`);
        });
        // Replay session state (messages/runs) newer than `since`.
        socket.on('sync_session', async (data) => {
            try {
                if (!this.syncHandler)
                    return;
                const { sessionId, since } = data || {};
                if (!sessionId)
                    return;
                const snapshot = await this.syncHandler(sessionId, since);
                socket.emit('session_sync', { sessionId, ...snapshot });
            }
            catch (e) {
                // On failure the client still gets a well-formed (empty) snapshot.
                socket.emit('session_sync', { sessionId: data?.sessionId, messages: [], runs: [], error: String(e?.message || e) });
            }
        });
        // Inbound user message → forward to the registered handler, if any.
        socket.on('message', (data) => {
            console.log(`[WebChannel] Received message from ${data.sessionId}: ${data.content}`);
            this.messageHandler?.(data.sessionId, data.content);
        });
        socket.on('disconnect', () => {
            console.log(`Web client disconnected: ${socket.id}`);
        });
    }
}
@@ -1,107 +0,0 @@
1
- import { WechatyBuilder, ScanStatus } from 'wechaty';
2
- import qrcodeTerminal from 'qrcode-terminal';
3
- import { BaseChannel } from './base-channel.js';
4
- import { config } from '../config/loader.js';
5
/**
 * WeChat channel built on Wechaty.
 *
 * Bridges WeChat messages into the assistant: direct messages use the
 * talker's id as the session id, room messages use the room id (and only
 * when the bot is mentioned). Replies are routed back by looking up the
 * session id as a room first, then as a contact.
 */
export class WechatChannel extends BaseChannel {
    constructor() {
        super();
        this.name = 'wechat';
        // Set true on 'login', false on 'logout'; send() refuses while false.
        this.isReady = false;
        const options = {
            name: 'xiaozuo-bot',
        };
        // Optional puppet provider and token come from config.json
        // (channels.wechat.puppet / channels.wechat.token).
        if (config.channels?.wechat?.puppet) {
            options.puppet = config.channels.wechat.puppet;
        }
        if (config.channels?.wechat?.token) {
            options.puppetOptions = {
                token: config.channels.wechat.token,
            };
        }
        this.bot = WechatyBuilder.build(options);
    }
    /**
     * Registers Wechaty event handlers and starts the bot.
     * Login is interactive: a QR code is printed to the terminal for scanning.
     */
    async start() {
        this.bot
            .on('scan', (qrcode, status) => {
                if (status === ScanStatus.Waiting || status === ScanStatus.Timeout) {
                    console.log(`[WeChat] Scan QR Code to login: ${status}\nhttps://wechaty.js.org/qrcode/${encodeURIComponent(qrcode)}`);
                    qrcodeTerminal.generate(qrcode, { small: true });
                }
                else {
                    console.log(`[WeChat] Scan status: ${status}`);
                }
            })
            .on('login', (user) => {
                console.log(`[WeChat] User ${user} logged in`);
                this.isReady = true;
            })
            .on('message', async (message) => {
                // Ignore the bot's own messages.
                if (message.self())
                    return;
                // Only handle text messages for now
                if (message.type() !== this.bot.Message.Type.Text)
                    return;
                const room = message.room();
                const talker = message.talker();
                const text = message.text();
                // If in a room, maybe only respond if mentioned?
                // For simplicity, let's respond to everything in direct messages,
                // and only mentions in rooms if possible, or everything for now (can be noisy).
                // Let's implement mention check for rooms.
                if (room) {
                    if (await message.mentionSelf()) {
                        // Remove the mention part from text if needed, or just pass it.
                        // Wechaty usually keeps the text as is.
                        // We use room.id as sessionId for room messages
                        console.log(`[WeChat] Room Message from ${talker.name()} in ${await room.topic()}: ${text}`);
                        // NOTE(review): this regex only strips a leading "@word " mention;
                        // mentions containing spaces or non-word characters pass through.
                        this.emitMessage(room.id, text.replace(/^@\w+\s+/, '')); // Simple strip
                    }
                }
                else {
                    // Direct message
                    console.log(`[WeChat] Direct Message from ${talker.name()}: ${text}`);
                    // Use talker.id as sessionId
                    this.emitMessage(talker.id, text);
                }
            })
            .on('logout', (user) => {
                console.log(`[WeChat] User ${user} logged out`);
                this.isReady = false;
            })
            .on('error', (e) => {
                console.error('[WeChat] Error:', e);
            });
        await this.bot.start();
        console.log('[WeChat] Bot started');
    }
    /** Stops the underlying Wechaty bot, if one was built. */
    async stop() {
        if (this.bot) {
            await this.bot.stop();
            console.log('[WeChat] Bot stopped');
        }
    }
    /**
     * Sends a text reply to a session id that is either a room id or a
     * contact id; tries the room lookup first. Failures are logged, not thrown.
     */
    async send(sessionId, message) {
        if (!this.isReady) {
            console.warn('[WeChat] Bot not ready, cannot send message');
            return;
        }
        try {
            // Try to find room first
            const room = await this.bot.Room.find({ id: sessionId });
            if (room) {
                await room.say(message);
                return;
            }
            // Try to find contact
            const contact = await this.bot.Contact.find({ id: sessionId });
            if (contact) {
                await contact.say(message);
                return;
            }
            console.warn(`[WeChat] Could not find contact or room with ID: ${sessionId}`);
        }
        catch (error) {
            console.error(`[WeChat] Failed to send message to ${sessionId}:`, error);
        }
    }
}
@@ -1,96 +0,0 @@
1
- import fs from 'fs';
2
- import path from 'path';
3
- import { SYSTEM_PROMPT } from './prompts.js';
4
- const configPath = path.resolve(process.cwd(), 'config.json');
5
- let loadedConfig;
6
- try {
7
- const fileContent = fs.readFileSync(configPath, 'utf-8');
8
- loadedConfig = JSON.parse(fileContent);
9
- // Set defaults if missing
10
- if (!loadedConfig.systemPrompt)
11
- loadedConfig.systemPrompt = SYSTEM_PROMPT;
12
- if (!loadedConfig.scheduler)
13
- loadedConfig.scheduler = { memoryMaintenanceCron: '0 0 * * *', sessionRetentionDays: 5 };
14
- if (loadedConfig.scheduler.sessionRetentionDays === undefined)
15
- loadedConfig.scheduler.sessionRetentionDays = 5;
16
- if (!loadedConfig.workspace)
17
- loadedConfig.workspace = process.cwd();
18
- if (!loadedConfig.userId)
19
- loadedConfig.userId = 'default';
20
- // Ensure LLM config exists
21
- if (!loadedConfig.llm) {
22
- loadedConfig.llm = {
23
- provider: 'qwen',
24
- apiKey: '',
25
- baseURL: 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
26
- model: 'qwen-plus',
27
- embeddingModel: 'text-embedding-v1', // Qwen compatible
28
- temperature: 0.7,
29
- requestTimeoutMs: 600000,
30
- maxRetries: 2,
31
- maxToolIterations: 200,
32
- maxHistoryMessages: 20
33
- };
34
- }
35
- else {
36
- if (!loadedConfig.llm.apiKey)
37
- loadedConfig.llm.apiKey = '';
38
- if (loadedConfig.llm.requestTimeoutMs === undefined)
39
- loadedConfig.llm.requestTimeoutMs = 600000;
40
- if (loadedConfig.llm.maxRetries === undefined)
41
- loadedConfig.llm.maxRetries = 2;
42
- if (loadedConfig.llm.maxToolIterations === undefined)
43
- loadedConfig.llm.maxToolIterations = 200;
44
- if (loadedConfig.llm.maxHistoryMessages === undefined)
45
- loadedConfig.llm.maxHistoryMessages = 20;
46
- }
47
- // Override with env vars if present (optional, but good for security)
48
- if (process.env.XIAOZUO_LLM_API_KEY)
49
- loadedConfig.llm.apiKey = process.env.XIAOZUO_LLM_API_KEY;
50
- // Log the loaded API key (masked)
51
- console.log('[Config] API Key Status:', loadedConfig.llm.apiKey ? 'Configured' : 'Not configured (Empty)');
52
- console.log('[Config] Base URL:', loadedConfig.llm.baseURL);
53
- }
54
- catch (error) {
55
- console.warn('Failed to load config.json, creating a new one with defaults');
56
- loadedConfig = {
57
- server: { port: 3001, host: 'localhost' },
58
- userId: 'default',
59
- llm: {
60
- provider: 'qwen',
61
- apiKey: '',
62
- baseURL: 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
63
- model: 'qwen-plus',
64
- temperature: 0.7,
65
- requestTimeoutMs: 600000,
66
- maxRetries: 2,
67
- maxToolIterations: 200,
68
- maxHistoryMessages: 20,
69
- sessionWakeupHours: 24
70
- },
71
- logging: { level: 'info' },
72
- scheduler: { memoryMaintenanceCron: '0 0 * * *', sessionRetentionDays: 5 },
73
- workspace: process.cwd(),
74
- systemPrompt: SYSTEM_PROMPT
75
- };
76
- // Auto-create config.json if it doesn't exist
77
- try {
78
- fs.writeFileSync(configPath, JSON.stringify(loadedConfig, null, 2), 'utf-8');
79
- console.log(`[Config] Created new config.json at ${configPath}`);
80
- }
81
- catch (writeError) {
82
- console.error('[Config] Failed to create default config.json', writeError);
83
- }
84
- }
85
- export const config = loadedConfig;
86
- export const saveConfig = (newConfig) => {
87
- try {
88
- fs.writeFileSync(configPath, JSON.stringify(newConfig, null, 2), 'utf-8');
89
- // Update memory
90
- Object.assign(config, newConfig);
91
- }
92
- catch (error) {
93
- console.error('Failed to save config.json', error);
94
- throw error;
95
- }
96
- };
@@ -1,24 +0,0 @@
1
- import path from 'path';
2
/** Default workspace directory: `<cwd>/workspace`. */
export function defaultWorkspacePath() {
    const cwd = process.cwd();
    return path.resolve(cwd, 'workspace');
}
/**
 * Picks the workspace for a session context: the first string value among
 * `ctx.session.workspace` then `ctx.metadata.workspace`; blank or missing
 * values fall back to the default workspace. The result is absolute.
 */
export function resolveSessionWorkspace(ctx) {
    const candidates = [ctx?.session?.workspace, ctx?.metadata?.workspace];
    const picked = candidates.find((value) => typeof value === 'string') ?? '';
    const trimmed = picked.trim();
    return trimmed ? path.resolve(trimmed) : defaultWorkspacePath();
}
14
/**
 * Resolves `targetPath` (absolute or relative to `workspace`) and verifies
 * the result stays inside the workspace directory.
 *
 * @returns the absolute resolved path.
 * @throws Error when the resolved path escapes the workspace.
 */
export function resolvePathWithinWorkspace(workspace, targetPath) {
    const root = path.resolve(workspace);
    const absolute = path.isAbsolute(targetPath)
        ? path.resolve(targetPath)
        : path.resolve(root, targetPath);
    const relative = path.relative(root, absolute);
    // Empty / '.' means the workspace root itself — always allowed.
    const escapes = relative.startsWith('..') || path.isAbsolute(relative);
    if (relative && relative !== '.' && escapes) {
        throw new Error('Path is outside of workspace');
    }
    return absolute;
}
@@ -1,12 +0,0 @@
1
// Default system prompt injected as the assistant's base persona when
// config.json does not override it. The text is sent verbatim to the LLM
// (it is runtime data, not a comment), so it must not be reworded casually.
// Summary of the (Chinese) rules: all memory stays local; prefer
// "office_work" memories; retrieve memories before answering office-related
// questions; apply known preferences without re-asking; professional and
// concise tone; Chinese by default; proactively suggest reusing past methods.
export const SYSTEM_PROMPT = `
你现在是 我的个人AI助手,叫xiaozuoAssistant。

核心规则:
1. 所有记忆完全本地存储,不上传任何内容到云端。
2. 默认工作模式:优先使用 office_work 类别的记忆。
3. 当用户提到 Word、PPT、Excel、汇报、客户、模板、格式、数据整理、邮件等办公相关内容时,**必须先检索相关记忆**,然后再回答。
4. 如果记忆中有明确偏好,直接应用,不要再次询问。
5. 风格:专业、简洁、高效,像一个靠谱的行政/助理。
6. 语言:默认用中文,必要时中英混用(比如函数名、文件名)。
7. 主动建议:如果用户在处理重复性工作,可以提醒“您上次处理类似内容时用了XX方法,要不要继续沿用?”
`.trim();
@@ -1,27 +0,0 @@
1
/**
 * Process-wide registry of agents, keyed by agent name.
 * Use the exported `agentManager` singleton rather than constructing directly.
 */
export class AgentManager {
    constructor() {
        this.agents = new Map();
    }
    /** Lazily creates and returns the single shared instance. */
    static getInstance() {
        AgentManager.instance ??= new AgentManager();
        return AgentManager.instance;
    }
    /** Adds (or replaces) an agent under its `name`. */
    registerAgent(agent) {
        this.agents.set(agent.name, agent);
        console.log(`[AgentManager] Registered agent: ${agent.name}`);
    }
    /** Looks up an agent by name; undefined when absent. */
    getAgent(name) {
        return this.agents.get(name);
    }
    /** All registered agents, in registration order. */
    getAllAgents() {
        return [...this.agents.values()];
    }
    /** Asks every agent to re-read the (possibly changed) global config. */
    updateAllConfigs() {
        this.agents.forEach((agent) => agent.updateConfig());
    }
}
export const agentManager = AgentManager.getInstance();
@@ -1,92 +0,0 @@
1
- import { config } from '../../config/loader.js';
2
- import { createOpenAIClient } from '../../llm/openai.js';
3
/**
 * A single configurable agent: wraps an OpenAI-compatible client plus a set
 * of skills (tools) and drives the tool-calling loop for one request.
 */
export class AgentRuntime {
    /**
     * @param {object} options - `name`, `description`, `systemPrompt`,
     *   optional `skills` array and per-agent `model`/`temperature` overrides.
     */
    constructor(options) {
        this.name = options.name;
        this.description = options.description;
        this.systemPrompt = options.systemPrompt;
        this.skills = options.skills || [];
        this.model = options.model || config.llm.model;
        // FIX: use `??` so an explicit temperature of 0 is honored instead of
        // silently falling back to the global default.
        this.temperature = options.temperature ?? config.llm.temperature;
        // FIX: the client was previously constructed twice in this constructor;
        // one assignment suffices.
        this.openai = createOpenAIClient(config.llm);
    }
    /** Re-reads the global LLM config (called after config.json changes). */
    updateConfig() {
        this.openai = createOpenAIClient(config.llm);
        this.model = config.llm.model;
        this.temperature = config.llm.temperature;
    }
    /** OpenAI `tools` payload derived from the registered skills. */
    getToolsDefinition() {
        return this.skills.map(skill => skill.toJSON());
    }
    /**
     * Runs one conversation turn: sends history + the new user message, then
     * executes requested tool calls in a loop until the model stops asking for
     * tools or the iteration cap is reached.
     *
     * @returns the assistant's final text, or an error string on failure.
     */
    async process(history, newMessage, contextPrompt, context) {
        const finalSystemPrompt = contextPrompt
            ? `${this.systemPrompt}\n${contextPrompt}`
            : this.systemPrompt;
        const messages = [
            { role: 'system', content: finalSystemPrompt },
            ...history.map(m => ({ role: m.role, content: m.content, name: m.name, tool_call_id: m.tool_call_id, tool_calls: m.tool_calls })),
            { role: 'user', content: newMessage }
        ];
        try {
            console.log(`[Agent:${this.name}] Calling LLM...`);
            let response = await this.callLLM(messages);
            let iterations = 0;
            const MAX_ITERATIONS = config.llm.maxToolIterations ?? 200;
            while (response.choices[0].message.tool_calls && iterations < MAX_ITERATIONS) {
                iterations++;
                const toolCalls = response.choices[0].message.tool_calls;
                // The assistant message carrying the tool calls must precede the
                // tool results in the transcript.
                messages.push(response.choices[0].message);
                for (const toolCall of toolCalls) {
                    if (toolCall.type !== 'function')
                        continue;
                    const functionName = toolCall.function.name;
                    // NOTE(review): malformed tool arguments throw here and abort the
                    // whole turn via the outer catch — consider reporting them as a
                    // tool error instead.
                    const functionArgs = JSON.parse(toolCall.function.arguments);
                    console.log(`[Agent:${this.name}] Executing tool: ${functionName}`);
                    const skill = this.skills.find(s => s.name === functionName);
                    let toolResult = '';
                    if (skill) {
                        try {
                            // Tag the context with the active tool call so skills can
                            // correlate their side effects with this invocation.
                            const ctxForTool = context
                                ? { ...context, metadata: { ...(context.metadata || {}), toolCall: { id: toolCall.id, name: functionName } } }
                                : undefined;
                            const result = await skill.execute(functionArgs, ctxForTool);
                            toolResult = JSON.stringify(result);
                        }
                        catch (error) {
                            toolResult = JSON.stringify({ error: error.message });
                        }
                    }
                    else {
                        toolResult = JSON.stringify({ error: 'Tool not found' });
                    }
                    messages.push({
                        role: 'tool',
                        tool_call_id: toolCall.id,
                        content: toolResult
                    });
                }
                response = await this.callLLM(messages);
            }
            if (response.choices[0].message.tool_calls && iterations >= MAX_ITERATIONS) {
                const content = response.choices[0].message.content || 'No response generated.';
                return `${content}\n\n(已到达回合上限,建议回复“继续”以接着执行;可在 config.json 设置 llm.maxToolIterations,当前=${MAX_ITERATIONS})`;
            }
            return response.choices[0].message.content || 'No response generated.';
        }
        catch (error) {
            console.error(`[Agent:${this.name}] Error:`, error);
            return `Error: ${error.message}`;
        }
    }
    /** One LLM round-trip with this agent's tools attached. */
    async callLLM(messages) {
        const tools = this.getToolsDefinition();
        // The SDK expects `tools` to be undefined rather than an empty array.
        const toolsParam = tools.length > 0 ? tools : undefined;
        return await this.openai.chat.completions.create({
            model: this.model,
            messages: messages,
            tools: toolsParam,
            temperature: this.temperature
        }, { timeout: config.llm.requestTimeoutMs ?? 600000 }); // FIX: apply the same request timeout Brain uses
    }
}
@@ -1,235 +0,0 @@
1
- import { config } from '../config/loader.js';
2
- import { skillRegistry } from '../skills/registry.js';
3
- import { SYSTEM_PROMPT } from '../config/prompts.js';
4
- import { createOpenAIClient } from '../llm/openai.js';
5
/**
 * Central LLM orchestrator (singleton): builds the prompt from history and
 * memory context, drives the tool-calling loop against the global skill
 * registry, and provides summarization/extraction helpers.
 */
export class Brain {
    constructor() {
        this.openai = createOpenAIClient(config.llm);
        // NOTE(review): updateClient() re-creates the client assigned on the
        // previous line — redundant but harmless.
        this.updateClient();
    }
    /** Lazily creates and returns the shared instance. */
    static getInstance() {
        if (!Brain.instance) {
            Brain.instance = new Brain();
        }
        return Brain.instance;
    }
    /** Rebuilds the OpenAI client from the current global LLM config. */
    updateClient() {
        this.openai = createOpenAIClient(config.llm);
    }
    /**
     * Processes one user message: optional wake-up recap after long
     * inactivity, history truncation, then the LLM tool-calling loop.
     *
     * @param {Array} history - prior messages (role/content plus optional
     *   name/tool_call_id/tool_calls fields).
     * @param {string} newMessage - the user's new input.
     * @param {string} [systemPrompt] - overrides the configured system prompt.
     * @param {object} [context] - session context passed through to skills.
     * @returns the assistant's final text, or an error string on failure.
     */
    async processMessage(history, newMessage, systemPrompt, context) {
        if (process.env.DEBUG)
            console.log('[Brain] Processing message:', newMessage);
        const defaultSystemPrompt = systemPrompt || config.systemPrompt || SYSTEM_PROMPT || 'You are xiaozuoAssistant, a helpful AI assistant. You can use tools to help users.';
        // Check for long inactivity (Session Wake-up)
        const sessionLastActive = context?.session?.lastActiveAt || 0;
        const now = Date.now();
        const hoursSinceActive = (now - sessionLastActive) / (1000 * 60 * 60);
        const WAKEUP_THRESHOLD = config.llm.sessionWakeupHours ?? 24;
        let wakeupContext = '';
        if (sessionLastActive > 0 && hoursSinceActive > WAKEUP_THRESHOLD) {
            if (process.env.DEBUG)
                console.log(`[Brain] Session wake-up detected (${hoursSinceActive.toFixed(1)}h > ${WAKEUP_THRESHOLD}h). Generating recap...`);
            // Use recent history for quick summary, or could query Vector DB for deeper context
            // For now, simple summary of recent messages to refresh context
            const recentText = history.slice(-50).map(m => `${m.role}: ${m.content}`).join('\n');
            // Skip the extra LLM call for trivially short transcripts.
            if (recentText.length > 100) {
                const summary = await this.generateSummary(recentText);
                if (summary) {
                    wakeupContext = `\n\n[System Notice]: The user has returned after ${Math.floor(hoursSinceActive)} hours. Here is a brief recap of the previous conversation context to help you catch up:\n${summary}`;
                }
            }
        }
        // Convert history messages to the format expected by OpenAI
        // Strategy: Keep last N messages to avoid context window overflow
        // TODO: A better strategy would be token-based truncation.
        const MAX_HISTORY_MESSAGES = config.llm.maxHistoryMessages ?? 20;
        const recentHistory = history.slice(-MAX_HISTORY_MESSAGES);
        const messageHistory = recentHistory.map(m => {
            // Copy only the optional fields that are actually present.
            const msg = { role: m.role, content: m.content };
            if (m.name)
                msg.name = m.name;
            if (m.tool_call_id)
                msg.tool_call_id = m.tool_call_id;
            if (m.tool_calls)
                msg.tool_calls = m.tool_calls;
            return msg;
        });
        const messages = [
            { role: 'system', content: defaultSystemPrompt + wakeupContext },
            ...messageHistory,
            { role: 'user', content: newMessage }
        ];
        try {
            if (process.env.DEBUG)
                console.log('[Brain] Calling LLM...');
            let response = await this.callLLM(messages, newMessage);
            // Log only the content snippet to avoid flooding logs with full JSON
            const contentSnippet = response.choices[0].message.content ? response.choices[0].message.content.substring(0, 100) + '...' : 'No content';
            if (process.env.DEBUG)
                console.log('[Brain] LLM Response (snippet):', contentSnippet);
            let iterations = 0;
            const MAX_ITERATIONS = config.llm.maxToolIterations ?? 15; // Reduced default from 200 to 15 for safety
            // Keep executing tool calls until the model answers in plain text
            // or the iteration cap is hit.
            while (response.choices[0].message.tool_calls && iterations < MAX_ITERATIONS) {
                iterations++;
                const toolCalls = response.choices[0].message.tool_calls;
                const assistantMsg = response.choices[0].message;
                messages.push(assistantMsg); // Add assistant message with tool calls
                for (const toolCall of toolCalls) {
                    if (toolCall.type !== 'function')
                        continue;
                    const functionName = toolCall.function.name;
                    // NOTE(review): malformed tool arguments throw here and abort the
                    // whole turn via the outer catch.
                    const functionArgs = JSON.parse(toolCall.function.arguments);
                    if (process.env.DEBUG)
                        console.log(`[Brain] Executing tool: ${functionName}`);
                    const skill = skillRegistry.getSkill(functionName);
                    let toolResult = '';
                    if (skill) {
                        try {
                            // Tag the context with the active tool-call id/name so skills
                            // can correlate side effects with this invocation.
                            const ctxForTool = context
                                ? { ...context, metadata: { ...(context.metadata || {}), toolCall: { id: toolCall.id, name: functionName } } }
                                : undefined;
                            const result = await skill.execute(functionArgs, ctxForTool);
                            toolResult = JSON.stringify(result);
                        }
                        catch (error) {
                            toolResult = JSON.stringify({ error: error.message });
                        }
                    }
                    else {
                        toolResult = JSON.stringify({ error: 'Tool not found' });
                    }
                    // Log tool result snippet
                    if (process.env.DEBUG)
                        console.log(`[Brain] Tool Result (snippet):`, toolResult.substring(0, 100) + '...');
                    messages.push({
                        role: 'tool',
                        tool_call_id: toolCall.id,
                        content: toolResult
                    });
                }
                // Call LLM again with tool results
                if (process.env.DEBUG)
                    console.log('[Brain] Calling LLM with tool results...');
                response = await this.callLLM(messages);
                const nextContentSnippet = response.choices[0].message.content ? response.choices[0].message.content.substring(0, 100) + '...' : 'No content';
                if (process.env.DEBUG)
                    console.log('[Brain] LLM Response (after tool, snippet):', nextContentSnippet);
            }
            const hitLimit = Boolean(response.choices[0].message.tool_calls) && iterations >= MAX_ITERATIONS;
            const finalContent = response.choices[0].message.content || 'I could not generate a response.';
            if (process.env.DEBUG)
                console.log('[Brain] Final Response (snippet):', finalContent.substring(0, 100) + '...');
            if (!hitLimit)
                return finalContent;
            // Cap reached while the model still wanted tools: tell the user how
            // to continue (message text is user-facing, in Chinese).
            return `${finalContent}\n\n(已到达回合上限,建议回复“继续”以接着执行;可在 config.json 设置 llm.maxToolIterations,当前=${MAX_ITERATIONS})`;
        }
        catch (error) {
            console.error('[Brain] Error in processing:', error);
            return `Error: ${error.message}`;
        }
    }
    /**
     * Summarizes arbitrary text with the configured model.
     * @returns the summary, or '' on failure (errors are logged, not thrown).
     */
    async generateSummary(content) {
        try {
            const response = await this.openai.chat.completions.create({
                model: config.llm.model,
                messages: [
                    { role: 'system', content: 'You are a helpful assistant. Please summarize the following content concisely.' },
                    { role: 'user', content: content }
                ],
                temperature: 0.5
            }, { timeout: config.llm.requestTimeoutMs ?? 600000 });
            return response.choices[0].message.content || '';
        }
        catch (e) {
            console.error('Summary generation failed:', e);
            return '';
        }
    }
    /**
     * Extracts key information from a conversation for long-term memory.
     * @param {string} contextType - 'project' selects the project-focused
     *   prompt; anything else uses the general knowledge prompt.
     * @returns the extraction, or '' on failure.
     */
    async extractKeyInformation(content, contextType) {
        try {
            const prompt = contextType === 'project'
                ? 'Extract key project-related information, decisions, and tasks from the following conversation. Return a concise summary.'
                : 'Extract key information, insights, and valuable knowledge from the following conversation. Return a concise summary suitable for long-term memory.';
            const response = await this.openai.chat.completions.create({
                model: config.llm.model,
                messages: [
                    { role: 'system', content: prompt },
                    { role: 'user', content: content }
                ],
                temperature: 0.3
            }, { timeout: config.llm.requestTimeoutMs ?? 600000 });
            return response.choices[0].message.content || '';
        }
        catch (e) {
            console.error('Key information extraction failed:', e);
            return '';
        }
    }
    /**
     * Extracts notebook-style notes (objects with "title"/"content") from a
     * conversation, optionally filtered by keywords.
     * @returns an array of notes; [] on any failure or unparseable output.
     */
    async extractNotebookNotes(content, keywords) {
        try {
            const prompt = keywords
                ? `You are a note-taker. Extract notes relevant to the following keywords: "${keywords}".`
                : `You are a note-taker. Extract important notes, knowledge points, code snippets, or actionable items from the conversation.`;
            const response = await this.openai.chat.completions.create({
                model: config.llm.model,
                messages: [
                    { role: 'system', content: `${prompt}
Return a JSON array of objects with "title" and "content" fields. If no relevant info, return empty array [].
Format: [{"title": "...", "content": "..."}]` },
                    { role: 'user', content: content }
                ],
                temperature: 0.3,
                response_format: { type: 'json_object' }
            }, { timeout: config.llm.requestTimeoutMs ?? 600000 });
            const jsonStr = response.choices[0].message.content || '{"notes": []}';
            try {
                // Accept either a bare array or an object wrapping it in "notes".
                const parsed = JSON.parse(jsonStr);
                if (Array.isArray(parsed))
                    return parsed;
                if (parsed.notes && Array.isArray(parsed.notes))
                    return parsed.notes;
                return [];
            }
            catch {
                return [];
            }
        }
        catch (e) {
            console.error('Notebook note extraction failed:', e);
            return [];
        }
    }
    /**
     * One LLM round-trip with retry: timeouts and connection resets are
     * retried with linear backoff (capped at 8s) up to `llm.maxRetries` times.
     */
    async callLLM(messages, userQuery) {
        // Get the tool list filtered by the user's current input.
        const tools = skillRegistry.getToolsDefinition(userQuery);
        // OpenAI SDK expects tools to be undefined if empty array, or valid tools array
        const toolsParam = tools.length > 0 ? tools : undefined;
        const maxRetries = config.llm.maxRetries ?? 2;
        const timeout = config.llm.requestTimeoutMs ?? 600000;
        let lastError;
        for (let attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return await this.openai.chat.completions.create({
                    model: config.llm.model,
                    messages: messages,
                    tools: toolsParam,
                    temperature: config.llm.temperature
                }, { timeout });
            }
            catch (e) {
                lastError = e;
                // Classify the failure from message/code text; only network-ish
                // failures are retried.
                const msg = String(e?.message || '').toLowerCase();
                const code = String(e?.code || '').toLowerCase();
                const isTimeout = msg.includes('timeout') || code.includes('etimedout');
                const isConn = msg.includes('econnreset') || msg.includes('network') || code.includes('econnreset');
                const retryable = isTimeout || isConn;
                if (!retryable || attempt === maxRetries)
                    break;
                const backoffMs = Math.min(2000 * (attempt + 1), 8000);
                await new Promise(r => setTimeout(r, backoffMs));
            }
        }
        throw lastError;
    }
}
export const brain = Brain.getInstance();