neuro-simulator 0.1.3__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. neuro_simulator/__init__.py +1 -10
  2. neuro_simulator/agent/__init__.py +1 -8
  3. neuro_simulator/agent/base.py +43 -0
  4. neuro_simulator/agent/core.py +105 -398
  5. neuro_simulator/agent/factory.py +30 -0
  6. neuro_simulator/agent/llm.py +34 -31
  7. neuro_simulator/agent/memory/__init__.py +1 -4
  8. neuro_simulator/agent/memory/manager.py +61 -203
  9. neuro_simulator/agent/tools/__init__.py +1 -4
  10. neuro_simulator/agent/tools/core.py +8 -18
  11. neuro_simulator/api/__init__.py +1 -0
  12. neuro_simulator/api/agent.py +163 -0
  13. neuro_simulator/api/stream.py +55 -0
  14. neuro_simulator/api/system.py +90 -0
  15. neuro_simulator/cli.py +60 -143
  16. neuro_simulator/core/__init__.py +1 -0
  17. neuro_simulator/core/agent_factory.py +52 -0
  18. neuro_simulator/core/agent_interface.py +91 -0
  19. neuro_simulator/core/application.py +278 -0
  20. neuro_simulator/services/__init__.py +1 -0
  21. neuro_simulator/{chatbot.py → services/audience.py} +24 -24
  22. neuro_simulator/{audio_synthesis.py → services/audio.py} +18 -15
  23. neuro_simulator/services/builtin.py +87 -0
  24. neuro_simulator/services/letta.py +206 -0
  25. neuro_simulator/{stream_manager.py → services/stream.py} +39 -47
  26. neuro_simulator/utils/__init__.py +1 -0
  27. neuro_simulator/utils/logging.py +90 -0
  28. neuro_simulator/utils/process.py +67 -0
  29. neuro_simulator/{stream_chat.py → utils/queue.py} +17 -4
  30. neuro_simulator/utils/state.py +14 -0
  31. neuro_simulator/{websocket_manager.py → utils/websocket.py} +18 -14
  32. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/METADATA +83 -33
  33. neuro_simulator-0.2.1.dist-info/RECORD +37 -0
  34. neuro_simulator/agent/api.py +0 -737
  35. neuro_simulator/agent/memory.py +0 -137
  36. neuro_simulator/agent/tools.py +0 -69
  37. neuro_simulator/builtin_agent.py +0 -83
  38. neuro_simulator/config.yaml.example +0 -157
  39. neuro_simulator/letta.py +0 -164
  40. neuro_simulator/log_handler.py +0 -43
  41. neuro_simulator/main.py +0 -673
  42. neuro_simulator/media/neuro_start.mp4 +0 -0
  43. neuro_simulator/process_manager.py +0 -70
  44. neuro_simulator/shared_state.py +0 -11
  45. neuro_simulator-0.1.3.dist-info/RECORD +0 -31
  46. /neuro_simulator/{config.py → core/config.py} +0 -0
  47. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/WHEEL +0 -0
  48. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/entry_points.txt +0 -0
  49. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.1.dist-info}/top_level.txt +0 -0
@@ -1,137 +0,0 @@
1
- # agent/memory.py
2
- """
3
- Memory management for the Neuro Simulator Agent
4
- """
5
-
6
- import os
7
- import json
8
- import asyncio
9
- from typing import Dict, List, Any
10
- from datetime import datetime, timedelta
11
-
12
- class MemoryManager:
13
- """Manages both immutable and mutable memory for the agent"""
14
-
15
- def __init__(self, memory_dir: str = "agent_memory"):
16
- self.memory_dir = memory_dir
17
- self.immutable_memory_file = os.path.join(memory_dir, "immutable_memory.json")
18
- self.mutable_memory_file = os.path.join(memory_dir, "mutable_memory.json")
19
- self.conversation_history_file = os.path.join(memory_dir, "conversation_history.json")
20
-
21
- # In-memory storage for faster access
22
- self.immutable_memory: Dict[str, Any] = {}
23
- self.mutable_memory: Dict[str, Any] = {}
24
- self.conversation_history: List[Dict[str, Any]] = []
25
-
26
- # Create memory directory if it doesn't exist
27
- os.makedirs(self.memory_dir, exist_ok=True)
28
-
29
- async def initialize(self):
30
- """Load memory from files"""
31
- # Load immutable memory
32
- if os.path.exists(self.immutable_memory_file):
33
- with open(self.immutable_memory_file, 'r') as f:
34
- self.immutable_memory = json.load(f)
35
- else:
36
- # Initialize with default immutable data
37
- self.immutable_memory = {
38
- "name": "Neuro-Sama",
39
- "personality": "Friendly, curious, and entertaining AI VTuber",
40
- "capabilities": ["chat", "answer questions", "entertain viewers"]
41
- }
42
- await self._save_immutable_memory()
43
-
44
- # Load mutable memory
45
- if os.path.exists(self.mutable_memory_file):
46
- with open(self.mutable_memory_file, 'r') as f:
47
- self.mutable_memory = json.load(f)
48
- else:
49
- # Initialize with default mutable data
50
- self.mutable_memory = {
51
- "mood": "happy",
52
- "current_topic": "streaming",
53
- "viewer_count": 0
54
- }
55
- await self._save_mutable_memory()
56
-
57
- # Load conversation history
58
- if os.path.exists(self.conversation_history_file):
59
- with open(self.conversation_history_file, 'r') as f:
60
- self.conversation_history = json.load(f)
61
-
62
- print("Memory manager initialized")
63
-
64
- async def _save_immutable_memory(self):
65
- """Save immutable memory to file"""
66
- with open(self.immutable_memory_file, 'w') as f:
67
- json.dump(self.immutable_memory, f, indent=2)
68
-
69
- async def _save_mutable_memory(self):
70
- """Save mutable memory to file"""
71
- with open(self.mutable_memory_file, 'w') as f:
72
- json.dump(self.mutable_memory, f, indent=2)
73
-
74
- async def _save_conversation_history(self):
75
- """Save conversation history to file"""
76
- with open(self.conversation_history_file, 'w') as f:
77
- json.dump(self.conversation_history, f, indent=2)
78
-
79
- async def reset(self):
80
- """Reset all memory"""
81
- self.immutable_memory = {
82
- "name": "Neuro-Sama",
83
- "personality": "Friendly, curious, and entertaining AI VTuber",
84
- "capabilities": ["chat", "answer questions", "entertain viewers"]
85
- }
86
- self.mutable_memory = {
87
- "mood": "happy",
88
- "current_topic": "streaming",
89
- "viewer_count": 0
90
- }
91
- self.conversation_history = []
92
-
93
- await self._save_immutable_memory()
94
- await self._save_mutable_memory()
95
- await self._save_conversation_history()
96
-
97
- print("Memory reset completed")
98
-
99
- async def add_message(self, message: Dict[str, Any]):
100
- """Add a message to conversation history"""
101
- self.conversation_history.append(message)
102
- # Keep only the last 50 messages
103
- if len(self.conversation_history) > 50:
104
- self.conversation_history = self.conversation_history[-50:]
105
- await self._save_conversation_history()
106
-
107
- async def get_context(self, max_messages: int = 10) -> str:
108
- """Get context from conversation history"""
109
- # Get recent messages
110
- recent_messages = self.conversation_history[-max_messages:] if self.conversation_history else []
111
-
112
- # Format context
113
- context_parts = []
114
-
115
- # Add immutable memory as context
116
- context_parts.append("Character Information:")
117
- for key, value in self.immutable_memory.items():
118
- context_parts.append(f"- {key}: {value}")
119
-
120
- # Add mutable memory as context
121
- context_parts.append("\nCurrent State:")
122
- for key, value in self.mutable_memory.items():
123
- context_parts.append(f"- {key}: {value}")
124
-
125
- # Add conversation history
126
- if recent_messages:
127
- context_parts.append("\nRecent Conversation:")
128
- for msg in recent_messages:
129
- context_parts.append(f"{msg['role']}: {msg['content']}")
130
-
131
- return "\n".join(context_parts)
132
-
133
- async def update_mutable_memory(self, updates: Dict[str, Any]):
134
- """Update mutable memory with new values"""
135
- self.mutable_memory.update(updates)
136
- await self._save_mutable_memory()
137
- print(f"Mutable memory updated: {updates}")
@@ -1,69 +0,0 @@
1
- # agent/tools.py
2
- """
3
- Tools that the Neuro Simulator Agent can use
4
- """
5
-
6
- import json
7
- import asyncio
8
- from typing import Dict, Any
9
- from .memory import MemoryManager
10
-
11
- class ToolManager:
12
- """Manages tools that the agent can use to interact with its memory"""
13
-
14
- def __init__(self, memory_manager: MemoryManager):
15
- self.memory_manager = memory_manager
16
- self.tools = {}
17
- self._register_default_tools()
18
-
19
- def _register_default_tools(self):
20
- """Register default tools for memory management"""
21
- self.tools["update_mood"] = self._update_mood
22
- self.tools["update_topic"] = self._update_topic
23
- self.tools["update_viewer_count"] = self._update_viewer_count
24
- self.tools["get_memory_state"] = self._get_memory_state
25
-
26
- async def _update_mood(self, mood: str) -> str:
27
- """Update the agent's mood"""
28
- await self.memory_manager.update_mutable_memory({"mood": mood})
29
- return f"Mood updated to: {mood}"
30
-
31
- async def _update_topic(self, topic: str) -> str:
32
- """Update the current topic"""
33
- await self.memory_manager.update_mutable_memory({"current_topic": topic})
34
- return f"Topic updated to: {topic}"
35
-
36
- async def _update_viewer_count(self, count: int) -> str:
37
- """Update the viewer count"""
38
- await self.memory_manager.update_mutable_memory({"viewer_count": count})
39
- return f"Viewer count updated to: {count}"
40
-
41
- async def _get_memory_state(self) -> Dict[str, Any]:
42
- """Get the current state of all memory"""
43
- return {
44
- "immutable": self.memory_manager.immutable_memory,
45
- "mutable": self.memory_manager.mutable_memory,
46
- "conversation_history_length": len(self.memory_manager.conversation_history)
47
- }
48
-
49
- def get_tool_descriptions(self) -> str:
50
- """Get descriptions of all available tools"""
51
- descriptions = [
52
- "Available tools:",
53
- "1. update_mood(mood: string) - Update the agent's mood",
54
- "2. update_topic(topic: string) - Update the current topic of conversation",
55
- "3. update_viewer_count(count: integer) - Update the viewer count",
56
- "4. get_memory_state() - Get the current state of all memory"
57
- ]
58
- return "\n".join(descriptions)
59
-
60
- async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Any:
61
- """Execute a tool by name with given parameters"""
62
- if tool_name in self.tools:
63
- try:
64
- result = await self.tools[tool_name](**params)
65
- return result
66
- except Exception as e:
67
- return f"Error executing tool '{tool_name}': {str(e)}"
68
- else:
69
- return f"Tool '{tool_name}' not found"
@@ -1,83 +0,0 @@
1
- # backend/builtin_agent.py
2
- """Builtin agent module for Neuro Simulator"""
3
-
4
- import asyncio
5
- from typing import List, Dict, Union
6
- from .config import config_manager
7
- import time
8
- from datetime import datetime
9
-
10
- # Global variables
11
- local_agent = None
12
-
13
- async def initialize_builtin_agent():
14
- """Initialize the builtin agent"""
15
- global local_agent
16
-
17
- try:
18
- from .agent.core import Agent as LocalAgentImport
19
- from .stream_manager import live_stream_manager
20
-
21
- local_agent = LocalAgentImport(working_dir=live_stream_manager._working_dir)
22
- await local_agent.initialize()
23
- except Exception as e:
24
- print(f"初始化本地 Agent 失败: {e}")
25
- import traceback
26
- traceback.print_exc()
27
- local_agent = None
28
-
29
- async def reset_builtin_agent_memory():
30
- """Reset the builtin agent's memory"""
31
- global local_agent
32
-
33
- if local_agent is not None:
34
- await local_agent.reset_all_memory()
35
- else:
36
- print("错误: 本地 Agent 未初始化,无法重置记忆。")
37
-
38
- async def clear_builtin_agent_temp_memory():
39
- """Clear the builtin agent's temp memory"""
40
- global local_agent
41
-
42
- if local_agent is not None:
43
- # Reset only temp memory
44
- await local_agent.memory_manager.reset_temp_memory()
45
- else:
46
- print("错误: 本地 Agent 未初始化,无法清空临时记忆。")
47
-
48
- async def clear_builtin_agent_context():
49
- """Clear the builtin agent's context (dialog history)"""
50
- global local_agent
51
-
52
- if local_agent is not None:
53
- # Reset only context
54
- await local_agent.memory_manager.reset_context()
55
-
56
- # Send context update via WebSocket to notify frontend
57
- from .websocket_manager import connection_manager
58
- await connection_manager.broadcast({
59
- "type": "agent_context",
60
- "action": "update",
61
- "messages": []
62
- })
63
- else:
64
- print("错误: 本地 Agent 未初始化,无法清空上下文。")
65
-
66
- async def get_builtin_response(chat_messages: list[dict]) -> dict:
67
- """Get response from the builtin agent with detailed processing information"""
68
- global local_agent
69
-
70
- if local_agent is not None:
71
- response = await local_agent.process_messages(chat_messages)
72
-
73
- # Return the response directly without adding processing details to temp memory
74
- return response
75
- else:
76
- print("错误: 本地 Agent 未初始化,无法获取响应。")
77
- return {
78
- "input_messages": chat_messages,
79
- "llm_response": "",
80
- "tool_executions": [],
81
- "final_response": "Someone tell Vedal there is a problem with my AI.",
82
- "error": "Agent not initialized"
83
- }
@@ -1,157 +0,0 @@
1
- # --- API密钥配置(无法通过外部控制面板修改) ---
2
- api_keys:
3
- # Letta API Token - 用于与 Letta 服务进行身份验证
4
- # 如果你自建 Letta Server 且没有设置 token,可以留空
5
- letta_token: "YOUR_LETTA_TOKEN_HERE"
6
-
7
- # Letta Server 基础 URL - 如果你自建 Letta Server 请填写完整地址,否则留空以使用Letta Cloud
8
- letta_base_url: ""
9
-
10
- # Neuro Agent ID - 在 Letta 中创建的 Agent 的唯一标识符
11
- neuro_agent_id: "YOUR_AGENT_ID_HERE"
12
-
13
- # Gemini API Key - 用于调用 Google Gemini API 生成观众聊天内容
14
- gemini_api_key: "YOUR_GEMINI_KEY_HERE"
15
-
16
- # OpenAI API Key - 用于调用兼容 OpenAI 的 API 生成观众聊天内容
17
- openai_api_key: "YOUR_OPENAI_KEY_HERE"
18
-
19
- # OpenAI API 基础 URL - 如果使用第三方兼容 OpenAI API 服务(如 SiliconFlow)请填写对应地址
20
- openai_api_base_url: "YOUR_OPENAI_BASE_URL_HERE"
21
-
22
- # Azure 语音服务密钥 - 用于调用微软 Azure TTS 服务合成语音
23
- azure_speech_key: "YOUR_AZURE_KEY_HERE"
24
-
25
- # Azure 语音服务区域 - Azure 服务所在的区域,如 "eastus" 或 "westus"
26
- azure_speech_region: "YOUR_AZURE_REGION_HERE"
27
-
28
- # --- 直播元数据配置 ---
29
- stream_metadata:
30
- # 主播昵称 - 显示在直播中的主播名称(此设置无法通过外部控制面板修改)
31
- streamer_nickname: "vedal987"
32
-
33
- # 直播标题 - 显示在直播页面的标题
34
- stream_title: "neuro-sama is here for u all"
35
-
36
- # 直播分类 - 直播内容的分类标签
37
- stream_category: "谈天说地"
38
-
39
- # 直播标签 - 用于描述直播内容的标签列表
40
- stream_tags: ["Vtuber", "AI", "Cute", "English", "Gremlin", "catgirl"]
41
-
42
- # --- Agent 类型设置 ---
43
- # 选择一个用来模拟Neuro的Agent提供方
44
- # - "letta": 使用Letta作为Agent,需要在上方配置Letta API相关信息
45
- # - "builtin": 使用内建Agent,请在下方填写配置
46
- agent_type: "builtin"
47
-
48
- # --- 内建Agent设置 ---
49
- # 仅当agent_type设置为"builtin"时生效
50
- agent:
51
- # Agent的API服务商,支持"gemini"和"openai",API Key配置使用顶部填写的值
52
- agent_provider: "gemini"
53
- # Agent使用的模型,切换gemini/openai时记得更改
54
- agent_model: "gemini-2.5-flash-lite"
55
-
56
- # --- Neuro 行为与节奏控制 ---
57
- neuro_behavior:
58
- # 输入聊天采样数量 - 每次生成 Neuro 回复时从观众聊天中采样的消息数量,不建议太长
59
- input_chat_sample_size: 10
60
-
61
- # 说话后冷却时间(秒) - Neuro 每次说完话后的等待时间
62
- post_speech_cooldown_sec: 1.0
63
-
64
- # 初始问候语 - 直播开始时给 Neuro 的系统提示语
65
- initial_greeting: "The stream has just started. Greet your audience and say hello!"
66
-
67
- # --- Chatbot 配置 ---
68
- audience_simulation:
69
- # LLM 提供商 - 选择用于生成观众聊天的 AI 服务,只能是 'gemini' 或 'openai'
70
- llm_provider: "gemini"
71
-
72
- # Gemini 模型 - 使用 Gemini 服务时的具体模型名称
73
- # 推荐使用gemma-3-27b-it,每天可免费调用14000次(15:00 GMT+8 刷新次数)
74
- gemini_model: "gemma-3-27b-it"
75
-
76
- # OpenAI 模型 - 使用 OpenAI 服务时的具体模型名称
77
- # 推荐使用SiliconFlow,9B以下模型免费不限量调用(注意TPM限制)
78
- openai_model: "THUDM/GLM-4-9B-0414"
79
-
80
- # LLM 温度 - 控制 AI 生成内容的随机性,值越高越随机(0-2之间)
81
- llm_temperature: 0.7
82
-
83
- # 聊天生成间隔(秒) - 调用 Chatbot 生成新观众聊天的时间间隔
84
- chat_generation_interval_sec: 2
85
-
86
- # 每批聊天生成数量 - 每次调用 Chatbot 时生成的聊天消息数量
87
- chats_per_batch: 3
88
-
89
- # 最大输出 Token 数 - 单次调用 Chatbot 时允许生成的最大 token 数量
90
- max_output_tokens: 300
91
-
92
- # Chatbot 提示模板 - 用于指导 AI 生成观众聊天内容的提示词
93
- # 其中 {neuro_speech} 和 {num_chats_to_generate} 会被动态替换
94
- prompt_template: |
95
- You are a Twitch live stream viewer. Your goal is to generate short, realistic, and relevant chat messages.
96
- The streamer, Neuro-Sama, just said the following:
97
- ---
98
- {neuro_speech}
99
- ---
100
- Based on what Neuro-Sama said, generate a variety of chat messages. Your messages should be:
101
- - Directly reacting to her words.
102
- - Asking follow-up questions.
103
- - Using relevant Twitch emotes (like LUL, Pog, Kappa, etc.).
104
- - General banter related to the topic.
105
- - Short and punchy, like real chat messages.
106
- Do NOT act as the streamer. Do NOT generate full conversations.
107
- Generate exactly {num_chats_to_generate} distinct chat messages. Each message must be prefixed with a DIFFERENT fictional username, like 'ChatterBoy: message text', 'EmoteFan: message text'.
108
-
109
- # 用户名黑名单 - 当检测到这些用户名时会自动替换为 username_pool 中的用户名
110
- username_blocklist: ["ChatterBoy", "EmoteFan", "Username", "User"]
111
-
112
- # 用户名池 - 用于替换黑名单用户名或生成新用户名(有时候Chatbot LLM可能未给出用户名)的候选列表
113
- username_pool:
114
- - "ChatterBox"
115
- - "EmoteLord"
116
- - "QuestionMark"
117
- - "StreamFan"
118
- - "PixelPundit"
119
- - "CodeSage"
120
- - "DataDiver"
121
- - "ByteBard"
122
-
123
- # --- 音频合成 (TTS) 配置 ---
124
- tts:
125
- # 语音名称 - 不要调整这个设置
126
- voice_name: "en-US-AshleyNeural"
127
-
128
- # 语音音调 - 除非你不想听Neuro的声音
129
- voice_pitch: 1.25
130
-
131
- # --- 数据流与性能配置 ---
132
- performance:
133
- # 输入队列最大大小 - 可能被提供给 Neuro 作为输入的聊天消息最大数量
134
- # 具体逻辑是在 neuro_input_queue_max_size 中抽取 input_chat_sample_size 条消息发送
135
- neuro_input_queue_max_size: 200
136
-
137
- # 观众聊天缓冲区最大大小 - 后端存储的聊天记录总量
138
- audience_chat_buffer_max_size: 1000
139
-
140
- # 客户端初始聊天数 - 向新客户端发送的历史聊天消息数量,主要用来应对中途加入的客户端
141
- initial_chat_backlog_limit: 50
142
-
143
- # --- 服务器配置 ---
144
- server:
145
- # 服务器主机地址 - 服务器监听的主机地址(使用 uvicorn 命令启动时此设置无效)
146
- host: "127.0.0.1"
147
-
148
- # 服务器端口 - 服务器监听的端口号(使用 uvicorn 命令启动时此设置无效)
149
- port: 8000
150
-
151
- # 面板密码 - 设置 API token 用于外部控制面板的身份验证,在公网持续部署时强烈建议开启
152
- panel_password: "your-secret-api-token-here"
153
-
154
- # 客户端来源 - 允许跨域访问的客户端地址列表,非本机访问时记得添加一下
155
- client_origins:
156
- - "http://localhost:5173"
157
- - "http://127.0.0.1:5173"
neuro_simulator/letta.py DELETED
@@ -1,164 +0,0 @@
1
- # backend/letta.py
2
- from letta_client import Letta, MessageCreate, TextContent, LlmConfig, AssistantMessage
3
- from fastapi import HTTPException, status
4
- from .config import config_manager
5
- import asyncio
6
- from typing import Union
7
-
8
- # Global variables
9
- letta_client: Union[Letta, None] = None
10
-
11
- def initialize_letta_client():
12
- """Initializes the Letta client if not already initialized."""
13
- global letta_client
14
- if letta_client:
15
- return
16
-
17
- try:
18
- if not config_manager.settings.api_keys.letta_token:
19
- raise ValueError("LETTA_API_TOKEN is not set. Cannot initialize Letta client.")
20
-
21
- client_args = {'token': config_manager.settings.api_keys.letta_token}
22
- if config_manager.settings.api_keys.letta_base_url:
23
- client_args['base_url'] = config_manager.settings.api_keys.letta_base_url
24
- print(f"Letta client is being initialized for self-hosted URL: {config_manager.settings.api_keys.letta_base_url}")
25
- else:
26
- print("Letta client is being initialized for Letta Cloud.")
27
-
28
- letta_client = Letta(**client_args)
29
-
30
- if config_manager.settings.api_keys.neuro_agent_id:
31
- try:
32
- agent_data = letta_client.agents.retrieve(agent_id=config_manager.settings.api_keys.neuro_agent_id)
33
- print(f"成功获取 Letta Agent 详情,ID: {agent_data.id}")
34
- llm_model_info = "N/A"
35
- if hasattr(agent_data, 'model') and agent_data.model:
36
- llm_model_info = agent_data.model
37
- elif agent_data.llm_config:
38
- if isinstance(agent_data.llm_config, LlmConfig):
39
- llm_config_dict = agent_data.llm_config.model_dump() if hasattr(agent_data.llm_config, 'model_dump') else agent_data.llm_config.__dict__
40
- llm_model_info = llm_config_dict.get('model_name') or llm_config_dict.get('name') or llm_config_dict.get('model')
41
- if not llm_model_info:
42
- llm_model_info = str(agent_data.llm_config)
43
- print(f" -> Agent 名称: {agent_data.name}")
44
- print(f" -> LLM 模型: {llm_model_info}")
45
- except Exception as e:
46
- error_msg = f"错误: 无法获取 Neuro Letta Agent (ID: {config_manager.settings.api_keys.neuro_agent_id})。请确保 ID 正确,且服务可访问。详情: {e}"
47
- print(error_msg)
48
- raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=error_msg)
49
- except Exception as e:
50
- print(f"初始化 Letta 客户端失败: {e}")
51
- letta_client = None
52
-
53
- def get_letta_client():
54
- if letta_client is None: raise ValueError("Letta client is not initialized.")
55
- return letta_client
56
-
57
- async def initialize_agent():
58
- """Initialize the appropriate agent based on configuration"""
59
- agent_type = config_manager.settings.agent_type
60
-
61
- if agent_type == "letta":
62
- initialize_letta_client()
63
- print("Using Letta as the agent")
64
- else:
65
- print(f"Unknown agent type: {agent_type}. Defaulting to Letta.")
66
- initialize_letta_client()
67
-
68
- return agent_type
69
-
70
- async def reset_neuro_agent_memory():
71
- """
72
- 重置 Agent 的记忆,包括:
73
- 1. 清空所有消息历史记录。
74
- 2. 清空指定的 'conversation_summary' 核心内存块。
75
- """
76
- # Ensure letta client is initialized before using it
77
- initialize_letta_client()
78
- if letta_client is None:
79
- print("Letta client 未初始化,跳过重置。")
80
- return
81
-
82
- agent_id = config_manager.settings.api_keys.neuro_agent_id
83
- if not agent_id:
84
- print("Letta Agent ID 未配置,跳过重置。")
85
- return
86
-
87
- # --- 步骤 1: 重置消息历史记录 (上下文) ---
88
- try:
89
- letta_client.agents.messages.reset(agent_id=agent_id)
90
- print(f"Neuro Agent (ID: {agent_id}) 的消息历史已成功重置。")
91
- except Exception as e:
92
- print(f"警告: 重置 Agent 消息历史失败: {e}。")
93
-
94
- # --- 步骤 2: 清空 'conversation_summary' 核心内存块 ---
95
- block_label_to_clear = "conversation_summary"
96
- try:
97
- print(f"正在尝试清空核心记忆块: '{block_label_to_clear}'...")
98
-
99
- # 调用 modify 方法,将 value 设置为空字符串
100
- letta_client.agents.blocks.modify(
101
- agent_id=agent_id,
102
- block_label=block_label_to_clear,
103
- value=""
104
- )
105
-
106
- print(f"核心记忆块 '{block_label_to_clear}' 已成功清空。")
107
- except Exception as e:
108
- # 优雅地处理块不存在的情况
109
- # API 在找不到块时通常会返回包含 404 或 "not found" 的错误
110
- error_str = str(e).lower()
111
- if "not found" in error_str or "404" in error_str:
112
- print(f"信息: 核心记忆块 '{block_label_to_clear}' 不存在,无需清空。")
113
- else:
114
- print(f"警告: 清空核心记忆块 '{block_label_to_clear}' 失败: {e}。")
115
-
116
- async def get_neuro_response(chat_messages: list[dict]) -> str:
117
- # Ensure letta client is initialized before using it
118
- initialize_letta_client()
119
- if letta_client is None or not config_manager.settings.api_keys.neuro_agent_id:
120
- print("错误: Letta client 或 Agent ID 未配置,无法获取响应。")
121
- return "Someone tell Vedal there is a problem with my AI."
122
-
123
- if chat_messages:
124
- injected_chat_lines = [f"{chat['username']}: {chat['text']}" for chat in chat_messages]
125
- injected_chat_text = (
126
- "Here are some recent messages from my Twitch chat:\n---\n" +
127
- "\n".join(injected_chat_lines) +
128
- "\n---\nNow, as the streamer Neuro-Sama, please continue the conversation naturally."
129
- )
130
- else:
131
- injected_chat_text = "My chat is quiet right now. As Neuro-Sama, what should I say to engage them?"
132
-
133
- print(f"正在向 Neuro Agent 发送输入 (包含 {len(chat_messages)} 条消息)..." )
134
-
135
- try:
136
- # 使用 asyncio.to_thread 在线程池中执行阻塞调用,避免阻塞事件循环
137
- response = await asyncio.to_thread(
138
- letta_client.agents.messages.create,
139
- agent_id=config_manager.settings.api_keys.neuro_agent_id,
140
- messages=[MessageCreate(role="user", content=injected_chat_text)]
141
- )
142
-
143
- ai_full_response_text = ""
144
- if response and response.messages:
145
- last_message = response.messages[-1]
146
- if isinstance(last_message, AssistantMessage) and hasattr(last_message, 'content'):
147
- content = last_message.content
148
- if isinstance(content, str):
149
- ai_full_response_text = content.strip()
150
- elif isinstance(content, list) and content:
151
- first_part = content[0]
152
- if isinstance(first_part, TextContent) and hasattr(first_part, 'text'):
153
- ai_full_response_text = first_part.text.strip()
154
-
155
- if not ai_full_response_text:
156
- print(f"警告: 未能从 Letta 响应中解析出有效的文本。响应对象: {response}")
157
- return "Someone tell Vedal there is a problem with my AI."
158
-
159
- print(f"成功从 Letta 解析到响应: '{ai_full_response_text[:70]}...'")
160
- return ai_full_response_text
161
-
162
- except Exception as e:
163
- print(f"错误: 调用 Letta Agent ({config_manager.settings.api_keys.neuro_agent_id}) 失败: {e}")
164
- return "Someone tell Vedal there is a problem with my AI."
@@ -1,43 +0,0 @@
1
- # backend/log_handler.py
2
- import logging
3
- from collections import deque
4
- from typing import Deque
5
-
6
- # 创建两个独立的、有界限的队列,用于不同来源的日志
7
- server_log_queue: Deque[str] = deque(maxlen=1000)
8
- agent_log_queue: Deque[str] = deque(maxlen=1000)
9
-
10
- class QueueLogHandler(logging.Handler):
11
- """一个将日志记录发送到指定队列的处理器。"""
12
- def __init__(self, queue: Deque[str]):
13
- super().__init__()
14
- self.queue = queue
15
-
16
- def emit(self, record: logging.LogRecord):
17
- log_entry = self.format(record)
18
- self.queue.append(log_entry)
19
-
20
- def configure_server_logging():
21
- """配置服务器(根)日志记录器,将其日志发送到 server_log_queue。"""
22
- # 为服务器日志创建一个处理器实例
23
- server_queue_handler = QueueLogHandler(server_log_queue)
24
- formatter = logging.Formatter('%(asctime)s - [SERVER] - %(levelname)s - %(message)s', datefmt='%H:%M:%S')
25
- server_queue_handler.setFormatter(formatter)
26
-
27
- # 获取根 logger 并添加 handler
28
- # 这将捕获所有未被专门处理的日志(来自fastapi, uvicorn等)
29
- root_logger = logging.getLogger()
30
- # 清除可能存在的旧handler,以防万一
31
- if root_logger.hasHandlers():
32
- root_logger.handlers.clear()
33
-
34
- root_logger.addHandler(server_queue_handler)
35
- root_logger.setLevel(logging.INFO)
36
-
37
- # 将 uvicorn 的日志也引导到我们的 handler
38
- logging.getLogger("uvicorn.access").handlers = [server_queue_handler]
39
- logging.getLogger("uvicorn.error").handlers = [server_queue_handler]
40
-
41
- print("服务器日志系统已配置,将日志输出到 server_log_queue。")
42
-
43
- # Agent 的日志配置将会在 agent 模块内部完成,以保持解耦