neuro-simulator 0.0.3__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
neuro_simulator/letta.py CHANGED
@@ -2,60 +2,86 @@
2
2
  from letta_client import Letta, MessageCreate, TextContent, LlmConfig, AssistantMessage
3
3
  from fastapi import HTTPException, status
4
4
  from .config import config_manager
5
+ import asyncio
6
+ from typing import Union
5
7
 
6
- # 初始化 Letta 客户端
7
- letta_client: Letta | None = None
8
- try:
9
- if not config_manager.settings.api_keys.letta_token:
10
- raise ValueError("LETTA_API_TOKEN is not set. Cannot initialize Letta client.")
11
-
12
- # 使用 settings 对象进行配置
13
- client_args = {'token': config_manager.settings.api_keys.letta_token}
14
- if config_manager.settings.api_keys.letta_base_url:
15
- client_args['base_url'] = config_manager.settings.api_keys.letta_base_url
16
- print(f"Letta client is being initialized for self-hosted URL: {config_manager.settings.api_keys.letta_base_url}")
17
- else:
18
- print("Letta client is being initialized for Letta Cloud.")
19
-
20
- letta_client = Letta(**client_args)
21
-
22
- if config_manager.settings.api_keys.neuro_agent_id:
23
- try:
24
- agent_data = letta_client.agents.retrieve(agent_id=config_manager.settings.api_keys.neuro_agent_id)
25
- print(f"成功获取 Letta Agent 详情,ID: {agent_data.id}")
26
- llm_model_info = "N/A"
27
- if hasattr(agent_data, 'model') and agent_data.model:
28
- llm_model_info = agent_data.model
29
- elif agent_data.llm_config:
30
- if isinstance(agent_data.llm_config, LlmConfig):
31
- llm_config_dict = agent_data.llm_config.model_dump() if hasattr(agent_data.llm_config, 'model_dump') else agent_data.llm_config.__dict__
32
- llm_model_info = llm_config_dict.get('model_name') or llm_config_dict.get('name') or llm_config_dict.get('model')
33
- if not llm_model_info:
34
- llm_model_info = str(agent_data.llm_config)
35
- print(f" -> Agent 名称: {agent_data.name}")
36
- print(f" -> LLM 模型: {llm_model_info}")
37
-
38
- except Exception as e:
39
- error_msg = f"错误: 无法获取 Neuro Letta Agent (ID: {config_manager.settings.api_keys.neuro_agent_id})。请确保 ID 正确,且服务可访问。详情: {e}"
40
- print(error_msg)
41
- raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=error_msg)
42
- except Exception as e:
43
- print(f"初始化 Letta 客户端失败: {e}")
44
- letta_client = None
8
# Global variables
# Shared Letta client; None until initialize_letta_client() succeeds.
letta_client: Union[Letta, None] = None


def initialize_letta_client():
    """Initialize the module-level Letta client on first use.

    Idempotent: returns immediately if a client already exists.  On any
    failure the client is left as ``None`` and the error is printed; callers
    obtain the client via ``get_letta_client()``, which raises if it is None.
    """
    global letta_client
    if letta_client:
        return

    try:
        if not config_manager.settings.api_keys.letta_token:
            raise ValueError("LETTA_API_TOKEN is not set. Cannot initialize Letta client.")

        # Build constructor kwargs; a configured base_url means self-hosted.
        client_args = {'token': config_manager.settings.api_keys.letta_token}
        if config_manager.settings.api_keys.letta_base_url:
            client_args['base_url'] = config_manager.settings.api_keys.letta_base_url
            print(f"Letta client is being initialized for self-hosted URL: {config_manager.settings.api_keys.letta_base_url}")
        else:
            print("Letta client is being initialized for Letta Cloud.")

        letta_client = Letta(**client_args)

        # Optional sanity check: verify the configured agent is reachable
        # and log which LLM it is using.
        if config_manager.settings.api_keys.neuro_agent_id:
            try:
                agent_data = letta_client.agents.retrieve(agent_id=config_manager.settings.api_keys.neuro_agent_id)
                print(f"成功获取 Letta Agent 详情,ID: {agent_data.id}")
                llm_model_info = "N/A"
                if hasattr(agent_data, 'model') and agent_data.model:
                    llm_model_info = agent_data.model
                elif agent_data.llm_config:
                    if isinstance(agent_data.llm_config, LlmConfig):
                        llm_config_dict = agent_data.llm_config.model_dump() if hasattr(agent_data.llm_config, 'model_dump') else agent_data.llm_config.__dict__
                        llm_model_info = llm_config_dict.get('model_name') or llm_config_dict.get('name') or llm_config_dict.get('model')
                    if not llm_model_info:
                        llm_model_info = str(agent_data.llm_config)
                print(f" -> Agent 名称: {agent_data.name}")
                print(f" -> LLM 模型: {llm_model_info}")
            except Exception as e:
                error_msg = f"错误: 无法获取 Neuro Letta Agent (ID: {config_manager.settings.api_keys.neuro_agent_id})。请确保 ID 正确,且服务可访问。详情: {e}"
                print(error_msg)
                # BUG FIX: this previously raised HTTPException, which was
                # immediately swallowed by the outer `except Exception` below,
                # so no HTTP response could ever result.  Raise a plain
                # RuntimeError to make the intent (abort initialization)
                # explicit; the outer handler still nulls the client.
                raise RuntimeError(error_msg)
    except Exception as e:
        # Any failure leaves the module without a usable client.
        print(f"初始化 Letta 客户端失败: {e}")
        letta_client = None
45
52
 
46
53
def get_letta_client():
    """Return the shared Letta client, raising ValueError if uninitialized."""
    if letta_client is None:
        raise ValueError("Letta client is not initialized.")
    return letta_client
49
56
 
57
async def initialize_agent():
    """Initialize the agent backend selected by ``agent_type`` in config.

    Returns the configured agent-type string.  Unknown types fall back to
    Letta.  BUG FIX: the client setup performs blocking network I/O
    (agent retrieval), which previously ran directly inside this coroutine
    and stalled the event loop; it now runs in a worker thread, matching
    the ``asyncio.to_thread`` pattern this file already uses in
    ``get_neuro_response``.
    """
    agent_type = config_manager.settings.agent_type

    if agent_type == "letta":
        await asyncio.to_thread(initialize_letta_client)
        print("Using Letta as the agent")
    else:
        print(f"Unknown agent type: {agent_type}. Defaulting to Letta.")
        await asyncio.to_thread(initialize_letta_client)

    return agent_type
69
+
50
70
  async def reset_neuro_agent_memory():
51
71
  """
52
72
  重置 Agent 的记忆,包括:
53
73
  1. 清空所有消息历史记录。
54
74
  2. 清空指定的 'conversation_summary' 核心内存块。
55
75
  """
76
+ # Ensure letta client is initialized before using it
77
+ initialize_letta_client()
78
+ if letta_client is None:
79
+ print("Letta client 未初始化,跳过重置。")
80
+ return
81
+
56
82
  agent_id = config_manager.settings.api_keys.neuro_agent_id
57
- if letta_client is None or not agent_id:
58
- print("Letta client 或 Agent ID 未配置,跳过重置。")
83
+ if not agent_id:
84
+ print("Letta Agent ID 未配置,跳过重置。")
59
85
  return
60
86
 
61
87
  # --- 步骤 1: 重置消息历史记录 (上下文) ---
@@ -87,11 +113,12 @@ async def reset_neuro_agent_memory():
87
113
  else:
88
114
  print(f"警告: 清空核心记忆块 '{block_label_to_clear}' 失败: {e}。")
89
115
 
90
-
91
116
  async def get_neuro_response(chat_messages: list[dict]) -> str:
117
+ # Ensure letta client is initialized before using it
118
+ initialize_letta_client()
92
119
  if letta_client is None or not config_manager.settings.api_keys.neuro_agent_id:
93
- print("警告: Letta client 或 Agent ID 未配置,无法获取响应。")
94
- return "我暂时无法回应,请稍后再试。"
120
+ print("错误: Letta client 或 Agent ID 未配置,无法获取响应。")
121
+ return "Someone tell Vedal there is a problem with my AI."
95
122
 
96
123
  if chat_messages:
97
124
  injected_chat_lines = [f"{chat['username']}: {chat['text']}" for chat in chat_messages]
@@ -106,7 +133,9 @@ async def get_neuro_response(chat_messages: list[dict]) -> str:
106
133
  print(f"正在向 Neuro Agent 发送输入 (包含 {len(chat_messages)} 条消息)..." )
107
134
 
108
135
  try:
109
- response = letta_client.agents.messages.create(
136
+ # 使用 asyncio.to_thread 在线程池中执行阻塞调用,避免阻塞事件循环
137
+ response = await asyncio.to_thread(
138
+ letta_client.agents.messages.create,
110
139
  agent_id=config_manager.settings.api_keys.neuro_agent_id,
111
140
  messages=[MessageCreate(role="user", content=injected_chat_text)]
112
141
  )
@@ -125,7 +154,7 @@ async def get_neuro_response(chat_messages: list[dict]) -> str:
125
154
 
126
155
  if not ai_full_response_text:
127
156
  print(f"警告: 未能从 Letta 响应中解析出有效的文本。响应对象: {response}")
128
- return "I seem to be at a loss for words right now."
157
+ return "Someone tell Vedal there is a problem with my AI."
129
158
 
130
159
  print(f"成功从 Letta 解析到响应: '{ai_full_response_text[:70]}...'")
131
160
  return ai_full_response_text
@@ -1,29 +1,43 @@
1
1
  # backend/log_handler.py
2
2
  import logging
3
- import asyncio
4
3
  from collections import deque
4
+ from typing import Deque
5
5
 
6
# Two independent bounded queues so server logs and agent logs can be
# streamed separately; maxlen keeps memory use capped.
server_log_queue: Deque[str] = deque(maxlen=1000)
agent_log_queue: Deque[str] = deque(maxlen=1000)
8
9
 
9
10
class QueueLogHandler(logging.Handler):
    """A logging handler that appends formatted records to a given queue."""

    def __init__(self, queue: Deque[str]):
        """Store the destination queue (a bounded deque shared with readers)."""
        super().__init__()
        self.queue = queue

    def emit(self, record: logging.LogRecord):
        """Format the record and append it to the queue.

        BUG FIX: follows the stdlib ``logging.Handler`` convention — any
        formatting/append error is routed to ``handleError()`` instead of
        propagating into the application code that emitted the log call.
        """
        try:
            log_entry = self.format(record)
            self.queue.append(log_entry)
        except Exception:
            self.handleError(record)
15
19
 
16
def configure_server_logging():
    """Wire the root logger (and uvicorn's loggers) to server_log_queue."""
    handler = QueueLogHandler(server_log_queue)
    handler.setFormatter(
        logging.Formatter('%(asctime)s - [SERVER] - %(levelname)s - %(message)s', datefmt='%H:%M:%S')
    )

    # The root logger captures everything not handled elsewhere
    # (fastapi, uvicorn, ...); drop any pre-existing handlers first so
    # records are not duplicated.
    root_logger = logging.getLogger()
    if root_logger.hasHandlers():
        root_logger.handlers.clear()

    root_logger.addHandler(handler)
    root_logger.setLevel(logging.INFO)

    # Route uvicorn's own loggers into the same queue handler.
    for uvicorn_logger_name in ("uvicorn.access", "uvicorn.error"):
        logging.getLogger(uvicorn_logger_name).handlers = [handler]

    print("服务器日志系统已配置,将日志输出到 server_log_queue。")

# Agent logging is configured inside the agent module to keep the two decoupled.
neuro_simulator/main.py CHANGED
@@ -24,11 +24,11 @@ from fastapi.security import APIKeyCookie
24
24
  # --- 核心模块导入 ---
25
25
  from .config import config_manager, AppSettings
26
26
  from .process_manager import process_manager
27
- from .log_handler import configure_logging, log_queue
27
+ from .log_handler import configure_server_logging, server_log_queue, agent_log_queue
28
28
 
29
29
  # --- 功能模块导入 ---
30
30
  from .chatbot import ChatbotManager, get_dynamic_audience_prompt
31
- from .letta import get_neuro_response, reset_neuro_agent_memory
31
+ # from .letta import get_neuro_response, reset_neuro_agent_memory, initialize_agent # This will be imported dynamically
32
32
  from .audio_synthesis import synthesize_audio_segment
33
33
  from .stream_chat import (
34
34
  add_to_audience_buffer, add_to_neuro_input_queue,
@@ -39,7 +39,13 @@ from .stream_manager import live_stream_manager
39
39
  import neuro_simulator.shared_state as shared_state
40
40
 
41
41
  # --- FastAPI 应用和模板设置 ---
42
- app = FastAPI(title="Neuro-Sama Simulator Backend")
42
from .agent.api import router as agent_router

app = FastAPI(title="vedal987 Simulator API", version="1.0.0")

# Register the agent management API routes.
# BUG FIX: the router was previously included TWICE, which registers every
# endpoint (and its OpenAPI schema entry) in duplicate; include it once.
app.include_router(agent_router)
43
49
  app.add_middleware(
44
50
  CORSMiddleware,
45
51
  allow_origins=config_manager.settings.server.client_origins + ["http://localhost:8080", "https://dashboard.live.jiahui.cafe"], # 添加dashboard_web的地址
@@ -142,6 +148,13 @@ async def neuro_response_cycle():
142
148
  print("Neuro响应周期: 任务启动。")
143
149
  is_first_response = True
144
150
 
151
+ # Dynamically import get_neuro_response to respect agent_type
152
+ agent_type = config_manager.settings.agent_type
153
+ if agent_type == "builtin":
154
+ from .builtin_agent import get_builtin_response as get_neuro_response
155
+ else:
156
+ from .letta import get_neuro_response
157
+
145
158
  while True:
146
159
  try:
147
160
  if is_first_response:
@@ -155,17 +168,42 @@ async def neuro_response_cycle():
155
168
  current_queue_snapshot = get_all_neuro_input_chats()
156
169
  sample_size = min(config_manager.settings.neuro_behavior.input_chat_sample_size, len(current_queue_snapshot))
157
170
  selected_chats = random.sample(current_queue_snapshot, sample_size)
158
- ai_full_response_text = await get_neuro_response(selected_chats)
171
+
172
+ # 使用 asyncio.wait_for 添加超时机制,避免长时间阻塞
173
+ try:
174
+ ai_full_response_text = await asyncio.wait_for(
175
+ get_neuro_response(selected_chats),
176
+ timeout=10.0 # 默认10秒超时
177
+ )
178
+ except asyncio.TimeoutError:
179
+ print(f"警告: {agent_type} 响应超时,跳过本轮。")
180
+ await asyncio.sleep(5)
181
+ continue
159
182
 
160
183
  async with shared_state.neuro_last_speech_lock:
161
- if ai_full_response_text and ai_full_response_text.strip():
162
- shared_state.neuro_last_speech = ai_full_response_text
184
+ # Handle both string and dict responses
185
+ response_text = ""
186
+ if isinstance(ai_full_response_text, dict):
187
+ # Extract the final response from the dict
188
+ response_text = ai_full_response_text.get("final_response", "")
189
+ else:
190
+ response_text = ai_full_response_text if ai_full_response_text else ""
191
+
192
+ if response_text and response_text.strip():
193
+ shared_state.neuro_last_speech = response_text
163
194
  else:
164
195
  shared_state.neuro_last_speech = "(Neuro-Sama is currently silent...)"
165
- print("警告: 从 Letta 获取的响应为空,跳过本轮。")
196
+ print(f"警告: 从 {agent_type} 获取的响应为空,跳过本轮。")
166
197
  continue
167
198
 
168
- sentences = [s.strip() for s in re.split(r'(?<=[.!?])\s+', ai_full_response_text.replace('\n', ' ').strip()) if s.strip()]
199
+ # Handle both string and dict responses for sentence splitting
200
+ response_text = ""
201
+ if isinstance(ai_full_response_text, dict):
202
+ response_text = ai_full_response_text.get("final_response", "")
203
+ else:
204
+ response_text = ai_full_response_text if ai_full_response_text else ""
205
+
206
+ sentences = [s.strip() for s in re.split(r'(?<=[.!?])\s+', response_text.replace('\n', ' ').strip()) if s.strip()]
169
207
  if not sentences:
170
208
  continue
171
209
 
@@ -212,7 +250,7 @@ async def neuro_response_cycle():
212
250
  async def startup_event():
213
251
  """应用启动时执行。"""
214
252
  global chatbot_manager
215
- configure_logging()
253
+ configure_server_logging()
216
254
 
217
255
  # 实例化管理器
218
256
  chatbot_manager = ChatbotManager()
@@ -224,6 +262,16 @@ async def startup_event():
224
262
  config_manager.register_update_callback(metadata_callback)
225
263
  config_manager.register_update_callback(chatbot_manager.handle_config_update)
226
264
 
265
+ # Initialize the appropriate agent
266
+ from .letta import initialize_agent
267
+ from .builtin_agent import initialize_builtin_agent
268
+
269
+ agent_type = config_manager.settings.agent_type
270
+ if agent_type == "builtin":
271
+ await initialize_builtin_agent()
272
+ else:
273
+ await initialize_agent()
274
+
227
275
  print("FastAPI 应用已启动。请通过外部控制面板控制直播进程。")
228
276
 
229
277
  @app.on_event("shutdown")
@@ -241,6 +289,13 @@ async def shutdown_event():
241
289
  @app.post("/api/stream/start", tags=["Stream Control"], dependencies=[Depends(get_api_token)])
242
290
  async def api_start_stream():
243
291
  """启动直播"""
292
+ # If using builtin agent, clear temp memory and context when starting stream
293
+ agent_type = config_manager.settings.agent_type
294
+ if agent_type == "builtin":
295
+ from .builtin_agent import clear_builtin_agent_temp_memory, clear_builtin_agent_context
296
+ await clear_builtin_agent_temp_memory()
297
+ await clear_builtin_agent_context()
298
+
244
299
  if not process_manager.is_running:
245
300
  process_manager.start_live_processes()
246
301
  return {"status": "success", "message": "直播已启动"}
@@ -264,6 +319,20 @@ async def api_restart_stream():
264
319
  process_manager.start_live_processes()
265
320
  return {"status": "success", "message": "直播已重启"}
266
321
 
322
@app.post("/api/agent/reset_memory", tags=["Agent"], dependencies=[Depends(get_api_token)])
async def api_reset_agent_memory():
    """Reset the memory of whichever agent backend is configured."""
    # Dispatch on the configured backend; imports are deferred so only the
    # active backend's module is loaded.
    if config_manager.settings.agent_type == "builtin":
        from .builtin_agent import reset_builtin_agent_memory
        await reset_builtin_agent_memory()
        return {"status": "success", "message": "内置Agent记忆已重置"}

    from .letta import reset_neuro_agent_memory
    await reset_neuro_agent_memory()
    return {"status": "success", "message": "Letta Agent记忆已重置"}
335
+
267
336
  @app.get("/api/stream/status", tags=["Stream Control"], dependencies=[Depends(get_api_token)])
268
337
  async def api_get_stream_status():
269
338
  """获取直播状态"""
@@ -272,16 +341,6 @@ async def api_get_stream_status():
272
341
  "backend_status": "running" if process_manager.is_running else "stopped"
273
342
  }
274
343
 
275
- # -------------------------------------------------------------
276
- # --- 日志 API 端点 ---
277
- # -------------------------------------------------------------
278
-
279
- @app.get("/api/logs", tags=["Logs"], dependencies=[Depends(get_api_token)])
280
- async def api_get_logs(lines: int = 50):
281
- """获取最近的日志行"""
282
- logs_list = list(log_queue)
283
- return {"logs": logs_list[-lines:] if len(logs_list) > lines else logs_list}
284
-
285
344
  # -------------------------------------------------------------
286
345
  # --- WebSocket 端点 ---
287
346
  # -------------------------------------------------------------
@@ -316,23 +375,102 @@ async def websocket_stream_endpoint(websocket: WebSocket):
316
375
  finally:
317
376
  connection_manager.disconnect(websocket)
318
377
 
319
- @app.websocket("/ws/logs")
320
- async def websocket_logs_endpoint(websocket: WebSocket):
378
@app.websocket("/ws/admin")
async def websocket_admin_endpoint(websocket: WebSocket):
    """Admin-panel WebSocket: streams server logs, agent logs and (for the
    builtin agent) context updates to the connected dashboard client."""
    await websocket.accept()
    try:
        # Send initial server logs (iterate a copy; the live deque is
        # consumed by popleft() in the loop below).
        for log_entry in list(server_log_queue):
            await websocket.send_json({"type": "server_log", "data": log_entry})

        # Send initial agent logs
        for log_entry in list(agent_log_queue):
            await websocket.send_json({"type": "agent_log", "data": log_entry})

        # Send initial context
        # Import the appropriate agent based on config
        from .config import config_manager
        agent_type = config_manager.settings.agent_type
        if agent_type == "builtin":
            # NOTE(review): local_agent is bound once at connect time; if the
            # builtin agent is (re)initialized after this point, the loop
            # below keeps using the stale binding — confirm that is intended.
            from .builtin_agent import local_agent
            if local_agent is not None:
                context_messages = await local_agent.memory_manager.get_recent_context()
                await websocket.send_json({
                    "type": "agent_context",
                    "action": "update",
                    "messages": context_messages
                })

        # Keep track of last context messages to detect changes
        last_context_messages = []

        # Start heartbeat task
        heartbeat_task = asyncio.create_task(send_heartbeat(websocket))

        while websocket.client_state == WebSocketState.CONNECTED:
            # Check for new server logs (drains at most one entry per pass)
            if server_log_queue:
                log_entry = server_log_queue.popleft()
                await websocket.send_json({"type": "server_log", "data": log_entry})

            # Check for new agent logs
            if agent_log_queue:
                log_entry = agent_log_queue.popleft()
                await websocket.send_json({"type": "agent_log", "data": log_entry})

            # Check for context updates (for builtin agent).
            # `agent_type == "builtin"` short-circuits first, so local_agent
            # is only referenced when the import above actually ran.
            if agent_type == "builtin" and local_agent is not None:
                context_messages = await local_agent.memory_manager.get_recent_context()
                # Compare with last context to detect changes
                if context_messages != last_context_messages:
                    # Send only new messages
                    if len(context_messages) > len(last_context_messages):
                        new_messages = context_messages[len(last_context_messages):]
                        await websocket.send_json({
                            "type": "agent_context",
                            "action": "append",
                            "messages": new_messages
                        })
                    else:
                        # Only send full update if messages were actually removed (e.g., context reset)
                        # Don't send update if it's just a reordering or modification
                        if len(context_messages) < len(last_context_messages):
                            await websocket.send_json({
                                "type": "agent_context",
                                "action": "update",
                                "messages": context_messages
                            })
                        else:
                            # Send as append if same length but different content
                            # NOTE(review): this re-sends the ENTIRE context with
                            # action="append"; unless the client dedupes, it
                            # will display duplicated messages — confirm.
                            await websocket.send_json({
                                "type": "agent_context",
                                "action": "append",
                                "messages": context_messages
                            })
                    last_context_messages = context_messages

            # Small delay to prevent busy waiting
            await asyncio.sleep(0.1)
    except WebSocketDisconnect:
        print("管理面板WebSocket客户端已断开连接。")
    finally:
        # Cancel heartbeat task (guarded: it may not exist if an initial
        # send raised before create_task ran).
        if 'heartbeat_task' in locals():
            heartbeat_task.cancel()
        print("管理面板WebSocket连接关闭。")
461
+
462
+
463
+ # 心跳任务,定期发送心跳消息以保持连接活跃
464
+ async def send_heartbeat(websocket: WebSocket):
465
+ while websocket.client_state == WebSocketState.CONNECTED:
466
+ try:
467
+ # 发送心跳消息
468
+ await websocket.send_json({"type": "heartbeat", "timestamp": time.time()})
469
+ # 每5秒发送一次心跳
470
+ await asyncio.sleep(5)
471
+ except Exception as e:
472
+ print(f"发送心跳消息时出错: {e}")
473
+ break
336
474
 
337
475
 
338
476
  # -------------------------------------------------------------
@@ -372,6 +510,13 @@ def filter_config_for_frontend(settings):
372
510
  'stream_tags': settings.stream_metadata.stream_tags
373
511
  }
374
512
 
513
+ # Agent settings (不包含 agent_type)
514
+ if hasattr(settings, 'agent'):
515
+ filtered_settings['agent'] = {
516
+ 'agent_provider': settings.agent.agent_provider,
517
+ 'agent_model': settings.agent.agent_model
518
+ }
519
+
375
520
  # Neuro behavior settings
376
521
  if hasattr(settings, 'neuro_behavior'):
377
522
  filtered_settings['neuro_behavior'] = {
@@ -421,6 +566,8 @@ async def update_configs(new_settings: dict):
421
566
  'stream_metadata.stream_title',
422
567
  'stream_metadata.stream_category',
423
568
  'stream_metadata.stream_tags',
569
+ 'agent.agent_provider', # 添加 agent 配置项(不包含 agent_type)
570
+ 'agent.agent_model',
424
571
  'neuro_behavior.input_chat_sample_size',
425
572
  'neuro_behavior.post_speech_cooldown_sec',
426
573
  'neuro_behavior.initial_greeting',
@@ -27,11 +27,14 @@ class ProcessManager:
27
27
  from .main import generate_audience_chat_task, neuro_response_cycle, broadcast_events_task
28
28
  from .stream_manager import live_stream_manager
29
29
  from .stream_chat import clear_all_queues
30
- from .letta import reset_neuro_agent_memory
30
+
31
+ # Initialize Agent and reset memory
32
+ from .letta import reset_neuro_agent_memory, initialize_agent
33
+ import asyncio
34
+ asyncio.create_task(initialize_agent())
31
35
 
32
36
  # 清理状态和队列,开始新的直播周期
33
37
  clear_all_queues()
34
- asyncio.create_task(reset_neuro_agent_memory())
35
38
  live_stream_manager.reset_stream_state()
36
39
 
37
40
  # 创建并存储任务
@@ -84,6 +84,12 @@ class LiveStreamManager:
84
84
  print("正在启动新的直播周期...")
85
85
  self._stream_start_global_time = time.time()
86
86
 
87
+ # 清除旧的上下文历史
88
+ from .builtin_agent import local_agent
89
+ if local_agent is not None:
90
+ await local_agent.memory_manager.reset_context()
91
+ print("旧的上下文历史已清除。")
92
+
87
93
  self._current_phase = self.StreamPhase.INITIALIZING
88
94
  print(f"进入阶段: {self.StreamPhase.INITIALIZING}. 广播 'play_welcome_video' 事件。")
89
95
  await self.event_queue.put({
@@ -1,13 +1,14 @@
1
- Metadata-Version: 2.1
2
- Name: neuro-simulator
3
- Version: 0.0.3
1
+ Metadata-Version: 2.4
2
+ Name: neuro_simulator
3
+ Version: 0.1.2
4
4
  Summary: Neuro Simulator Server
5
- Home-page: https://github.com/Moha-Master/neuro-simulator
6
- Author: Moha-Master
7
- Author-email: hongkongreporter@outlook.com
5
+ Author-email: Moha-Master <hongkongreporter@outlook.com>
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/Moha-Master/neuro-simulator
8
+ Project-URL: Repository, https://github.com/Moha-Master/neuro-simulator
9
+ Project-URL: Issues, https://github.com/Moha-Master/neuro-simulator/issues
8
10
  Classifier: Development Status :: 4 - Beta
9
11
  Classifier: Intended Audience :: Developers
10
- Classifier: License :: OSI Approved :: MIT License
11
12
  Classifier: Operating System :: OS Independent
12
13
  Classifier: Programming Language :: Python :: 3
13
14
  Classifier: Programming Language :: Python :: 3.8
@@ -17,17 +18,22 @@ Classifier: Programming Language :: Python :: 3.11
17
18
  Classifier: Programming Language :: Python :: 3.12
18
19
  Requires-Python: >=3.8
19
20
  Description-Content-Type: text/markdown
20
- Requires-Dist: azure-cognitiveservices-speech
21
21
  Requires-Dist: fastapi
22
+ Requires-Dist: uvicorn
22
23
  Requires-Dist: google-genai
23
- Requires-Dist: jinja2
24
+ Requires-Dist: azure-cognitiveservices-speech
24
25
  Requires-Dist: letta-client
25
- Requires-Dist: mutagen
26
26
  Requires-Dist: openai
27
+ Requires-Dist: pyyaml
27
28
  Requires-Dist: pydantic
29
+ Requires-Dist: jinja2
28
30
  Requires-Dist: python-multipart
29
- Requires-Dist: pyyaml
30
- Requires-Dist: uvicorn
31
+ Requires-Dist: mutagen
32
+ Provides-Extra: dev
33
+ Requires-Dist: pytest>=6.0; extra == "dev"
34
+ Requires-Dist: pytest-cov; extra == "dev"
35
+ Requires-Dist: black; extra == "dev"
36
+ Requires-Dist: flake8; extra == "dev"
31
37
 
32
38
  # Neuro-Simulator 服务端
33
39
 
@@ -57,7 +63,7 @@ neuro_simulator/
57
63
  ├── shared_state.py # 全局状态管理
58
64
  ├── log_handler.py # 日志处理模块
59
65
  ├── requirements.txt # Python 依赖列表
60
- ├── setup.py # Python 包安装配置
66
+ ├── pyproject.toml # Python 包安装配置
61
67
  ├── cli.py # 命令行启动脚本
62
68
  ├── config.yaml.example # 自带的备用配置模板
63
69
  └── media/ # 自带的备用媒体文件
@@ -75,7 +81,8 @@ working_dir_example/ # 工作目录结构,请将这个目录重命名和
75
81
  ## 安装与配置
76
82
 
77
83
  0. **在运行server前,必须有已经配置完成的Letta Agent。**
78
- 1. 复制一份 `../docs/working_dir_example` 到你想要的位置,作为配置文件目录
84
+ 1. 复制一份 `../docs/working_dir_example` 到你想要的位置,作为配置文件目录。
85
+ - 程序会在未指定 `--dir` 的情况下自动生成一个工作目录,路径为 `~/.config/neuro-simulator/`
79
86
  2. 然后进入配置文件目录,复制 `config.yaml.example` 到 `config.yaml`
80
87
  3. 编辑 `config.yaml` 文件,填入必要的 API 密钥和配置项:
81
88
  - Letta Token 和 Agent ID