neuro-simulator 0.1.3__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. neuro_simulator/__init__.py +1 -10
  2. neuro_simulator/agent/__init__.py +1 -8
  3. neuro_simulator/agent/base.py +43 -0
  4. neuro_simulator/agent/core.py +111 -397
  5. neuro_simulator/agent/factory.py +30 -0
  6. neuro_simulator/agent/llm.py +34 -31
  7. neuro_simulator/agent/memory/__init__.py +1 -4
  8. neuro_simulator/agent/memory/manager.py +61 -203
  9. neuro_simulator/agent/tools/__init__.py +1 -4
  10. neuro_simulator/agent/tools/core.py +8 -18
  11. neuro_simulator/api/__init__.py +1 -0
  12. neuro_simulator/api/agent.py +163 -0
  13. neuro_simulator/api/stream.py +55 -0
  14. neuro_simulator/api/system.py +90 -0
  15. neuro_simulator/cli.py +53 -142
  16. neuro_simulator/core/__init__.py +1 -0
  17. neuro_simulator/core/agent_factory.py +52 -0
  18. neuro_simulator/core/agent_interface.py +91 -0
  19. neuro_simulator/core/application.py +278 -0
  20. neuro_simulator/services/__init__.py +1 -0
  21. neuro_simulator/{chatbot.py → services/audience.py} +24 -24
  22. neuro_simulator/{audio_synthesis.py → services/audio.py} +18 -15
  23. neuro_simulator/services/builtin.py +87 -0
  24. neuro_simulator/services/letta.py +206 -0
  25. neuro_simulator/{stream_manager.py → services/stream.py} +39 -47
  26. neuro_simulator/utils/__init__.py +1 -0
  27. neuro_simulator/utils/logging.py +90 -0
  28. neuro_simulator/utils/process.py +67 -0
  29. neuro_simulator/{stream_chat.py → utils/queue.py} +17 -4
  30. neuro_simulator/utils/state.py +14 -0
  31. neuro_simulator/{websocket_manager.py → utils/websocket.py} +18 -14
  32. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/METADATA +176 -176
  33. neuro_simulator-0.2.0.dist-info/RECORD +37 -0
  34. neuro_simulator/agent/api.py +0 -737
  35. neuro_simulator/agent/memory.py +0 -137
  36. neuro_simulator/agent/tools.py +0 -69
  37. neuro_simulator/builtin_agent.py +0 -83
  38. neuro_simulator/config.yaml.example +0 -157
  39. neuro_simulator/letta.py +0 -164
  40. neuro_simulator/log_handler.py +0 -43
  41. neuro_simulator/main.py +0 -673
  42. neuro_simulator/media/neuro_start.mp4 +0 -0
  43. neuro_simulator/process_manager.py +0 -70
  44. neuro_simulator/shared_state.py +0 -11
  45. neuro_simulator-0.1.3.dist-info/RECORD +0 -31
  46. /neuro_simulator/{config.py → core/config.py} +0 -0
  47. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/WHEEL +0 -0
  48. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/entry_points.txt +0 -0
  49. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,206 @@
1
+ # neuro_simulator/services/letta.py
2
+ import asyncio
3
+ import logging
4
+ from typing import Union, List, Dict, Any, Optional
5
+
6
+ from fastapi import HTTPException, status
7
+ from letta_client import Letta, MessageCreate, TextContent, LlmConfig, AssistantMessage
8
+
9
+ from ..core.agent_interface import BaseAgent
10
+ from ..core.config import config_manager
11
+
12
+ # Standard logger for this module
13
+ logger = logging.getLogger(__name__.replace("neuro_simulator", "server", 1))
14
+
15
+ # Global client instance, initialized once
16
+ letta_client: Union[Letta, None] = None
17
+
18
+ def initialize_letta_client():
19
+ """Initializes the global Letta client if not already initialized."""
20
+ global letta_client
21
+ if letta_client:
22
+ return
23
+
24
+ try:
25
+ if not config_manager.settings.api_keys.letta_token:
26
+ raise ValueError("LETTA_API_TOKEN is not set. Cannot initialize Letta client.")
27
+
28
+ client_args = {'token': config_manager.settings.api_keys.letta_token}
29
+ if config_manager.settings.api_keys.letta_base_url:
30
+ client_args['base_url'] = config_manager.settings.api_keys.letta_base_url
31
+ logger.info(f"Letta client is being initialized for self-hosted URL: {config_manager.settings.api_keys.letta_base_url}")
32
+ else:
33
+ logger.info("Letta client is being initialized for Letta Cloud.")
34
+
35
+ letta_client = Letta(**client_args)
36
+
37
+ agent_id = config_manager.settings.api_keys.neuro_agent_id
38
+ if agent_id:
39
+ try:
40
+ agent_data = letta_client.agents.retrieve(agent_id=agent_id)
41
+ logger.info(f"Successfully verified Letta Agent, ID: {agent_data.id}, Name: {agent_data.name}")
42
+ except Exception as e:
43
+ error_msg = f"Error: Cannot retrieve Letta Agent (ID: {agent_id}). Details: {e}"
44
+ logger.error(error_msg)
45
+ raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=error_msg)
46
+ except Exception as e:
47
+ logger.error(f"Failed to initialize Letta client: {e}")
48
+ letta_client = None
49
+
50
+ def get_letta_client() -> Letta:
51
+ if letta_client is None:
52
+ raise ValueError("Letta client is not initialized.")
53
+ return letta_client
54
+
55
+ class LettaAgent(BaseAgent):
56
+ """Letta Agent implementation that adheres to the BaseAgent interface."""
57
+
58
+ def __init__(self):
59
+ self.client: Letta = None
60
+ self.agent_id: str = None
61
+
62
+ async def initialize(self):
63
+ initialize_letta_client()
64
+ self.client = get_letta_client()
65
+ self.agent_id = config_manager.settings.api_keys.neuro_agent_id
66
+ if not self.agent_id:
67
+ raise ValueError("Letta agent ID (neuro_agent_id) is not configured.")
68
+
69
+ async def reset_memory(self):
70
+ try:
71
+ await asyncio.to_thread(self.client.agents.messages.reset, agent_id=self.agent_id)
72
+ logger.info(f"Letta Agent (ID: {self.agent_id}) message history has been reset.")
73
+ except Exception as e:
74
+ logger.warning(f"Failed to reset Letta Agent message history: {e}")
75
+
76
+ async def process_messages(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
77
+ if messages:
78
+ injected_chat_lines = [f"{chat['username']}: {chat['text']}" for chat in messages]
79
+ injected_chat_text = (
80
+ "Here are some recent messages from my Twitch chat:\n---\n" +
81
+ "\n".join(injected_chat_lines) +
82
+ "\n---\nNow, as the streamer Neuro-Sama, please continue the conversation naturally."
83
+ )
84
+ else:
85
+ injected_chat_text = "My chat is quiet right now. As Neuro-Sama, what should I say to engage them?"
86
+
87
+ logger.info(f"Sending input to Letta Agent ({len(messages)} messages)...")
88
+
89
+ response_text = ""
90
+ error_str = None
91
+
92
+ try:
93
+ response = await asyncio.to_thread(
94
+ self.client.agents.messages.create,
95
+ agent_id=self.agent_id,
96
+ messages=[MessageCreate(role="user", content=injected_chat_text)]
97
+ )
98
+
99
+ if not response or not response.messages:
100
+ raise ValueError("Letta response is empty or contains no messages.")
101
+
102
+ for message in reversed(response.messages):
103
+ if isinstance(message, AssistantMessage) and hasattr(message, 'content'):
104
+ content = message.content
105
+ if isinstance(content, str) and content.strip():
106
+ response_text = content.strip()
107
+ break
108
+ elif isinstance(content, list) and content:
109
+ first_part = content[0]
110
+ if isinstance(first_part, TextContent) and hasattr(first_part, 'text') and first_part.text.strip():
111
+ response_text = first_part.text.strip()
112
+ break
113
+
114
+ if not response_text:
115
+ logger.warning(f"No valid AssistantMessage content found in Letta response.")
116
+ response_text = "I'm not sure what to say to that."
117
+
118
+ except Exception as e:
119
+ logger.error(f"Error calling Letta Agent ({self.agent_id}): {e}")
120
+ error_str = str(e)
121
+ response_text = "Someone tell Vedal there is a problem with my AI."
122
+
123
+ return {
124
+ "input_messages": messages,
125
+ "final_response": response_text,
126
+ "llm_response": response_text,
127
+ "tool_executions": [],
128
+ "error": error_str
129
+ }
130
+
131
+ # Memory Block Management
132
+ async def get_memory_blocks(self) -> List[Dict[str, Any]]:
133
+ try:
134
+ blocks = await asyncio.to_thread(self.client.agents.blocks.list, agent_id=self.agent_id)
135
+ return [block.model_dump() for block in blocks]
136
+ except Exception as e:
137
+ raise HTTPException(status_code=500, detail=f"Error getting memory blocks from Letta: {e}")
138
+
139
+ async def get_memory_block(self, block_id: str) -> Optional[Dict[str, Any]]:
140
+ try:
141
+ block = await asyncio.to_thread(self.client.agents.blocks.retrieve, agent_id=self.agent_id, block_id=block_id)
142
+ return block.model_dump()
143
+ except Exception as e:
144
+ raise HTTPException(status_code=500, detail=f"Error getting memory block from Letta: {e}")
145
+
146
+ async def create_memory_block(self, title: str, description: str, content: List[str]) -> Dict[str, str]:
147
+ try:
148
+ block = await asyncio.to_thread(
149
+ self.client.agents.blocks.create,
150
+ agent_id=self.agent_id,
151
+ name=title,
152
+ content="\n".join(content),
153
+ description=description
154
+ )
155
+ return {"block_id": block.id}
156
+ except Exception as e:
157
+ raise HTTPException(status_code=500, detail=f"Error creating memory block in Letta: {e}")
158
+
159
+ async def update_memory_block(self, block_id: str, title: Optional[str], description: Optional[str], content: Optional[List[str]]):
160
+ try:
161
+ update_params = {}
162
+ if title is not None: update_params["name"] = title
163
+ if description is not None: update_params["description"] = description
164
+ if content is not None: update_params["content"] = "\n".join(content)
165
+
166
+ await asyncio.to_thread(
167
+ self.client.agents.blocks.modify,
168
+ agent_id=self.agent_id,
169
+ block_id=block_id,
170
+ **update_params
171
+ )
172
+ except Exception as e:
173
+ raise HTTPException(status_code=500, detail=f"Error updating memory block in Letta: {e}")
174
+
175
+ async def delete_memory_block(self, block_id: str):
176
+ try:
177
+ await asyncio.to_thread(self.client.agents.blocks.delete, agent_id=self.agent_id, block_id=block_id)
178
+ except Exception as e:
179
+ raise HTTPException(status_code=500, detail=f"Error deleting memory block in Letta: {e}")
180
+
181
+ # Unsupported Features for Letta Agent
182
+ async def get_init_memory(self) -> Dict[str, Any]:
183
+ raise HTTPException(status_code=400, detail="Getting init memory is not supported for Letta agent")
184
+
185
+ async def update_init_memory(self, memory: Dict[str, Any]):
186
+ raise HTTPException(status_code=400, detail="Updating init memory is not supported for Letta agent")
187
+
188
+ async def get_temp_memory(self) -> List[Dict[str, Any]]:
189
+ raise HTTPException(status_code=400, detail="Getting temp memory is not supported for Letta agent")
190
+
191
+ async def add_temp_memory(self, content: str, role: str):
192
+ raise HTTPException(status_code=400, detail="Adding to temp memory is not supported for Letta agent")
193
+
194
+ async def clear_temp_memory(self):
195
+ raise HTTPException(status_code=400, detail="Clearing temp memory is not supported for Letta agent")
196
+
197
+ async def get_available_tools(self) -> str:
198
+ return "Tool management is not supported for Letta agent via this API"
199
+
200
+ async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Any:
201
+ raise HTTPException(status_code=400, detail="Tool execution is not supported for Letta agent via this API")
202
+
203
+ async def get_message_history(self, limit: int = 20) -> List[Dict[str, Any]]:
204
+ # Letta's history is managed on their server and not directly exposed.
205
+ # Return an empty list to prevent breaking internal consumers like the admin panel.
206
+ return []
@@ -1,11 +1,16 @@
1
- # backend/stream_manager.py
1
+ # neuro_simulator/services/stream.py
2
2
  import asyncio
3
- import time
3
+ import logging
4
4
  import os
5
- from .config import config_manager
6
- import neuro_simulator.shared_state as shared_state
5
+ import time
6
+
7
7
  from mutagen.mp4 import MP4, MP4StreamInfoError
8
8
 
9
+ from ..core.config import config_manager
10
+ from ..utils.state import app_state
11
+
12
+ logger = logging.getLogger(__name__.replace("neuro_simulator", "server", 1))
13
+
9
14
  class LiveStreamManager:
10
15
  class NeuroAvatarStage:
11
16
  HIDDEN = "hidden"
@@ -20,121 +25,108 @@ class LiveStreamManager:
20
25
 
21
26
  event_queue: asyncio.Queue = asyncio.Queue()
22
27
 
23
- # Get the working directory where media files are located
24
- _working_dir = os.getcwd() # This will be set by cli.py to the --dir path
25
- _WELCOME_VIDEO_PATH_BACKEND = os.path.join(_working_dir, "media", "neuro_start.mp4")
28
+ _working_dir = os.getcwd()
29
+ _WELCOME_VIDEO_PATH_BACKEND = os.path.join(_working_dir, "assets", "neuro_start.mp4")
26
30
  _WELCOME_VIDEO_DURATION_SEC_DEFAULT = 10.0
27
31
 
28
- # --- NEW: 使用 mutagen 获取时长的静态方法 ---
29
32
  @staticmethod
30
- def _get_video_duration_mutagen_static(video_path: str) -> float:
31
- """使用 mutagen 库可靠地获取 MP4 视频时长。"""
33
+ def _get_video_duration(video_path: str) -> float:
34
+ """Gets the duration of an MP4 video file using mutagen."""
32
35
  if not os.path.exists(video_path):
33
- print(f"警告: 视频文件 '{video_path}' 不存在。将使用默认值。")
36
+ logger.warning(f"Video file '{video_path}' not found. Using default duration.")
34
37
  return LiveStreamManager._WELCOME_VIDEO_DURATION_SEC_DEFAULT
35
38
  try:
36
39
  video = MP4(video_path)
37
40
  duration = video.info.length
38
- print(f"已通过 mutagen 成功读取视频 '{video_path}' 时长: {duration:.2f} 秒。")
41
+ logger.info(f"Successfully read video duration for '{video_path}': {duration:.2f}s.")
39
42
  return duration
40
43
  except MP4StreamInfoError:
41
- print(f"警告: mutagen 无法解析 '{video_path}' 的流信息。它可能不是一个标准的MP4文件。将使用默认值。")
44
+ logger.warning(f"Could not parse stream info for '{video_path}'. Using default duration.")
42
45
  return LiveStreamManager._WELCOME_VIDEO_DURATION_SEC_DEFAULT
43
46
  except Exception as e:
44
- print(f"使用 mutagen 获取视频时长时出错: {e}. 将使用默认视频时长。")
47
+ logger.error(f"Error getting video duration: {e}. Using default duration.")
45
48
  return LiveStreamManager._WELCOME_VIDEO_DURATION_SEC_DEFAULT
46
49
 
47
- # --- 核心修改点: 调用新的 mutagen 方法 ---
48
- _WELCOME_VIDEO_DURATION_SEC = _get_video_duration_mutagen_static(_WELCOME_VIDEO_PATH_BACKEND)
50
+ _WELCOME_VIDEO_DURATION_SEC = _get_video_duration(_WELCOME_VIDEO_PATH_BACKEND)
49
51
  AVATAR_INTRO_TOTAL_DURATION_SEC = 3.0
50
52
 
51
53
  def __init__(self):
52
54
  self._current_phase: str = self.StreamPhase.OFFLINE
53
55
  self._stream_start_global_time: float = 0.0
54
56
  self._is_neuro_speaking: bool = False
55
- # Note: We don't call reset_stream_state here to avoid asyncio issues during initialization
56
- print("LiveStreamManager 初始化完成。")
57
+ logger.info("LiveStreamManager initialized.")
57
58
 
58
59
  async def broadcast_stream_metadata(self):
59
- """将直播元数据放入事件队列进行广播。"""
60
+ """Puts the stream metadata into the event queue for broadcasting."""
60
61
  metadata_event = {
61
62
  "type": "update_stream_metadata",
62
63
  **config_manager.settings.stream_metadata.model_dump()
63
64
  }
64
65
  await self.event_queue.put(metadata_event)
65
- print("直播元数据已放入广播队列。")
66
66
 
67
67
  def reset_stream_state(self):
68
- """重置直播状态到初始离线状态。"""
68
+ """Resets the stream state to offline."""
69
69
  self._current_phase = self.StreamPhase.OFFLINE
70
70
  self._stream_start_global_time = 0.0
71
71
  self._is_neuro_speaking = False
72
72
  while not self.event_queue.empty():
73
73
  self.event_queue.get_nowait()
74
- shared_state.live_phase_started_event.clear()
75
- print("直播状态已重置为 OFFLINE")
76
- # Don't create task during initialization, will be called properly in main.py startup
74
+ app_state.live_phase_started_event.clear()
75
+ logger.info("Stream state has been reset to OFFLINE.")
77
76
 
78
77
  async def start_new_stream_cycle(self):
79
- """开始一个全新的直播周期,从欢迎视频开始。"""
78
+ """Starts a new stream cycle, from the welcome video onwards."""
80
79
  if self._current_phase != self.StreamPhase.OFFLINE:
81
- print("警告: 直播已在进行中,无法开始新周期。")
82
80
  return
83
81
 
84
- print("正在启动新的直播周期...")
82
+ logger.info("Starting new stream cycle...")
85
83
  self._stream_start_global_time = time.time()
86
84
 
87
- # 清除旧的上下文历史
88
- from .builtin_agent import local_agent
89
- if local_agent is not None:
90
- await local_agent.memory_manager.reset_context()
91
- print("旧的上下文历史已清除。")
85
+ from ..core.agent_factory import create_agent
86
+ try:
87
+ agent = await create_agent()
88
+ await agent.reset_memory()
89
+ logger.info("Agent memory has been reset for the new stream cycle.")
90
+ except Exception as e:
91
+ logger.error(f"Failed to reset agent memory: {e}", exc_info=True)
92
92
 
93
93
  self._current_phase = self.StreamPhase.INITIALIZING
94
- print(f"进入阶段: {self.StreamPhase.INITIALIZING}. 广播 'play_welcome_video' 事件。")
95
94
  await self.event_queue.put({
96
95
  "type": "play_welcome_video",
97
96
  "progress": 0,
98
97
  "elapsed_time_sec": self.get_elapsed_time()
99
98
  })
100
99
 
101
- print(f"等待视频时长: {self._WELCOME_VIDEO_DURATION_SEC:.2f} 秒")
102
100
  await asyncio.sleep(self._WELCOME_VIDEO_DURATION_SEC)
103
101
 
104
102
  self._current_phase = self.StreamPhase.AVATAR_INTRO
105
- print(f"进入阶段: {self.StreamPhase.AVATAR_INTRO}. 广播 'start_avatar_intro' 事件。")
106
103
  await self.event_queue.put({"type": "start_avatar_intro", "elapsed_time_sec": self.get_elapsed_time()})
107
104
 
108
- print(f"等待立绘入场动画: {self.AVATAR_INTRO_TOTAL_DURATION_SEC} 秒")
109
105
  await asyncio.sleep(self.AVATAR_INTRO_TOTAL_DURATION_SEC)
110
106
 
111
107
  self._current_phase = self.StreamPhase.LIVE
112
- print(f"进入阶段: {self.StreamPhase.LIVE}. 广播 'enter_live_phase' 事件。")
113
108
  await self.event_queue.put({"type": "enter_live_phase", "elapsed_time_sec": self.get_elapsed_time()})
114
109
 
115
- shared_state.live_phase_started_event.set()
116
- print("Live phase started event has been set.")
110
+ app_state.live_phase_started_event.set()
111
+ logger.info("Live phase started event has been set.")
117
112
 
118
113
  def set_neuro_speaking_status(self, speaking: bool):
119
- """设置并广播Neuro是否正在说话。"""
114
+ """Sets and broadcasts the agent's speaking status."""
120
115
  if self._is_neuro_speaking != speaking:
121
116
  self._is_neuro_speaking = speaking
122
- # Only create task if we're in an event loop
123
117
  try:
124
- asyncio.get_running_loop()
125
118
  asyncio.create_task(self.event_queue.put({"type": "neuro_is_speaking", "speaking": speaking}))
126
119
  except RuntimeError:
127
- # No running loop, just put directly (this might block)
128
120
  self.event_queue.put_nowait({"type": "neuro_is_speaking", "speaking": speaking})
129
121
 
130
122
  def get_elapsed_time(self) -> float:
131
- """获取从直播开始到现在的总时长(秒)。"""
123
+ """Gets the total elapsed time since the stream started."""
132
124
  if self._stream_start_global_time > 0:
133
125
  return time.time() - self._stream_start_global_time
134
126
  return 0.0
135
127
 
136
128
  def get_initial_state_for_client(self) -> dict:
137
- """为新连接的客户端生成当前的初始状态事件。"""
129
+ """Generates the initial state event for a newly connected client."""
138
130
  elapsed_time = self.get_elapsed_time()
139
131
  base_state = {"elapsed_time_sec": elapsed_time}
140
132
  if self._current_phase == self.StreamPhase.INITIALIZING:
@@ -145,5 +137,5 @@ class LiveStreamManager:
145
137
  return {"type": "enter_live_phase", "is_speaking": self._is_neuro_speaking, **base_state}
146
138
  return {"type": "offline", **base_state}
147
139
 
148
- # 全局单例
149
- live_stream_manager = LiveStreamManager()
140
+ # Global singleton instance
141
+ live_stream_manager = LiveStreamManager()
@@ -0,0 +1 @@
1
+ # This file makes the 'utils' directory a Python package.
@@ -0,0 +1,90 @@
1
+ # neuro_simulator/utils/logging.py
2
+ import logging
3
+ import sys
4
+ from collections import deque
5
+ from typing import Deque
6
+
7
+ # Define a single, consistent format for all logs
8
+ LOG_FORMAT = '%(asctime)s - [%(name)-32s] - %(levelname)-8s - %(message)s'
9
+ DATE_FORMAT = '%H:%M:%S'
10
+
11
+ # --- Custom Colored Formatter for Console Output ---
12
+ class ColoredFormatter(logging.Formatter):
13
+ """A custom log formatter that adds color ONLY to the log level name."""
14
+
15
+ GREY = "\x1b[38;20m"
16
+ GREEN = "\x1b[32m"
17
+ YELLOW = "\x1b[33m"
18
+ RED = "\x1b[31m"
19
+ BOLD_RED = "\x1b[31;1m"
20
+ RESET = "\x1b[0m"
21
+
22
+ def __init__(self, fmt):
23
+ super().__init__(fmt, datefmt=DATE_FORMAT)
24
+ self.level_colors = {
25
+ logging.DEBUG: self.GREY,
26
+ logging.INFO: self.GREEN,
27
+ logging.WARNING: self.YELLOW,
28
+ logging.ERROR: self.RED,
29
+ logging.CRITICAL: self.BOLD_RED
30
+ }
31
+
32
+ def format(self, record):
33
+ # Create a copy of the record to avoid modifying the original
34
+ record_copy = logging.makeLogRecord(record.__dict__)
35
+
36
+ # Get the color for the level
37
+ color = self.level_colors.get(record_copy.levelno)
38
+
39
+ # If a color is found, apply it to the levelname
40
+ if color:
41
+ record_copy.levelname = f"{color}{record_copy.levelname}{self.RESET}"
42
+
43
+ # Use the parent class's formatter with the modified record
44
+ return super().format(record_copy)
45
+
46
+ # Create two independent, bounded queues for different log sources
47
+ server_log_queue: Deque[str] = deque(maxlen=1000)
48
+ agent_log_queue: Deque[str] = deque(maxlen=1000)
49
+
50
+ class QueueLogHandler(logging.Handler):
51
+ """A handler that sends log records to a specified queue."""
52
+ def __init__(self, queue: Deque[str]):
53
+ super().__init__()
54
+ self.queue = queue
55
+
56
+ def emit(self, record: logging.LogRecord):
57
+ log_entry = self.format(record)
58
+ self.queue.append(log_entry)
59
+
60
+ def configure_server_logging():
61
+ """Configures the server (root) logger to use the server_log_queue and a standard format."""
62
+ # Non-colored formatter for the queue (for the web UI)
63
+ queue_formatter = logging.Formatter(LOG_FORMAT, datefmt=DATE_FORMAT)
64
+
65
+ # Colored formatter for the console
66
+ console_formatter = ColoredFormatter(LOG_FORMAT)
67
+
68
+ # Create a handler that writes to the server log queue for the web UI
69
+ server_queue_handler = QueueLogHandler(server_log_queue)
70
+ server_queue_handler.setFormatter(queue_formatter)
71
+
72
+ # Create a handler that writes to the console (stdout)
73
+ console_handler = logging.StreamHandler(sys.stdout)
74
+ console_handler.setFormatter(console_formatter)
75
+
76
+ # Get the root logger, clear any existing handlers, and add our new ones
77
+ root_logger = logging.getLogger()
78
+ if root_logger.hasHandlers():
79
+ root_logger.handlers.clear()
80
+ root_logger.addHandler(server_queue_handler)
81
+ root_logger.addHandler(console_handler)
82
+ root_logger.setLevel(logging.INFO)
83
+
84
+ # Force uvicorn loggers to use our handlers
85
+ for logger_name in ["uvicorn", "uvicorn.access", "uvicorn.error"]:
86
+ uvicorn_logger = logging.getLogger(logger_name)
87
+ uvicorn_logger.handlers = [server_queue_handler, console_handler]
88
+ uvicorn_logger.propagate = False # Prevent double-logging
89
+
90
+ root_logger.info("Server logging configured with unified formatting for queue and console.")
@@ -0,0 +1,67 @@
1
+ # neuro_simulator/utils/process.py
2
+ import asyncio
3
+ import logging
4
+
5
+ logger = logging.getLogger(__name__.replace("neuro_simulator", "server", 1))
6
+
7
+ class ProcessManager:
8
+ """Manages the lifecycle of core background tasks for the stream."""
9
+
10
+ def __init__(self):
11
+ self._tasks: list[asyncio.Task] = []
12
+ self._is_running = False
13
+ logger.info("ProcessManager initialized.")
14
+
15
+ @property
16
+ def is_running(self) -> bool:
17
+ """Returns True if the core stream processes are running."""
18
+ return self._is_running
19
+
20
+ def start_live_processes(self):
21
+ """
22
+ Starts all background tasks related to the live stream.
23
+ Imports are done locally to prevent circular dependencies.
24
+ """
25
+ if self.is_running:
26
+ logger.warning("Processes are already running.")
27
+ return
28
+
29
+ logger.info("Starting core stream processes...")
30
+ from ..core.application import generate_audience_chat_task, neuro_response_cycle, broadcast_events_task
31
+ from ..services.stream import live_stream_manager
32
+ from ..utils.queue import clear_all_queues
33
+ from ..core.agent_factory import create_agent
34
+
35
+ asyncio.create_task(create_agent())
36
+
37
+ clear_all_queues()
38
+ live_stream_manager.reset_stream_state()
39
+
40
+ self._tasks.append(asyncio.create_task(live_stream_manager.start_new_stream_cycle()))
41
+ self._tasks.append(asyncio.create_task(broadcast_events_task()))
42
+ self._tasks.append(asyncio.create_task(generate_audience_chat_task()))
43
+ self._tasks.append(asyncio.create_task(neuro_response_cycle()))
44
+
45
+ self._is_running = True
46
+ logger.info(f"Core processes started: {len(self._tasks)} tasks.")
47
+
48
+ def stop_live_processes(self):
49
+ """Stops and cleans up all running background tasks."""
50
+ if not self.is_running:
51
+ return
52
+
53
+ logger.info(f"Stopping {len(self._tasks)} core tasks...")
54
+ for task in self._tasks:
55
+ if not task.done():
56
+ task.cancel()
57
+
58
+ self._tasks.clear()
59
+ self._is_running = False
60
+
61
+ from ..services.stream import live_stream_manager
62
+ live_stream_manager.reset_stream_state()
63
+
64
+ logger.info("All core tasks have been stopped.")
65
+
66
+ # Global singleton instance
67
+ process_manager = ProcessManager()
@@ -1,29 +1,42 @@
1
- # backend/stream_chat.py
1
+ # neuro_simulator/utils/queue.py
2
+ """Manages the chat queues for audience and agent input."""
3
+
4
+ import logging
2
5
  from collections import deque
3
- from .config import config_manager
6
+ from pathlib import Path
7
+
8
+ from ..core.config import config_manager
9
+
10
+ logger = logging.getLogger(__name__.replace("neuro_simulator", "server", 1))
4
11
 
5
- # 使用 settings 对象来初始化 deque maxlen
12
+ # Use settings from the config manager to initialize deque maxlen
6
13
  audience_chat_buffer: deque[dict] = deque(maxlen=config_manager.settings.performance.audience_chat_buffer_max_size)
7
14
  neuro_input_queue: deque[dict] = deque(maxlen=config_manager.settings.performance.neuro_input_queue_max_size)
8
15
 
9
16
  def clear_all_queues():
17
+ """Clears all chat queues."""
10
18
  audience_chat_buffer.clear()
11
19
  neuro_input_queue.clear()
12
- print("所有聊天队列已清空。")
20
+ logger.info("All chat queues have been cleared.")
13
21
 
14
22
  def add_to_audience_buffer(chat_item: dict):
23
+ """Adds a chat item to the audience buffer."""
15
24
  audience_chat_buffer.append(chat_item)
16
25
 
17
26
  def add_to_neuro_input_queue(chat_item: dict):
27
+ """Adds a chat item to the agent's input queue."""
18
28
  neuro_input_queue.append(chat_item)
19
29
 
20
30
  def get_recent_audience_chats(limit: int) -> list[dict]:
31
+ """Returns a list of recent chats from the audience buffer."""
21
32
  return list(audience_chat_buffer)[-limit:]
22
33
 
23
34
  def get_all_neuro_input_chats() -> list[dict]:
35
+ """Returns all chats from the agent's input queue and clears it."""
24
36
  chats = list(neuro_input_queue)
25
37
  neuro_input_queue.clear()
26
38
  return chats
27
39
 
28
40
  def is_neuro_input_queue_empty() -> bool:
41
+ """Checks if the agent's input queue is empty."""
29
42
  return not bool(neuro_input_queue)
@@ -0,0 +1,14 @@
1
+ # neuro_simulator/utils/state.py
2
+ """Manages the shared state of the application using a singleton class."""
3
+
4
+ import asyncio
5
+
6
+ class AppState:
7
+ """A singleton class to hold all shared application state."""
8
+ def __init__(self):
9
+ self.live_phase_started_event = asyncio.Event()
10
+ self.neuro_last_speech_lock = asyncio.Lock()
11
+ self.neuro_last_speech: str = "Neuro-Sama has just started the stream and hasn't said anything yet."
12
+
13
+ # Create a single, globally accessible instance of the AppState.
14
+ app_state = AppState()