neuro-simulator 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuro_simulator/__init__.py +1 -0
- neuro_simulator/audio_synthesis.py +66 -0
- neuro_simulator/chatbot.py +104 -0
- neuro_simulator/cli.py +132 -0
- neuro_simulator/config.py +226 -0
- neuro_simulator/letta.py +135 -0
- neuro_simulator/log_handler.py +29 -0
- neuro_simulator/main.py +526 -0
- neuro_simulator/media/neuro_start.mp4 +0 -0
- neuro_simulator/process_manager.py +67 -0
- neuro_simulator/settings.yaml.example +143 -0
- neuro_simulator/shared_state.py +11 -0
- neuro_simulator/stream_chat.py +29 -0
- neuro_simulator/stream_manager.py +143 -0
- neuro_simulator/websocket_manager.py +51 -0
- neuro_simulator-0.0.1.dist-info/METADATA +181 -0
- neuro_simulator-0.0.1.dist-info/RECORD +20 -0
- neuro_simulator-0.0.1.dist-info/WHEEL +5 -0
- neuro_simulator-0.0.1.dist-info/entry_points.txt +2 -0
- neuro_simulator-0.0.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,29 @@
|
|
1
|
+
# backend/log_handler.py
|
2
|
+
import logging
|
3
|
+
import asyncio
|
4
|
+
from collections import deque
|
5
|
+
|
6
|
+
# Bounded queue of formatted log lines; maxlen caps memory growth
# (oldest entries are silently dropped once 1000 lines are buffered).
log_queue: deque = deque(maxlen=1000)
|
8
|
+
|
9
|
+
class QueueLogHandler(logging.Handler):
    """Logging handler that appends formatted records to the module-level queue."""

    def emit(self, record: logging.LogRecord):
        # Format and enqueue in one step; the bounded deque discards the
        # oldest entry automatically when it is full.
        log_queue.append(self.format(record))
|
15
|
+
|
16
|
+
def configure_logging():
    """Attach a QueueLogHandler to the root logger.

    Fix: made idempotent. Previously every call added another handler to the
    root logger, so after a second call each log record was enqueued twice.
    """
    root_logger = logging.getLogger()
    # Guard: if a queue handler is already installed (e.g. on app restart),
    # do nothing instead of stacking duplicates.
    if any(isinstance(h, QueueLogHandler) for h in root_logger.handlers):
        return

    queue_handler = QueueLogHandler()
    # Compact single-line format with a short timestamp for the dashboard view.
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%H:%M:%S')
    queue_handler.setFormatter(formatter)

    # Attaching to the root logger captures logs from all modules
    # (fastapi, uvicorn, letta, ...).
    root_logger.addHandler(queue_handler)
    root_logger.setLevel(logging.INFO)

    print("日志系统已配置,将日志输出到内部队列。")
|
neuro_simulator/main.py
ADDED
@@ -0,0 +1,526 @@
|
|
1
|
+
# backend/main.py
|
2
|
+
|
3
|
+
import asyncio
|
4
|
+
import json
|
5
|
+
import traceback
|
6
|
+
import random
|
7
|
+
import re
|
8
|
+
import time
|
9
|
+
import os
|
10
|
+
import sys
|
11
|
+
from typing import Optional
|
12
|
+
|
13
|
+
from fastapi import (
|
14
|
+
FastAPI, HTTPException, WebSocket, WebSocketDisconnect, Request, Form, Depends, status
|
15
|
+
)
|
16
|
+
from pydantic import BaseModel
|
17
|
+
from fastapi.middleware.cors import CORSMiddleware
|
18
|
+
from fastapi.templating import Jinja2Templates
|
19
|
+
from fastapi.responses import RedirectResponse, HTMLResponse
|
20
|
+
from starlette.websockets import WebSocketState
|
21
|
+
from starlette.status import HTTP_303_SEE_OTHER
|
22
|
+
from fastapi.security import APIKeyCookie
|
23
|
+
|
24
|
+
# --- 核心模块导入 ---
|
25
|
+
from .config import config_manager, AppSettings
|
26
|
+
from .process_manager import process_manager
|
27
|
+
from .log_handler import configure_logging, log_queue
|
28
|
+
|
29
|
+
# --- 功能模块导入 ---
|
30
|
+
from .chatbot import ChatbotManager, get_dynamic_audience_prompt
|
31
|
+
from .letta import get_neuro_response, reset_neuro_agent_memory
|
32
|
+
from .audio_synthesis import synthesize_audio_segment
|
33
|
+
from .stream_chat import (
|
34
|
+
add_to_audience_buffer, add_to_neuro_input_queue,
|
35
|
+
get_recent_audience_chats, is_neuro_input_queue_empty, get_all_neuro_input_chats
|
36
|
+
)
|
37
|
+
from .websocket_manager import connection_manager
|
38
|
+
from .stream_manager import live_stream_manager
|
39
|
+
import neuro_simulator.shared_state as shared_state
|
40
|
+
|
41
|
+
# --- FastAPI application and middleware setup ---
app = FastAPI(title="Neuro-Sama Simulator Backend")
app.add_middleware(
    CORSMiddleware,
    allow_origins=config_manager.settings.server.client_origins + ["http://localhost:8080", "https://dashboard.live.jiahui.cafe"], # extra origins for the dashboard web UI
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["X-API-Token"], # let browser clients read the API token header
)

# --- Security and authentication ---
# Request header that carries the panel password (see get_api_token below).
API_TOKEN_HEADER = "X-API-Token"
|
54
|
+
|
55
|
+
async def get_api_token(request: Request):
    """FastAPI dependency that validates the control-panel API token.

    Returns True when access is allowed. When no panel password is
    configured, all requests are accepted. Raises HTTP 401 on a missing
    or wrong token.
    """
    import hmac  # stdlib; local import keeps the module header untouched

    password = config_manager.settings.server.panel_password
    if not password:
        # No password set, allow access
        return True

    # Check the token from the request header. compare_digest gives a
    # constant-time comparison, avoiding a timing side channel on the
    # untrusted header value.
    header_token = request.headers.get(API_TOKEN_HEADER)
    if header_token and hmac.compare_digest(header_token, password):
        return True

    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API token",
        headers={"WWW-Authenticate": "Bearer"},
    )
|
72
|
+
|
73
|
+
# -------------------------------------------------------------
|
74
|
+
# --- 后台任务函数定义 ---
|
75
|
+
# -------------------------------------------------------------
|
76
|
+
|
77
|
+
async def broadcast_events_task():
    """Drain live_stream_manager's event queue and broadcast each event to all clients."""
    queue = live_stream_manager.event_queue
    while True:
        try:
            item = await queue.get()
            print(f"广播事件: {item}")
            await connection_manager.broadcast(item)
            queue.task_done()
        except asyncio.CancelledError:
            print("广播任务被取消。")
            break
        except Exception as err:
            # Keep the loop alive on broadcast failures; just report them.
            print(f"广播事件时出错: {err}")
|
90
|
+
|
91
|
+
async def fetch_and_process_audience_chats():
    """Generate one batch of simulated audience chats and broadcast them.

    Asks the chatbot LLM for raw chat text, parses it into
    {"username", "text"} dicts, feeds them into the audience buffer and
    Neuro's input queue, and broadcasts each with a small random delay.
    """
    if not chatbot_manager or not chatbot_manager.client:
        print("错误: Chatbot manager 未初始化,跳过聊天生成。")
        return
    try:
        sim_cfg = config_manager.settings.audience_simulation
        prompt = await get_dynamic_audience_prompt()
        raw_text = await chatbot_manager.client.generate_chat_messages(
            prompt=prompt,
            max_tokens=sim_cfg.max_output_tokens
        )

        # Parse "username: message" lines; blocklisted names are swapped for a
        # random pool name, and colon-less lines get a random name outright.
        chats = []
        for raw_line in raw_text.split('\n'):
            stripped = raw_line.strip()
            if ':' in stripped:
                name_part, body = stripped.split(':', 1)
                name = name_part.strip()
                if name in sim_cfg.username_blocklist:
                    name = random.choice(sim_cfg.username_pool)
                if name and body.strip():
                    chats.append({"username": name, "text": body.strip()})
            elif stripped:
                chats.append({"username": random.choice(sim_cfg.username_pool), "text": stripped})

        # Cap at chats_per_batch, then fan each chat out to both queues and
        # the websocket clients, jittering to look organic.
        for chat in chats[:sim_cfg.chats_per_batch]:
            add_to_audience_buffer(chat)
            add_to_neuro_input_queue(chat)
            await connection_manager.broadcast({"type": "chat_message", **chat, "is_user_message": False})
            await asyncio.sleep(random.uniform(0.1, 0.4))
    except Exception:
        print("错误: 单个聊天生成任务失败。详情见 traceback。")
        traceback.print_exc()
|
127
|
+
|
128
|
+
async def generate_audience_chat_task():
    """Periodically schedule chat-generation jobs.

    Spawns fetch_and_process_audience_chats() every
    chat_generation_interval_sec seconds without awaiting it, so a slow
    LLM call cannot delay the schedule.

    Fix: the event loop keeps only weak references to tasks, so the old
    fire-and-forget create_task() could let a job be garbage-collected
    mid-flight. We now hold strong references until each job finishes.
    """
    print("观众聊天调度器: 任务启动。")
    pending_jobs: set[asyncio.Task] = set()
    while True:
        try:
            job = asyncio.create_task(fetch_and_process_audience_chats())
            pending_jobs.add(job)
            # Drop the reference once the job completes.
            job.add_done_callback(pending_jobs.discard)
            await asyncio.sleep(config_manager.settings.audience_simulation.chat_generation_interval_sec)
        except asyncio.CancelledError:
            print("观众聊天调度器任务被取消。")
            break
|
138
|
+
|
139
|
+
async def neuro_response_cycle():
    """Neuro's core response loop.

    Waits for the live phase to start, then repeatedly: samples chats
    from the input queue, asks Letta for a response, splits it into
    sentences, synthesizes TTS for each, and broadcasts the speech
    segments paced by their audio duration.
    """
    await shared_state.live_phase_started_event.wait()
    print("Neuro响应周期: 任务启动。")
    is_first_response = True

    while True:
        try:
            if is_first_response:
                # First iteration: seed the queue with the configured greeting
                # instead of waiting for audience chat.
                print("首次响应: 注入开场白。")
                add_to_neuro_input_queue({"username": "System", "text": config_manager.settings.neuro_behavior.initial_greeting})
                is_first_response = False
            elif is_neuro_input_queue_empty():
                # Nothing to respond to; poll again in a second.
                await asyncio.sleep(1)
                continue

            # Drain the queue and sample at most input_chat_sample_size chats.
            current_queue_snapshot = get_all_neuro_input_chats()
            sample_size = min(config_manager.settings.neuro_behavior.input_chat_sample_size, len(current_queue_snapshot))
            selected_chats = random.sample(current_queue_snapshot, sample_size)
            ai_full_response_text = await get_neuro_response(selected_chats)

            # Record the latest speech under the lock; note that `continue`
            # exits the `async with`, releasing the lock before looping.
            async with shared_state.neuro_last_speech_lock:
                if ai_full_response_text and ai_full_response_text.strip():
                    shared_state.neuro_last_speech = ai_full_response_text
                else:
                    shared_state.neuro_last_speech = "(Neuro-Sama is currently silent...)"
                    print("警告: 从 Letta 获取的响应为空,跳过本轮。")
                    continue

            # Split the response into sentences on terminal punctuation.
            sentences = [s.strip() for s in re.split(r'(?<=[.!?])\s+', ai_full_response_text.replace('\n', ' ').strip()) if s.strip()]
            if not sentences:
                continue

            # Synthesize all sentences concurrently; failures for individual
            # sentences are filtered out below instead of aborting the batch.
            synthesis_tasks = [synthesize_audio_segment(s) for s in sentences]
            synthesis_results = await asyncio.gather(*synthesis_tasks, return_exceptions=True)

            # Each successful result is (audio_base64, duration_seconds).
            speech_packages = [
                {"segment_id": i, "text": sentences[i], "audio_base64": res[0], "duration": res[1]}
                for i, res in enumerate(synthesis_results) if not isinstance(res, Exception)
            ]

            if not speech_packages:
                # All TTS calls failed: signal the clients and back off.
                print("错误: 所有句子的 TTS 合成都失败了。")
                await connection_manager.broadcast({"type": "neuro_error_signal"})
                await asyncio.sleep(15)
                continue

            # Broadcast segments one by one, sleeping for each segment's audio
            # duration so playback stays in sync across clients.
            live_stream_manager.set_neuro_speaking_status(True)
            for package in speech_packages:
                broadcast_package = {"type": "neuro_speech_segment", **package, "is_end": False}
                await connection_manager.broadcast(broadcast_package)
                await asyncio.sleep(package['duration'])

            # Terminal marker so clients know the utterance is complete.
            await connection_manager.broadcast({"type": "neuro_speech_segment", "is_end": True})
            live_stream_manager.set_neuro_speaking_status(False)

            await asyncio.sleep(config_manager.settings.neuro_behavior.post_speech_cooldown_sec)
        except asyncio.CancelledError:
            print("Neuro 响应周期任务被取消。")
            live_stream_manager.set_neuro_speaking_status(False)
            break
        except Exception:
            # Any other failure: clear the speaking flag and retry after 10s.
            print("Neuro响应周期发生严重错误,将在10秒后恢复。详情见 traceback。")
            traceback.print_exc()
            live_stream_manager.set_neuro_speaking_status(False)
            await asyncio.sleep(10)
|
205
|
+
|
206
|
+
|
207
|
+
# -------------------------------------------------------------
|
208
|
+
# --- 应用生命周期事件 ---
|
209
|
+
# -------------------------------------------------------------
|
210
|
+
|
211
|
+
@app.on_event("startup")
|
212
|
+
async def startup_event():
|
213
|
+
"""应用启动时执行。"""
|
214
|
+
global chatbot_manager
|
215
|
+
configure_logging()
|
216
|
+
|
217
|
+
# 实例化管理器
|
218
|
+
chatbot_manager = ChatbotManager()
|
219
|
+
|
220
|
+
# 定义并注册回调
|
221
|
+
async def metadata_callback(updated_settings: AppSettings):
|
222
|
+
await live_stream_manager.broadcast_stream_metadata()
|
223
|
+
|
224
|
+
config_manager.register_update_callback(metadata_callback)
|
225
|
+
config_manager.register_update_callback(chatbot_manager.handle_config_update)
|
226
|
+
|
227
|
+
print("FastAPI 应用已启动。请通过外部控制面板控制直播进程。")
|
228
|
+
|
229
|
+
@app.on_event("shutdown")
|
230
|
+
async def shutdown_event():
|
231
|
+
"""应用关闭时执行。"""
|
232
|
+
if process_manager.is_running:
|
233
|
+
process_manager.stop_live_processes()
|
234
|
+
print("FastAPI 应用已关闭。")
|
235
|
+
|
236
|
+
|
237
|
+
# -------------------------------------------------------------
|
238
|
+
# --- 直播控制 API 端点 ---
|
239
|
+
# -------------------------------------------------------------
|
240
|
+
|
241
|
+
@app.post("/api/stream/start", tags=["Stream Control"], dependencies=[Depends(get_api_token)])
|
242
|
+
async def api_start_stream():
|
243
|
+
"""启动直播"""
|
244
|
+
if not process_manager.is_running:
|
245
|
+
process_manager.start_live_processes()
|
246
|
+
return {"status": "success", "message": "直播已启动"}
|
247
|
+
else:
|
248
|
+
return {"status": "info", "message": "直播已在运行"}
|
249
|
+
|
250
|
+
@app.post("/api/stream/stop", tags=["Stream Control"], dependencies=[Depends(get_api_token)])
|
251
|
+
async def api_stop_stream():
|
252
|
+
"""停止直播"""
|
253
|
+
if process_manager.is_running:
|
254
|
+
process_manager.stop_live_processes()
|
255
|
+
return {"status": "success", "message": "直播已停止"}
|
256
|
+
else:
|
257
|
+
return {"status": "info", "message": "直播未在运行"}
|
258
|
+
|
259
|
+
@app.post("/api/stream/restart", tags=["Stream Control"], dependencies=[Depends(get_api_token)])
|
260
|
+
async def api_restart_stream():
|
261
|
+
"""重启直播"""
|
262
|
+
process_manager.stop_live_processes()
|
263
|
+
await asyncio.sleep(1)
|
264
|
+
process_manager.start_live_processes()
|
265
|
+
return {"status": "success", "message": "直播已重启"}
|
266
|
+
|
267
|
+
@app.get("/api/stream/status", tags=["Stream Control"], dependencies=[Depends(get_api_token)])
|
268
|
+
async def api_get_stream_status():
|
269
|
+
"""获取直播状态"""
|
270
|
+
return {
|
271
|
+
"is_running": process_manager.is_running,
|
272
|
+
"backend_status": "running" if process_manager.is_running else "stopped"
|
273
|
+
}
|
274
|
+
|
275
|
+
# -------------------------------------------------------------
|
276
|
+
# --- 日志 API 端点 ---
|
277
|
+
# -------------------------------------------------------------
|
278
|
+
|
279
|
+
@app.get("/api/logs", tags=["Logs"], dependencies=[Depends(get_api_token)])
|
280
|
+
async def api_get_logs(lines: int = 50):
|
281
|
+
"""获取最近的日志行"""
|
282
|
+
logs_list = list(log_queue)
|
283
|
+
return {"logs": logs_list[-lines:] if len(logs_list) > lines else logs_list}
|
284
|
+
|
285
|
+
# -------------------------------------------------------------
|
286
|
+
# --- WebSocket 端点 ---
|
287
|
+
# -------------------------------------------------------------
|
288
|
+
|
289
|
+
@app.websocket("/ws/stream")
|
290
|
+
async def websocket_stream_endpoint(websocket: WebSocket):
|
291
|
+
await connection_manager.connect(websocket)
|
292
|
+
try:
|
293
|
+
initial_event = live_stream_manager.get_initial_state_for_client()
|
294
|
+
await connection_manager.send_personal_message(initial_event, websocket)
|
295
|
+
|
296
|
+
metadata_event = {"type": "update_stream_metadata", **config_manager.settings.stream_metadata.model_dump()}
|
297
|
+
await connection_manager.send_personal_message(metadata_event, websocket)
|
298
|
+
|
299
|
+
initial_chats = get_recent_audience_chats(config_manager.settings.performance.initial_chat_backlog_limit)
|
300
|
+
for chat in initial_chats:
|
301
|
+
await connection_manager.send_personal_message({"type": "chat_message", **chat, "is_user_message": False}, websocket)
|
302
|
+
await asyncio.sleep(0.01)
|
303
|
+
|
304
|
+
while True:
|
305
|
+
raw_data = await websocket.receive_text()
|
306
|
+
data = json.loads(raw_data)
|
307
|
+
if data.get("type") == "user_message":
|
308
|
+
user_message = {"username": data.get("username", "User"), "text": data.get("message", "").strip()}
|
309
|
+
if user_message["text"]:
|
310
|
+
add_to_audience_buffer(user_message)
|
311
|
+
add_to_neuro_input_queue(user_message)
|
312
|
+
broadcast_message = {"type": "chat_message", **user_message, "is_user_message": True}
|
313
|
+
await connection_manager.broadcast(broadcast_message)
|
314
|
+
except WebSocketDisconnect:
|
315
|
+
print(f"客户端 {websocket.client} 已断开连接。")
|
316
|
+
finally:
|
317
|
+
connection_manager.disconnect(websocket)
|
318
|
+
|
319
|
+
@app.websocket("/ws/logs")
|
320
|
+
async def websocket_logs_endpoint(websocket: WebSocket):
|
321
|
+
await websocket.accept()
|
322
|
+
try:
|
323
|
+
for log_entry in list(log_queue):
|
324
|
+
await websocket.send_text(log_entry)
|
325
|
+
|
326
|
+
while websocket.client_state == WebSocketState.CONNECTED:
|
327
|
+
if log_queue:
|
328
|
+
log_entry = log_queue.popleft()
|
329
|
+
await websocket.send_text(log_entry)
|
330
|
+
else:
|
331
|
+
await asyncio.sleep(0.1)
|
332
|
+
except WebSocketDisconnect:
|
333
|
+
print("日志流客户端已断开连接。")
|
334
|
+
finally:
|
335
|
+
print("日志流WebSocket连接关闭。")
|
336
|
+
|
337
|
+
|
338
|
+
# -------------------------------------------------------------
|
339
|
+
# --- 其他 API 端点 ---
|
340
|
+
# -------------------------------------------------------------
|
341
|
+
|
342
|
+
class ErrorSpeechRequest(BaseModel):
    """Request body for the /api/tts/synthesize endpoint."""
    # Text to synthesize.
    text: str
    # Optional TTS voice override; synthesizer default is used when None.
    voice_name: str | None = None
    # Optional pitch override; synthesizer default is used when None.
    pitch: float | None = None
|
347
|
+
@app.post("/api/tts/synthesize", tags=["TTS"], dependencies=[Depends(get_api_token)])
|
348
|
+
async def synthesize_speech_endpoint(request: ErrorSpeechRequest):
|
349
|
+
"""TTS语音合成端点"""
|
350
|
+
try:
|
351
|
+
audio_base64, _ = await synthesize_audio_segment(
|
352
|
+
text=request.text, voice_name=request.voice_name, pitch=request.pitch
|
353
|
+
)
|
354
|
+
return {"audio_base64": audio_base64}
|
355
|
+
except Exception as e:
|
356
|
+
raise HTTPException(status_code=500, detail=str(e))
|
357
|
+
|
358
|
+
# -------------------------------------------------------------
|
359
|
+
# --- 配置管理 API 端点 ---
|
360
|
+
# -------------------------------------------------------------
|
361
|
+
|
362
|
+
def filter_config_for_frontend(settings):
    """Project the settings object onto the frontend-safe whitelist.

    Sensitive values (API keys, panel password, streamer_nickname, ...) are
    excluded by construction: only the sections and fields listed below are
    copied into the returned dict. Sections absent from `settings` are
    simply omitted.
    """
    # Section name -> field names exposed to the frontend.
    exposed_fields = {
        'stream_metadata': (
            'stream_title', 'stream_category', 'stream_tags',
        ),
        'neuro_behavior': (
            'input_chat_sample_size', 'post_speech_cooldown_sec', 'initial_greeting',
        ),
        'audience_simulation': (
            'llm_provider', 'gemini_model', 'openai_model', 'llm_temperature',
            'chat_generation_interval_sec', 'chats_per_batch', 'max_output_tokens',
            'username_blocklist', 'username_pool',
        ),
        'performance': (
            'neuro_input_queue_max_size', 'audience_chat_buffer_max_size',
            'initial_chat_backlog_limit',
        ),
    }

    filtered_settings = {}
    for section, fields in exposed_fields.items():
        if hasattr(settings, section):
            source = getattr(settings, section)
            filtered_settings[section] = {name: getattr(source, name) for name in fields}
    return filtered_settings
|
406
|
+
|
407
|
+
@app.get("/api/configs", tags=["Config Management"], dependencies=[Depends(get_api_token)])
|
408
|
+
async def get_configs():
|
409
|
+
"""获取当前配置(已过滤,不包含敏感信息)"""
|
410
|
+
return filter_config_for_frontend(config_manager.settings)
|
411
|
+
|
412
|
+
@app.patch("/api/configs", tags=["Config Management"], dependencies=[Depends(get_api_token)])
|
413
|
+
async def update_configs(new_settings: dict):
|
414
|
+
"""更新配置(已过滤,不包含敏感信息)"""
|
415
|
+
try:
|
416
|
+
# 过滤掉不应该被修改的配置项
|
417
|
+
filtered_settings = {}
|
418
|
+
|
419
|
+
# 定义允许修改的配置路径
|
420
|
+
allowed_paths = {
|
421
|
+
'stream_metadata.stream_title',
|
422
|
+
'stream_metadata.stream_category',
|
423
|
+
'stream_metadata.stream_tags',
|
424
|
+
'neuro_behavior.input_chat_sample_size',
|
425
|
+
'neuro_behavior.post_speech_cooldown_sec',
|
426
|
+
'neuro_behavior.initial_greeting',
|
427
|
+
'audience_simulation.llm_provider',
|
428
|
+
'audience_simulation.gemini_model',
|
429
|
+
'audience_simulation.openai_model',
|
430
|
+
'audience_simulation.llm_temperature',
|
431
|
+
'audience_simulation.chat_generation_interval_sec',
|
432
|
+
'audience_simulation.chats_per_batch',
|
433
|
+
'audience_simulation.max_output_tokens',
|
434
|
+
'audience_simulation.username_blocklist',
|
435
|
+
'audience_simulation.username_pool',
|
436
|
+
'performance.neuro_input_queue_max_size',
|
437
|
+
'performance.audience_chat_buffer_max_size',
|
438
|
+
'performance.initial_chat_backlog_limit'
|
439
|
+
}
|
440
|
+
|
441
|
+
# 递归函数来检查和过滤配置项
|
442
|
+
def filter_nested_dict(obj, prefix=''):
|
443
|
+
filtered = {}
|
444
|
+
for key, value in obj.items():
|
445
|
+
full_path = f"{prefix}.{key}" if prefix else key
|
446
|
+
if full_path in allowed_paths:
|
447
|
+
filtered[key] = value
|
448
|
+
elif isinstance(value, dict):
|
449
|
+
nested_filtered = filter_nested_dict(value, full_path)
|
450
|
+
if nested_filtered: # 只有当过滤后还有内容时才添加
|
451
|
+
filtered[key] = nested_filtered
|
452
|
+
return filtered
|
453
|
+
|
454
|
+
# 应用过滤
|
455
|
+
filtered_settings = filter_nested_dict(new_settings)
|
456
|
+
|
457
|
+
# 更新配置
|
458
|
+
await config_manager.update_settings(filtered_settings)
|
459
|
+
return filter_config_for_frontend(config_manager.settings)
|
460
|
+
except Exception as e:
|
461
|
+
raise HTTPException(status_code=500, detail=f"更新配置失败: {str(e)}")
|
462
|
+
|
463
|
+
@app.post("/api/configs/reload", tags=["Config Management"], dependencies=[Depends(get_api_token)])
|
464
|
+
async def reload_configs():
|
465
|
+
"""重载配置文件"""
|
466
|
+
try:
|
467
|
+
await config_manager.update_settings({}) # 传入空字典,强制重载并触发回调
|
468
|
+
return {"status": "success", "message": "配置已重载"}
|
469
|
+
except Exception as e:
|
470
|
+
raise HTTPException(status_code=500, detail=f"重载配置失败: {str(e)}")
|
471
|
+
|
472
|
+
@app.get("/api/system/health", tags=["System"])
|
473
|
+
async def health_check():
|
474
|
+
"""健康检查端点,用于监控系统状态"""
|
475
|
+
return {
|
476
|
+
"status": "healthy",
|
477
|
+
"backend_running": True,
|
478
|
+
"process_manager_running": process_manager.is_running,
|
479
|
+
"timestamp": time.time()
|
480
|
+
}
|
481
|
+
|
482
|
+
@app.get("/", tags=["Root"])
|
483
|
+
async def root():
|
484
|
+
return {
|
485
|
+
"message": "Neuro-Sama Simulator Backend",
|
486
|
+
"version": "2.0",
|
487
|
+
"api_docs": "/docs",
|
488
|
+
"api_structure": {
|
489
|
+
"stream": "/api/stream",
|
490
|
+
"configs": "/api/configs",
|
491
|
+
"logs": "/api/logs",
|
492
|
+
"tts": "/api/tts",
|
493
|
+
"system": "/api/system",
|
494
|
+
"websocket": "/ws/stream"
|
495
|
+
}
|
496
|
+
}
|
497
|
+
|
498
|
+
# -------------------------------------------------------------
|
499
|
+
# --- Uvicorn 启动 ---
|
500
|
+
# -------------------------------------------------------------
|
501
|
+
|
502
|
+
def run_server(host: str = None, port: int = None):
    """Run the uvicorn server with optional host/port overrides.

    Args:
        host: bind address; falls back to the configured host when None.
        port: bind port; falls back to the configured port when None.

    Fix: use `is None` checks instead of `or`, so falsy-but-valid
    overrides (explicit port 0 = "pick a free port", empty-string host)
    are honored rather than silently replaced by the config values.
    """
    import uvicorn

    server_host = host if host is not None else config_manager.settings.server.host
    server_port = port if port is not None else config_manager.settings.server.port

    # Run by full module path so this works when launched as an installed package.
    uvicorn.run(
        "neuro_simulator.main:app",
        host=server_host,
        port=server_port,
        reload=False  # keep auto-reload off in production
    )
|
517
|
+
|
518
|
+
if __name__ == "__main__":
|
519
|
+
import uvicorn
|
520
|
+
# 从配置文件中读取host和port设置
|
521
|
+
uvicorn.run(
|
522
|
+
"neuro_simulator.main:app",
|
523
|
+
host=config_manager.settings.server.host,
|
524
|
+
port=config_manager.settings.server.port,
|
525
|
+
reload=False # 生产环境中建议关闭reload
|
526
|
+
)
|
Binary file
|
@@ -0,0 +1,67 @@
|
|
1
|
+
# backend/process_manager.py
|
2
|
+
import asyncio
|
3
|
+
|
4
|
+
class ProcessManager:
    """Manages the lifecycle of the core live-stream background tasks."""

    def __init__(self):
        # Strong references to the running asyncio tasks.
        self._tasks: list[asyncio.Task] = []
        # Lifecycle flag mirrored by the is_running property.
        self._is_running = False
        print("ProcessManager initialized.")

    @property
    def is_running(self) -> bool:
        """Whether the live-stream core tasks are currently running."""
        return self._is_running

    def start_live_processes(self):
        """Start every live-stream background task.

        Task functions are imported lazily from main.py (and friends) to
        avoid a circular import at module load time.
        """
        if self.is_running:
            print("警告: 直播进程已在运行,无法重复启动。")
            return

        print("正在启动直播核心进程...")
        from .main import generate_audience_chat_task, neuro_response_cycle, broadcast_events_task
        from .stream_manager import live_stream_manager
        from .stream_chat import clear_all_queues
        from .letta import reset_neuro_agent_memory

        # Clear state and queues for a fresh stream cycle.
        # NOTE(review): the memory-reset task is fire-and-forget with no
        # strong reference kept — confirm it cannot be garbage-collected
        # before completing.
        clear_all_queues()
        asyncio.create_task(reset_neuro_agent_memory())
        live_stream_manager.reset_stream_state()

        # Create and retain the core tasks.
        self._tasks.append(asyncio.create_task(live_stream_manager.start_new_stream_cycle()))
        self._tasks.append(asyncio.create_task(broadcast_events_task()))
        self._tasks.append(asyncio.create_task(generate_audience_chat_task()))
        self._tasks.append(asyncio.create_task(neuro_response_cycle()))

        self._is_running = True
        print(f"直播核心进程已启动,共 {len(self._tasks)} 个任务。")

    def stop_live_processes(self):
        """Cancel and clear every running background task.

        NOTE(review): cancellation is requested but not awaited, so tasks
        may still be unwinding when this method returns.
        """
        if not self.is_running:
            print("信息: 直播进程未运行,无需停止。")
            return

        print(f"正在停止 {len(self._tasks)} 个直播核心任务...")
        for task in self._tasks:
            if not task.done():
                task.cancel()

        self._tasks.clear()
        self._is_running = False

        # After stopping, also reset the stream manager's state.
        from .stream_manager import live_stream_manager
        live_stream_manager.reset_stream_state()

        print("所有直播核心任务已停止。")
|
65
|
+
|
66
|
+
# Module-level singleton shared by the rest of the package.
process_manager = ProcessManager()
|