neuro-simulator 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuro_simulator-0.0.1/PKG-INFO +193 -0
- neuro_simulator-0.0.1/README.md +153 -0
- neuro_simulator-0.0.1/neuro_simulator/__init__.py +1 -0
- neuro_simulator-0.0.1/neuro_simulator/audio_synthesis.py +66 -0
- neuro_simulator-0.0.1/neuro_simulator/chatbot.py +104 -0
- neuro_simulator-0.0.1/neuro_simulator/cli.py +132 -0
- neuro_simulator-0.0.1/neuro_simulator/config.py +226 -0
- neuro_simulator-0.0.1/neuro_simulator/letta.py +135 -0
- neuro_simulator-0.0.1/neuro_simulator/log_handler.py +29 -0
- neuro_simulator-0.0.1/neuro_simulator/main.py +526 -0
- neuro_simulator-0.0.1/neuro_simulator/media/neuro_start.mp4 +0 -0
- neuro_simulator-0.0.1/neuro_simulator/process_manager.py +67 -0
- neuro_simulator-0.0.1/neuro_simulator/settings.yaml.example +143 -0
- neuro_simulator-0.0.1/neuro_simulator/shared_state.py +11 -0
- neuro_simulator-0.0.1/neuro_simulator/stream_chat.py +29 -0
- neuro_simulator-0.0.1/neuro_simulator/stream_manager.py +143 -0
- neuro_simulator-0.0.1/neuro_simulator/websocket_manager.py +51 -0
- neuro_simulator-0.0.1/neuro_simulator.egg-info/PKG-INFO +193 -0
- neuro_simulator-0.0.1/neuro_simulator.egg-info/SOURCES.txt +23 -0
- neuro_simulator-0.0.1/neuro_simulator.egg-info/dependency_links.txt +1 -0
- neuro_simulator-0.0.1/neuro_simulator.egg-info/entry_points.txt +2 -0
- neuro_simulator-0.0.1/neuro_simulator.egg-info/requires.txt +11 -0
- neuro_simulator-0.0.1/neuro_simulator.egg-info/top_level.txt +1 -0
- neuro_simulator-0.0.1/setup.cfg +4 -0
- neuro_simulator-0.0.1/setup.py +53 -0
neuro_simulator-0.0.1/PKG-INFO
@@ -0,0 +1,193 @@
Metadata-Version: 2.4
Name: neuro-simulator
Version: 0.0.1
Summary: Neuro Simulator Server
Home-page: https://github.com/Moha-Master/neuro-simulator
Author: Moha-Master
Author-email: hongkongreporter@outlook.com
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: fastapi
Requires-Dist: uvicorn
Requires-Dist: google-genai
Requires-Dist: azure-cognitiveservices-speech
Requires-Dist: letta-client
Requires-Dist: openai
Requires-Dist: pyyaml
Requires-Dist: pydantic
Requires-Dist: jinja2
Requires-Dist: python-multipart
Requires-Dist: mutagen
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

# Neuro-Simulator Server

*Follow Vedal meow, follow Vedal, thank you meow*

*This provisional README was auto-generated by AI*

This is the backend service for Neuro Simulator. It is built with Python and FastAPI and handles the core features: stream logic, AI interaction, and TTS synthesis.

## Features

- **Multiple LLM backends**: supports the Gemini and OpenAI APIs for generating audience chat messages
- **Configuration management**: settings can be changed and hot-reloaded at runtime through the API
- **External control**: the running server is driven entirely through external API endpoints

## Directory layout

```
neuro_simulator/
├── main.py                  # Application entry point and core logic
├── config.py                # Configuration management module
├── letta.py                 # Letta Agent integration
├── chatbot.py               # Audience chat generator
├── audio_synthesis.py       # Audio synthesis module
├── stream_chat.py           # Chat message handling
├── stream_manager.py        # Stream manager
├── websocket_manager.py     # WebSocket connection management
├── process_manager.py       # Process manager
├── shared_state.py          # Global shared state
├── log_handler.py           # Log handling module
├── requirements.txt         # Python dependency list
├── setup.py                 # Python package setup configuration
├── cli.py                   # Command-line launcher
├── settings.yaml.example    # Bundled fallback configuration template
└── media/                   # Bundled fallback media files
    └── neuro_start.mp4      # Used to time the "Start Soon" screen; only its duration is read
```

```
working_dir_example/          # Working directory layout
├── media/                    # Media folder; if missing, the bundled assets are copied in
│   └── neuro_start.mp4       # Used to time the "Start Soon" screen; only its duration is read
├── settings.yaml             # Configuration file created manually by the user
└── settings.yaml.example     # Auto-generated configuration template; must be renamed and filled in by hand
```

## Installation and configuration

0. **Set up the configuration**
   Copy `working_dir_example` to a location of your choice to serve as the configuration directory.
   Inside that directory, copy `settings.yaml.example` to `settings.yaml`.
   Edit `settings.yaml` and fill in the required API keys and settings:
   - Letta token and Agent ID
   - Gemini/OpenAI API key
   - Azure TTS key and region
   You may replace `media/neuro_start.mp4` with another video file, but remember to also replace the file of the same name in the client by hand.

### Option 1: install with pip

1. **Install the PyPI package from the index (best for regular use)**
   ```bash
   python3 -m venv venv
   # Windows
   venv/Scripts/pip install neuro-simulator
   # macOS/Linux
   venv/bin/pip install neuro-simulator
   ```

   **Install the package from a local checkout (best for further development)**
   ```bash
   python3 -m venv venv
   # Windows
   venv/Scripts/pip install -e .
   # macOS/Linux
   venv/bin/pip install -e .
   ```

2. **Run the server**
   ```bash
   # Use the default config directory (~/.config/neuro-simulator/)
   neuro

   # Specify a working directory
   neuro -D /path/to/your/config

   # Specify host and port
   neuro -H 0.0.0.0 -P 8080

   # Combine the options
   neuro -D /path/to/your/config -H 0.0.0.0 -P 8080
   ```

### Option 2: run from source

1. **Create and activate a virtual environment**
   ```bash
   python -m venv venv
   # Windows
   venv\Scripts\activate
   # macOS/Linux
   source venv/bin/activate
   ```

2. **Install dependencies**
   ```bash
   pip install -r requirements.txt
   ```

3. **Start the server**
   ```bash
   uvicorn main:app --host 127.0.0.1 --port 8000
   ```

The service runs at `http://127.0.0.1:8000` by default.

## API endpoints

The backend exposes a rich set of API endpoints for control and management:

- `/api/stream/*` - stream control (start/stop/restart/status)
- `/api/configs/*` - configuration management (get/update/reload)
  - Sensitive sections such as `api_keys` and `server` cannot be read or modified through the API.
- `/api/logs` - log retrieval
- `/api/tts/synthesize` - TTS synthesis
- `/api/system/health` - health check
- `/ws/stream` - stream content WebSocket
- `/ws/logs` - log stream WebSocket

Detailed endpoint documentation is available from the API docs at `http://127.0.0.1:8000/docs`.

## Configuration reference

The `settings.yaml` configuration file contains the following main sections:

- `api_keys` - API keys for the various services
- `stream_metadata` - stream metadata (title, category, tags, and so on)
- `neuro_behavior` - Neuro behavior settings
- `audience_simulation` - audience simulation settings
- `tts` - TTS voice synthesis settings
- `performance` - performance-related settings
- `server` - server settings (host, port, CORS, and so on)

## Security notes

1. The `panel_password` setting can be used to protect the control panel with a password.
2. Sensitive settings (such as API keys) are never exposed through the API.
3. CORS is supported, but only pre-configured origins are allowed.

## Troubleshooting

- Make sure all required API keys are configured correctly
- Check that the network connection is working
- Check the logs for error messages
- Make sure the port is not already in use by another program
neuro_simulator-0.0.1/README.md
@@ -0,0 +1,153 @@
# Neuro-Simulator Server

*Follow Vedal meow, follow Vedal, thank you meow*

*This provisional README was auto-generated by AI*

This is the backend service for Neuro Simulator. It is built with Python and FastAPI and handles the core features: stream logic, AI interaction, and TTS synthesis.

## Features

- **Multiple LLM backends**: supports the Gemini and OpenAI APIs for generating audience chat messages
- **Configuration management**: settings can be changed and hot-reloaded at runtime through the API
- **External control**: the running server is driven entirely through external API endpoints

## Directory layout

```
neuro_simulator/
├── main.py                  # Application entry point and core logic
├── config.py                # Configuration management module
├── letta.py                 # Letta Agent integration
├── chatbot.py               # Audience chat generator
├── audio_synthesis.py       # Audio synthesis module
├── stream_chat.py           # Chat message handling
├── stream_manager.py        # Stream manager
├── websocket_manager.py     # WebSocket connection management
├── process_manager.py       # Process manager
├── shared_state.py          # Global shared state
├── log_handler.py           # Log handling module
├── requirements.txt         # Python dependency list
├── setup.py                 # Python package setup configuration
├── cli.py                   # Command-line launcher
├── settings.yaml.example    # Bundled fallback configuration template
└── media/                   # Bundled fallback media files
    └── neuro_start.mp4      # Used to time the "Start Soon" screen; only its duration is read
```

```
working_dir_example/          # Working directory layout
├── media/                    # Media folder; if missing, the bundled assets are copied in
│   └── neuro_start.mp4       # Used to time the "Start Soon" screen; only its duration is read
├── settings.yaml             # Configuration file created manually by the user
└── settings.yaml.example     # Auto-generated configuration template; must be renamed and filled in by hand
```

## Installation and configuration

0. **Set up the configuration**
   Copy `working_dir_example` to a location of your choice to serve as the configuration directory.
   Inside that directory, copy `settings.yaml.example` to `settings.yaml`.
   Edit `settings.yaml` and fill in the required API keys and settings:
   - Letta token and Agent ID
   - Gemini/OpenAI API key
   - Azure TTS key and region
   You may replace `media/neuro_start.mp4` with another video file, but remember to also replace the file of the same name in the client by hand.

### Option 1: install with pip

1. **Install the PyPI package from the index (best for regular use)**
   ```bash
   python3 -m venv venv
   # Windows
   venv/Scripts/pip install neuro-simulator
   # macOS/Linux
   venv/bin/pip install neuro-simulator
   ```

   **Install the package from a local checkout (best for further development)**
   ```bash
   python3 -m venv venv
   # Windows
   venv/Scripts/pip install -e .
   # macOS/Linux
   venv/bin/pip install -e .
   ```

2. **Run the server**
   ```bash
   # Use the default config directory (~/.config/neuro-simulator/)
   neuro

   # Specify a working directory
   neuro -D /path/to/your/config

   # Specify host and port
   neuro -H 0.0.0.0 -P 8080

   # Combine the options
   neuro -D /path/to/your/config -H 0.0.0.0 -P 8080
   ```

### Option 2: run from source

1. **Create and activate a virtual environment**
   ```bash
   python -m venv venv
   # Windows
   venv\Scripts\activate
   # macOS/Linux
   source venv/bin/activate
   ```

2. **Install dependencies**
   ```bash
   pip install -r requirements.txt
   ```

3. **Start the server**
   ```bash
   uvicorn main:app --host 127.0.0.1 --port 8000
   ```

The service runs at `http://127.0.0.1:8000` by default.

## API endpoints

The backend exposes a rich set of API endpoints for control and management:

- `/api/stream/*` - stream control (start/stop/restart/status)
- `/api/configs/*` - configuration management (get/update/reload)
  - Sensitive sections such as `api_keys` and `server` cannot be read or modified through the API.
- `/api/logs` - log retrieval
- `/api/tts/synthesize` - TTS synthesis
- `/api/system/health` - health check
- `/ws/stream` - stream content WebSocket
- `/ws/logs` - log stream WebSocket

Detailed endpoint documentation is available from the API docs at `http://127.0.0.1:8000/docs`.

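For a quick illustration, here is a minimal client-side sketch against these endpoints. The paths come from the list above; the `requests`/`websockets` usage, the server address, and the response shapes are assumptions rather than part of this package:

```python
# Minimal sketch of talking to the backend from a client script.
# Assumes the server runs on 127.0.0.1:8000; response formats are not
# guaranteed by this README and may differ from the actual API.
import asyncio

import requests    # third-party; not a dependency of this package
import websockets  # third-party; not a dependency of this package

BASE = "http://127.0.0.1:8000"

def check_health() -> None:
    # GET /api/system/health - health check
    resp = requests.get(f"{BASE}/api/system/health", timeout=5)
    print("health:", resp.status_code, resp.text)

async def watch_stream(idle_timeout: float = 10.0) -> None:
    # /ws/stream - stream content WebSocket
    async with websockets.connect("ws://127.0.0.1:8000/ws/stream") as ws:
        try:
            while True:
                message = await asyncio.wait_for(ws.recv(), timeout=idle_timeout)
                print("stream event:", message)
        except asyncio.TimeoutError:
            pass  # no events for a while; stop watching

if __name__ == "__main__":
    check_health()
    asyncio.run(watch_stream())
```
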
## Configuration reference

The `settings.yaml` configuration file contains the following main sections:

- `api_keys` - API keys for the various services
- `stream_metadata` - stream metadata (title, category, tags, and so on)
- `neuro_behavior` - Neuro behavior settings
- `audience_simulation` - audience simulation settings
- `tts` - TTS voice synthesis settings
- `performance` - performance-related settings
- `server` - server settings (host, port, CORS, and so on)

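As an illustration, a minimal sketch of inspecting these sections with PyYAML (already a dependency of the package); only the section names listed above are taken from this README, and nothing else about the file's schema is assumed:

```python
# Minimal sketch: load settings.yaml and list the keys in each top-level section
# without printing secret values.
import yaml  # provided by the pyyaml dependency

with open("settings.yaml", "r", encoding="utf-8") as f:
    settings = yaml.safe_load(f)

for section in ("api_keys", "stream_metadata", "neuro_behavior",
                "audience_simulation", "tts", "performance", "server"):
    value = settings.get(section, {})
    keys = sorted(value) if isinstance(value, dict) else value
    print(f"{section}: {keys}")
```
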
## Security notes

1. The `panel_password` setting can be used to protect the control panel with a password.
2. Sensitive settings (such as API keys) are never exposed through the API.
3. CORS is supported, but only pre-configured origins are allowed.

## Troubleshooting

- Make sure all required API keys are configured correctly
- Check that the network connection is working
- Check the logs for error messages
- Make sure the port is not already in use by another program
neuro_simulator-0.0.1/neuro_simulator/__init__.py
@@ -0,0 +1 @@
# neuro_simulator/__init__.py
neuro_simulator-0.0.1/neuro_simulator/audio_synthesis.py
@@ -0,0 +1,66 @@
# backend/audio_synthesis.py
import html
import base64
import azure.cognitiveservices.speech as speechsdk
import asyncio
from .config import config_manager

async def synthesize_audio_segment(text: str, voice_name: str = None, pitch: float = None) -> tuple[str, float]:
    """
    Synthesize audio with Azure TTS.
    If voice_name or pitch is not provided, the defaults from the configuration are used.
    Returns the Base64-encoded audio string and the audio duration in seconds.
    """
    # Use the values from config_manager.settings
    azure_key = config_manager.settings.api_keys.azure_speech_key
    azure_region = config_manager.settings.api_keys.azure_speech_region

    if not azure_key or not azure_region:
        raise ValueError("Azure Speech Key or Region is not set in the configuration.")

    # Fall back to the configured defaults when no arguments are passed
    final_voice_name = voice_name if voice_name is not None else config_manager.settings.tts.voice_name
    final_pitch = pitch if pitch is not None else config_manager.settings.tts.voice_pitch

    speech_config = speechsdk.SpeechConfig(subscription=azure_key, region=azure_region)
    speech_config.set_speech_synthesis_output_format(speechsdk.SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3)

    pitch_percent = int((final_pitch - 1.0) * 100)
    pitch_ssml_value = f"+{pitch_percent}%" if pitch_percent >= 0 else f"{pitch_percent}%"

    escaped_text = html.escape(text)

    ssml_string = f"""
    <speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="en-US">
        <voice name="{final_voice_name}">
            <prosody pitch="{pitch_ssml_value}">
                {escaped_text}
            </prosody>
        </voice>
    </speak>
    """

    synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)

    def _perform_synthesis_sync():
        return synthesizer.speak_ssml_async(ssml_string).get()

    try:
        result = await asyncio.to_thread(_perform_synthesis_sync)

        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            audio_data = result.audio_data
            encoded_audio = base64.b64encode(audio_data).decode('utf-8')
            audio_duration_sec = result.audio_duration.total_seconds()
            print(f"TTS synthesis finished: '{text[:30]}...' (duration: {audio_duration_sec:.2f}s)")
            return encoded_audio, audio_duration_sec
        else:
            cancellation_details = result.cancellation_details
            error_message = f"TTS synthesis failed/cancelled (reason: {cancellation_details.reason}). Text: '{text}'"
            if cancellation_details.error_details:
                error_message += f" | Details: {cancellation_details.error_details}"
            print(f"Error: {error_message}")
            raise Exception(error_message)
    except Exception as e:
        print(f"Error: exception while calling the Azure TTS SDK: {e}")
        raise
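For context, a minimal usage sketch for the `synthesize_audio_segment` coroutine above. It assumes the package is installed, that `config_manager` finds a valid `settings.yaml` with Azure credentials in the working directory, and the output filename is arbitrary:

```python
# Sketch: synthesize one line and write the decoded MP3 to disk.
# Assumes Azure keys are configured in settings.yaml.
import asyncio
import base64

from neuro_simulator.audio_synthesis import synthesize_audio_segment

async def demo() -> None:
    encoded_audio, duration = await synthesize_audio_segment("Hello chat!")
    print(f"got {duration:.2f}s of audio")
    with open("hello.mp3", "wb") as f:
        f.write(base64.b64decode(encoded_audio))

if __name__ == "__main__":
    asyncio.run(demo())
```
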
neuro_simulator-0.0.1/neuro_simulator/chatbot.py
@@ -0,0 +1,104 @@
# backend/chatbot.py
from google import genai
from google.genai import types
from openai import AsyncOpenAI
import random
import asyncio
from .config import config_manager, AppSettings
import neuro_simulator.shared_state as shared_state

class AudienceLLMClient:
    async def generate_chat_messages(self, prompt: str, max_tokens: int) -> str:
        raise NotImplementedError

class GeminiAudienceLLM(AudienceLLMClient):
    def __init__(self, api_key: str, model_name: str):
        if not api_key:
            raise ValueError("Gemini API Key is not provided for GeminiAudienceLLM.")
        # Initialize the client as described in the new SDK documentation
        self.client = genai.Client(api_key=api_key)
        self.model_name = model_name
        print(f"Initialized GeminiAudienceLLM (new SDK), model: {self.model_name}")

    async def generate_chat_messages(self, prompt: str, max_tokens: int) -> str:
        # Use the async method and parameters from the new SDK documentation
        response = await self.client.aio.models.generate_content(
            model=self.model_name,
            contents=prompt,
            config=types.GenerateContentConfig(
                temperature=config_manager.settings.audience_simulation.llm_temperature,
                max_output_tokens=max_tokens
            )
        )
        raw_chat_text = ""
        if hasattr(response, 'text') and response.text:
            raw_chat_text = response.text
        elif response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
            for part in response.candidates[0].content.parts:
                if hasattr(part, 'text') and part.text:
                    raw_chat_text += part.text
        return raw_chat_text

class OpenAIAudienceLLM(AudienceLLMClient):
    def __init__(self, api_key: str, model_name: str, base_url: str | None):
        if not api_key:
            raise ValueError("OpenAI API Key is not provided for OpenAIAudienceLLM.")
        self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)
        self.model_name = model_name
        print(f"Initialized OpenAIAudienceLLM, model: {self.model_name}, API base: {base_url}")

    async def generate_chat_messages(self, prompt: str, max_tokens: int) -> str:
        response = await self.client.chat.completions.create(
            model=self.model_name,
            messages=[{"role": "user", "content": prompt}],
            temperature=config_manager.settings.audience_simulation.llm_temperature,
            max_tokens=max_tokens,
        )
        if response.choices and response.choices[0].message and response.choices[0].message.content:
            return response.choices[0].message.content.strip()
        return ""

async def get_dynamic_audience_prompt() -> str:
    current_neuro_speech = ""
    async with shared_state.neuro_last_speech_lock:
        current_neuro_speech = shared_state.neuro_last_speech

    # Use the template and variables from the settings object
    prompt = config_manager.settings.audience_simulation.prompt_template.format(
        neuro_speech=current_neuro_speech,
        num_chats_to_generate=config_manager.settings.audience_simulation.chats_per_batch
    )
    return prompt

class ChatbotManager:
    def __init__(self):
        self.client: AudienceLLMClient = self._create_client(config_manager.settings)
        self._last_checked_settings: dict = config_manager.settings.audience_simulation.model_dump()
        print("ChatbotManager initialized.")

    def _create_client(self, settings: AppSettings) -> AudienceLLMClient:
        provider = settings.audience_simulation.llm_provider
        print(f"Creating a new audience LLM client for provider: {provider}")
        if provider.lower() == "gemini":
            if not settings.api_keys.gemini_api_key:
                raise ValueError("GEMINI_API_KEY is not set in the configuration")
            return GeminiAudienceLLM(api_key=settings.api_keys.gemini_api_key, model_name=settings.audience_simulation.gemini_model)
        elif provider.lower() == "openai":
            if not settings.api_keys.openai_api_key:
                raise ValueError("OPENAI_API_KEY is not set in the configuration")
            return OpenAIAudienceLLM(api_key=settings.api_keys.openai_api_key, model_name=settings.audience_simulation.openai_model, base_url=settings.api_keys.openai_api_base_url)
        else:
            raise ValueError(f"Unsupported AUDIENCE_LLM_PROVIDER: {provider}")

    def handle_config_update(self, new_settings: AppSettings):
        new_audience_settings = new_settings.audience_simulation.model_dump()
        if new_audience_settings != self._last_checked_settings:
            print("Audience simulation settings changed; re-initializing the LLM client...")
            try:
                self.client = self._create_client(new_settings)
                self._last_checked_settings = new_audience_settings
                print("LLM client hot-reloaded successfully.")
            except Exception as e:
                print(f"Error: failed to hot-reload the LLM client: {e}")
        else:
            print("Audience simulation settings unchanged; skipping LLM client reload.")
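For context, a minimal sketch of driving this module, based only on the signatures above. The `max_tokens` value is illustrative, and a configured `settings.yaml` (loaded by `config_manager`) is assumed:

```python
# Sketch: build the audience prompt and ask the configured LLM for a chat batch.
import asyncio

from neuro_simulator.chatbot import ChatbotManager, get_dynamic_audience_prompt

async def demo() -> None:
    manager = ChatbotManager()                    # picks Gemini or OpenAI from settings
    prompt = await get_dynamic_audience_prompt()  # fills in Neuro's last speech
    raw_batch = await manager.client.generate_chat_messages(prompt, max_tokens=256)
    print(raw_batch)                              # raw text; splitting into chat lines is up to the caller

if __name__ == "__main__":
    asyncio.run(demo())
```
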
neuro_simulator-0.0.1/neuro_simulator/cli.py
@@ -0,0 +1,132 @@
#!/usr/bin/env python3

import argparse
import os
import sys
import shutil
from pathlib import Path

def main():
    parser = argparse.ArgumentParser(description="Neuro-Simulator Server")
    parser.add_argument("-D", "--dir", help="Working directory containing settings.yaml")
    parser.add_argument("-H", "--host", help="Host to bind the server to")
    parser.add_argument("-P", "--port", type=int, help="Port to bind the server to")

    args = parser.parse_args()

    # Set working directory
    if args.dir:
        work_dir = Path(args.dir).resolve()
        # If the directory doesn't exist (and it's not the default), raise an error
        if not work_dir.exists():
            print(f"Error: Working directory '{work_dir}' does not exist. Please create it manually.")
            sys.exit(1)
    else:
        work_dir = Path.home() / ".config" / "neuro-simulator"
        work_dir.mkdir(parents=True, exist_ok=True)

    # Change to working directory
    os.chdir(work_dir)

    # Handle settings.yaml.example
    settings_example_path = work_dir / "settings.yaml.example"
    settings_path = work_dir / "settings.yaml"

    # Copy settings.yaml.example from package if it doesn't exist
    if not settings_example_path.exists():
        try:
            # Try pkg_resources first (for installed packages)
            try:
                import pkg_resources
                example_path = pkg_resources.resource_filename('neuro_simulator', 'settings.yaml.example')
                if os.path.exists(example_path):
                    shutil.copy(example_path, settings_example_path)
                    print(f"Created {settings_example_path} from package example")
                else:
                    # Fallback to relative path (for development mode)
                    dev_example_path = Path(__file__).parent / "settings.yaml.example"
                    if dev_example_path.exists():
                        shutil.copy(dev_example_path, settings_example_path)
                        print(f"Created {settings_example_path} from development example")
                    else:
                        print("Warning: settings.yaml.example not found in package or development folder")
            except Exception:
                # Fallback to relative path (for development mode)
                dev_example_path = Path(__file__).parent / "settings.yaml.example"
                if dev_example_path.exists():
                    shutil.copy(dev_example_path, settings_example_path)
                    print(f"Created {settings_example_path} from development example")
                else:
                    print("Warning: settings.yaml.example not found in package or development folder")
        except Exception as e:
            print(f"Warning: Could not copy settings.yaml.example from package: {e}")

    # Handle media folder
    media_dir = work_dir / "media"
    video_path = media_dir / "neuro_start.mp4"

    # Copy media folder from package if it doesn't exist or is invalid
    if not media_dir.exists() or not video_path.exists():
        # If media dir exists but video doesn't, remove the incomplete media dir
        if media_dir.exists():
            shutil.rmtree(media_dir)

        try:
            # Try pkg_resources first (for installed packages)
            try:
                import pkg_resources
                package_media_path = pkg_resources.resource_filename('neuro_simulator', 'media')
                if os.path.exists(package_media_path):
                    shutil.copytree(package_media_path, media_dir)
                    print(f"Created {media_dir} from package media")
                else:
                    # Fallback to relative path (for development mode)
                    dev_media_path = Path(__file__).parent / "media"
                    if dev_media_path.exists():
                        shutil.copytree(dev_media_path, media_dir)
                        print(f"Created {media_dir} from development media")
                    else:
                        print("Warning: media folder not found in package or development folder")
            except Exception:
                # Fallback to relative path (for development mode)
                dev_media_path = Path(__file__).parent / "media"
                if dev_media_path.exists():
                    shutil.copytree(dev_media_path, media_dir)
                    print(f"Created {media_dir} from development media")
                else:
                    print("Warning: media folder not found in package or development folder")
        except Exception as e:
            print(f"Warning: Could not copy media folder from package: {e}")

    # Now check for required files and handle errors appropriately
    errors = []

    # Check for settings.yaml (required for running)
    if not settings_path.exists():
        if settings_example_path.exists():
            errors.append(f"Error: {settings_path} not found. Please copy {settings_example_path} to {settings_path} and configure it.")
        else:
            errors.append(f"Error: Neither {settings_path} nor {settings_example_path} found. Please ensure proper configuration.")

    # Check for required media files (required for running)
    if not media_dir.exists() or not video_path.exists():
        errors.append(f"Error: Required media files not found in {media_dir}.")

    # If there are any errors, print them and exit
    if errors:
        for error in errors:
            print(error)
        sys.exit(1)

    # Import and run the main application
    try:
        from neuro_simulator.main import run_server
        run_server(args.host, args.port)
    except ImportError:
        # Fallback for development mode
        sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
        from neuro_simulator.main import run_server
        run_server(args.host, args.port)

if __name__ == "__main__":
    main()
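For completeness, a hedged sketch of invoking this entry point from Python rather than through the `neuro` console script; the flags mirror the parser above, and the config path is a placeholder:

```python
# Sketch: run the CLI programmatically with the same flags as
# `neuro -D /path/to/your/config -H 0.0.0.0 -P 8080`.
import sys

from neuro_simulator.cli import main

if __name__ == "__main__":
    sys.argv = ["neuro", "-D", "/path/to/your/config", "-H", "0.0.0.0", "-P", "8080"]
    main()
```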