neuro-simulator 0.1.3__tar.gz → 0.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/PKG-INFO +83 -33
- neuro_simulator-0.2.1/README.md +189 -0
- neuro_simulator-0.2.1/neuro_simulator/__init__.py +1 -0
- neuro_simulator-0.2.1/neuro_simulator/agent/__init__.py +1 -0
- neuro_simulator-0.2.1/neuro_simulator/agent/base.py +43 -0
- neuro_simulator-0.2.1/neuro_simulator/agent/core.py +201 -0
- neuro_simulator-0.2.1/neuro_simulator/agent/factory.py +30 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/neuro_simulator/agent/llm.py +34 -31
- neuro_simulator-0.2.1/neuro_simulator/agent/memory/__init__.py +1 -0
- neuro_simulator-0.2.1/neuro_simulator/agent/memory/manager.py +204 -0
- neuro_simulator-0.2.1/neuro_simulator/agent/tools/__init__.py +1 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/neuro_simulator/agent/tools/core.py +8 -18
- neuro_simulator-0.2.1/neuro_simulator/api/__init__.py +1 -0
- neuro_simulator-0.2.1/neuro_simulator/api/agent.py +163 -0
- neuro_simulator-0.2.1/neuro_simulator/api/stream.py +55 -0
- neuro_simulator-0.2.1/neuro_simulator/api/system.py +90 -0
- neuro_simulator-0.2.1/neuro_simulator/cli.py +94 -0
- neuro_simulator-0.2.1/neuro_simulator/core/__init__.py +1 -0
- neuro_simulator-0.2.1/neuro_simulator/core/agent_factory.py +52 -0
- neuro_simulator-0.2.1/neuro_simulator/core/agent_interface.py +91 -0
- neuro_simulator-0.2.1/neuro_simulator/core/application.py +278 -0
- neuro_simulator-0.2.1/neuro_simulator/services/__init__.py +1 -0
- neuro_simulator-0.1.3/neuro_simulator/chatbot.py → neuro_simulator-0.2.1/neuro_simulator/services/audience.py +24 -24
- neuro_simulator-0.1.3/neuro_simulator/audio_synthesis.py → neuro_simulator-0.2.1/neuro_simulator/services/audio.py +18 -15
- neuro_simulator-0.2.1/neuro_simulator/services/builtin.py +87 -0
- neuro_simulator-0.2.1/neuro_simulator/services/letta.py +206 -0
- neuro_simulator-0.1.3/neuro_simulator/stream_manager.py → neuro_simulator-0.2.1/neuro_simulator/services/stream.py +39 -47
- neuro_simulator-0.2.1/neuro_simulator/utils/__init__.py +1 -0
- neuro_simulator-0.2.1/neuro_simulator/utils/logging.py +90 -0
- neuro_simulator-0.2.1/neuro_simulator/utils/process.py +67 -0
- neuro_simulator-0.1.3/neuro_simulator/stream_chat.py → neuro_simulator-0.2.1/neuro_simulator/utils/queue.py +17 -4
- neuro_simulator-0.2.1/neuro_simulator/utils/state.py +14 -0
- neuro_simulator-0.1.3/neuro_simulator/websocket_manager.py → neuro_simulator-0.2.1/neuro_simulator/utils/websocket.py +18 -14
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/neuro_simulator.egg-info/PKG-INFO +83 -33
- neuro_simulator-0.2.1/neuro_simulator.egg-info/SOURCES.txt +40 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/pyproject.toml +1 -1
- neuro_simulator-0.1.3/README.md +0 -139
- neuro_simulator-0.1.3/neuro_simulator/__init__.py +0 -10
- neuro_simulator-0.1.3/neuro_simulator/agent/__init__.py +0 -8
- neuro_simulator-0.1.3/neuro_simulator/agent/api.py +0 -737
- neuro_simulator-0.1.3/neuro_simulator/agent/core.py +0 -494
- neuro_simulator-0.1.3/neuro_simulator/agent/memory/__init__.py +0 -4
- neuro_simulator-0.1.3/neuro_simulator/agent/memory/manager.py +0 -346
- neuro_simulator-0.1.3/neuro_simulator/agent/memory.py +0 -137
- neuro_simulator-0.1.3/neuro_simulator/agent/tools/__init__.py +0 -4
- neuro_simulator-0.1.3/neuro_simulator/agent/tools.py +0 -69
- neuro_simulator-0.1.3/neuro_simulator/builtin_agent.py +0 -83
- neuro_simulator-0.1.3/neuro_simulator/cli.py +0 -177
- neuro_simulator-0.1.3/neuro_simulator/config.yaml.example +0 -157
- neuro_simulator-0.1.3/neuro_simulator/letta.py +0 -164
- neuro_simulator-0.1.3/neuro_simulator/log_handler.py +0 -43
- neuro_simulator-0.1.3/neuro_simulator/main.py +0 -673
- neuro_simulator-0.1.3/neuro_simulator/media/neuro_start.mp4 +0 -0
- neuro_simulator-0.1.3/neuro_simulator/process_manager.py +0 -70
- neuro_simulator-0.1.3/neuro_simulator/shared_state.py +0 -11
- neuro_simulator-0.1.3/neuro_simulator.egg-info/SOURCES.txt +0 -34
- {neuro_simulator-0.1.3/neuro_simulator → neuro_simulator-0.2.1/neuro_simulator/core}/config.py +0 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/neuro_simulator.egg-info/dependency_links.txt +0 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/neuro_simulator.egg-info/entry_points.txt +0 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/neuro_simulator.egg-info/requires.txt +0 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/neuro_simulator.egg-info/top_level.txt +0 -0
- {neuro_simulator-0.1.3 → neuro_simulator-0.2.1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: neuro_simulator
|
3
|
-
Version: 0.1
|
3
|
+
Version: 0.2.1
|
4
4
|
Summary: Neuro Simulator Server
|
5
5
|
Author-email: Moha-Master <hongkongreporter@outlook.com>
|
6
6
|
License-Expression: MIT
|
@@ -39,7 +39,7 @@ Requires-Dist: flake8; extra == "dev"
|
|
39
39
|
|
40
40
|
*本临时README由AI自动生成*
|
41
41
|
|
42
|
-
这是 Neuro Simulator 的服务端,负责处理直播逻辑、AI 交互、TTS
|
42
|
+
这是 Neuro Simulator 的服务端,负责处理直播逻辑、AI 交互、TTS 合成等核心功能
|
43
43
|
|
44
44
|
## 功能特性
|
45
45
|
|
@@ -51,49 +51,99 @@ Requires-Dist: flake8; extra == "dev"
|
|
51
51
|
|
52
52
|
``` main
|
53
53
|
neuro_simulator/
|
54
|
-
├──
|
55
|
-
├── config.py # 配置管理模块
|
56
|
-
├── letta.py # Letta Agent 集成
|
57
|
-
├── chatbot.py # 观众聊天生成器
|
58
|
-
├── audio_synthesis.py # 音频合成模块
|
59
|
-
├── stream_chat.py # 聊天消息处理
|
60
|
-
├── stream_manager.py # 直播管理器
|
61
|
-
├── websocket_manager.py # WebSocket 连接管理
|
62
|
-
├── process_manager.py # 进程管理器
|
63
|
-
├── shared_state.py # 全局状态管理
|
64
|
-
├── log_handler.py # 日志处理模块
|
65
|
-
├── requirements.txt # Python 依赖列表
|
66
|
-
├── pyproject.toml # Python 包安装配置
|
54
|
+
├── __init__.py
|
67
55
|
├── cli.py # 命令行启动脚本
|
68
|
-
├──
|
69
|
-
|
70
|
-
|
56
|
+
├── core/ # 核心模块
|
57
|
+
│ ├── __init__.py
|
58
|
+
│ ├── application.py # FastAPI应用和主要路由
|
59
|
+
│ ├── config.py # 配置管理模块
|
60
|
+
│ ├── agent_factory.py # Agent工厂模式实现
|
61
|
+
│ ├── agent_interface.py # Agent接口定义
|
62
|
+
│ └── config.yaml.example # 自带的备用配置模板
|
63
|
+
├── agent/ # 内建Agent模块
|
64
|
+
│ ├── __init__.py
|
65
|
+
│ ├── base.py # Agent基类
|
66
|
+
│ ├── core.py # Agent核心实现
|
67
|
+
│ ├── factory.py # Agent工厂
|
68
|
+
│ ├── llm.py # LLM客户端
|
69
|
+
│ ├── memory/ # 记忆管理模块
|
70
|
+
│ │ ├── __init__.py
|
71
|
+
│ │ ├── manager.py # 记忆管理器
|
72
|
+
│ │ ├── context.json # 上下文记忆文件
|
73
|
+
│ │ ├── core_memory.json # 核心记忆文件
|
74
|
+
│ │ ├── init_memory.json # 初始化记忆文件
|
75
|
+
│ │ └── temp_memory.json # 临时记忆文件
|
76
|
+
│ └── tools/ # 工具模块
|
77
|
+
│ ├── __init__.py
|
78
|
+
│ └── core.py # 核心工具实现
|
79
|
+
├── api/ # API路由模块
|
80
|
+
│ ├── __init__.py
|
81
|
+
│ ├── agent.py # Agent管理API
|
82
|
+
│ ├── stream.py # 直播控制API
|
83
|
+
│ └── system.py # 系统管理API
|
84
|
+
├── services/ # 服务模块
|
85
|
+
│ ├── __init__.py
|
86
|
+
│ ├── audience.py # 观众聊天生成器
|
87
|
+
│ ├── audio.py # 音频合成模块
|
88
|
+
│ ├── builtin.py # 内建Agent服务
|
89
|
+
│ ├── letta.py # Letta Agent 集成
|
90
|
+
│ └── stream.py # 直播管理服务
|
91
|
+
├── utils/ # 工具模块
|
92
|
+
│ ├── __init__.py
|
93
|
+
│ ├── logging.py # 日志处理模块
|
94
|
+
│ ├── process.py # 进程管理模块
|
95
|
+
│ ├── queue.py # 队列处理模块
|
96
|
+
│ ├── state.py # 状态管理模块
|
97
|
+
│ └── websocket.py # WebSocket连接管理
|
98
|
+
├── assets/ # 自带的备用媒体文件
|
99
|
+
│ └── neuro_start.mp4 # 用来计算Start Soon长度,仅读取时长
|
100
|
+
├── requirements.txt # Python 依赖列表
|
101
|
+
└── pyproject.toml # Python 包安装配置
|
71
102
|
```
|
72
103
|
|
73
104
|
``` workin'dir
|
74
105
|
working_dir_example/ # 工作目录结构,请将这个目录重命名和复制到你想要的位置(推荐放到~/.config/neuro-simulator)
|
75
|
-
├──
|
106
|
+
├── assets/ # 媒体文件夹,如缺失会使用自带资源覆盖
|
76
107
|
│ └── neuro_start.mp4 # 用来计算Start Soon长度,仅读取时长,请和客户端的视频保持一致
|
77
|
-
├── config.yaml
|
78
|
-
|
108
|
+
├── config.yaml # 由用户手工创建的配置文件
|
109
|
+
├── config.yaml.example # 自动生成的配置文件模板,必须手动重命名和填写
|
110
|
+
└── agent/ # Agent相关文件夹
|
111
|
+
└── memory/ # Agent记忆文件夹
|
112
|
+
├── context.json # 上下文记忆文件
|
113
|
+
├── core_memory.json # 核心记忆文件
|
114
|
+
├── init_memory.json # 初始化记忆文件
|
115
|
+
└── temp_memory.json # 临时记忆文件
|
79
116
|
```
|
80
117
|
|
81
118
|
## 安装与配置
|
82
119
|
|
83
|
-
0. **在运行server前,必须有已经配置完成的Letta Agent。**
|
84
120
|
1. 复制一份 `../docs/working_dir_example` 到你想要的位置,作为配置文件目录.
|
85
121
|
- 程序会在未指定 `--dir` 的情况下自动生成一个工作目录,路径为 `~/.config/neuro-simulator/`
|
86
122
|
2. 然后进入配置文件目录,复制 `config.yaml.example` 到 `config.yaml`
|
87
123
|
3. 编辑 `config.yaml` 文件,填入必要的 API 密钥和配置项:
|
88
|
-
- Letta Token 和 Agent ID
|
89
|
-
- Gemini/OpenAI API Key
|
124
|
+
- 如果使用 Letta Agent,需要配置 Letta Token 和 Agent ID
|
125
|
+
- Gemini/OpenAI API Key(用于观众聊天生成和 Agent)
|
90
126
|
- Azure TTS Key 和 Region
|
91
127
|
|
92
|
-
|
128
|
+
可以自行替换 `$dir/assets/neuro_start.mp4` 为其它视频文件,但记得手动替换 client 中的同名文件
|
129
|
+
|
130
|
+
### Agent配置
|
131
|
+
|
132
|
+
服务端支持两种Agent类型:
|
133
|
+
1. **Letta Agent**:需要配置 Letta Cloud 或自托管的 Letta Server
|
134
|
+
2. **内建 Agent**:使用服务端自带的 Agent,支持 Gemini 和OpenAI API
|
135
|
+
|
136
|
+
在 `config.yaml` 中通过 `agent_type` 字段选择使用的 Agent 类型:
|
137
|
+
- `agent_type: "letta"`:使用 Letta Agent
|
138
|
+
- `agent_type: "builtin"`:使用内建 Agent
|
139
|
+
|
140
|
+
当使用内建Agent时,还需要配置:
|
141
|
+
- `agent.agent_provider`:选择"gemini"或"openai"
|
142
|
+
- `agent.agent_model`:指定具体的模型名称
|
93
143
|
|
94
144
|
### 直接安装方式(无需二次开发)
|
95
145
|
|
96
|
-
若无需二次开发,可以直接使用pip安装:
|
146
|
+
若无需二次开发,可以直接使用 pip 安装:
|
97
147
|
```bash
|
98
148
|
python3 -m venv venv
|
99
149
|
# Windows
|
@@ -104,7 +154,7 @@ venv/bin/pip install neuro-simulator
|
|
104
154
|
|
105
155
|
### 二次开发方式
|
106
156
|
|
107
|
-
|
157
|
+
若需要二次开发,请克隆项目:
|
108
158
|
```bash
|
109
159
|
git clone https://github.com/your-username/Neuro-Simulator.git
|
110
160
|
cd Neuro-Simulator/server
|
@@ -131,7 +181,7 @@ neuro -H 0.0.0.0 -P 8080
|
|
131
181
|
neuro -D /path/to/your/config -H 0.0.0.0 -P 8080
|
132
182
|
```
|
133
183
|
|
134
|
-
服务默认运行在 `http://127.0.0.1:8000
|
184
|
+
服务默认运行在 `http://127.0.0.1:8000`
|
135
185
|
|
136
186
|
## API 接口
|
137
187
|
|
@@ -139,14 +189,14 @@ neuro -D /path/to/your/config -H 0.0.0.0 -P 8080
|
|
139
189
|
|
140
190
|
- `/api/stream/*` - 直播控制接口(启动/停止/重启/状态)
|
141
191
|
- `/api/configs/*` - 配置管理接口(获取/更新/重载配置)
|
142
|
-
- `api_keys` `server`
|
192
|
+
- `api_keys` `server` 等敏感配置项无法从接口获取和修改
|
143
193
|
- `/api/logs` - 日志获取接口
|
144
194
|
- `/api/tts/synthesize` - TTS 合成接口
|
145
195
|
- `/api/system/health` - 健康检查接口
|
146
|
-
- `/ws/stream` -
|
147
|
-
- `/ws/
|
196
|
+
- `/ws/stream` - 客户端使用的直播接口
|
197
|
+
- `/ws/admin` - 日志和内建 Agent的 Context 流接口
|
148
198
|
|
149
|
-
详细接口说明可通过 `http://127.0.0.1:8000/docs` 访问 API
|
199
|
+
详细接口说明可通过 `http://127.0.0.1:8000/docs` 访问 API 文档查看
|
150
200
|
|
151
201
|
## 配置说明
|
152
202
|
|
@@ -160,7 +210,7 @@ neuro -D /path/to/your/config -H 0.0.0.0 -P 8080
|
|
160
210
|
- `performance` - 性能相关设置
|
161
211
|
- `server` - 服务器设置(主机、端口、CORS 等)
|
162
212
|
|
163
|
-
有关配置文件的完整示例,请参阅项目根目录下的 `docs/working_dir_example/`
|
213
|
+
有关配置文件的完整示例,请参阅项目根目录下的 `docs/working_dir_example/` 文件夹
|
164
214
|
|
165
215
|
## 安全说明
|
166
216
|
|
@@ -0,0 +1,189 @@
|
|
1
|
+
# Neuro-Simulator 服务端
|
2
|
+
|
3
|
+
*本临时README由AI自动生成*
|
4
|
+
|
5
|
+
这是 Neuro Simulator 的服务端,负责处理直播逻辑、AI 交互、TTS 合成等核心功能
|
6
|
+
|
7
|
+
## 功能特性
|
8
|
+
|
9
|
+
- **动态观众**:调用无状态LLM,动态生成观众聊天内容,支持 Gemini 和 OpenAI API
|
10
|
+
- **配置管理**:支持通过 API 动态修改和热重载配置
|
11
|
+
- **外部控制**:完全使用外部API端点操控服务端运行
|
12
|
+
|
13
|
+
## 目录结构
|
14
|
+
|
15
|
+
``` main
|
16
|
+
neuro_simulator/
|
17
|
+
├── __init__.py
|
18
|
+
├── cli.py # 命令行启动脚本
|
19
|
+
├── core/ # 核心模块
|
20
|
+
│ ├── __init__.py
|
21
|
+
│ ├── application.py # FastAPI应用和主要路由
|
22
|
+
│ ├── config.py # 配置管理模块
|
23
|
+
│ ├── agent_factory.py # Agent工厂模式实现
|
24
|
+
│ ├── agent_interface.py # Agent接口定义
|
25
|
+
│ └── config.yaml.example # 自带的备用配置模板
|
26
|
+
├── agent/ # 内建Agent模块
|
27
|
+
│ ├── __init__.py
|
28
|
+
│ ├── base.py # Agent基类
|
29
|
+
│ ├── core.py # Agent核心实现
|
30
|
+
│ ├── factory.py # Agent工厂
|
31
|
+
│ ├── llm.py # LLM客户端
|
32
|
+
│ ├── memory/ # 记忆管理模块
|
33
|
+
│ │ ├── __init__.py
|
34
|
+
│ │ ├── manager.py # 记忆管理器
|
35
|
+
│ │ ├── context.json # 上下文记忆文件
|
36
|
+
│ │ ├── core_memory.json # 核心记忆文件
|
37
|
+
│ │ ├── init_memory.json # 初始化记忆文件
|
38
|
+
│ │ └── temp_memory.json # 临时记忆文件
|
39
|
+
│ └── tools/ # 工具模块
|
40
|
+
│ ├── __init__.py
|
41
|
+
│ └── core.py # 核心工具实现
|
42
|
+
├── api/ # API路由模块
|
43
|
+
│ ├── __init__.py
|
44
|
+
│ ├── agent.py # Agent管理API
|
45
|
+
│ ├── stream.py # 直播控制API
|
46
|
+
│ └── system.py # 系统管理API
|
47
|
+
├── services/ # 服务模块
|
48
|
+
│ ├── __init__.py
|
49
|
+
│ ├── audience.py # 观众聊天生成器
|
50
|
+
│ ├── audio.py # 音频合成模块
|
51
|
+
│ ├── builtin.py # 内建Agent服务
|
52
|
+
│ ├── letta.py # Letta Agent 集成
|
53
|
+
│ └── stream.py # 直播管理服务
|
54
|
+
├── utils/ # 工具模块
|
55
|
+
│ ├── __init__.py
|
56
|
+
│ ├── logging.py # 日志处理模块
|
57
|
+
│ ├── process.py # 进程管理模块
|
58
|
+
│ ├── queue.py # 队列处理模块
|
59
|
+
│ ├── state.py # 状态管理模块
|
60
|
+
│ └── websocket.py # WebSocket连接管理
|
61
|
+
├── assets/ # 自带的备用媒体文件
|
62
|
+
│ └── neuro_start.mp4 # 用来计算Start Soon长度,仅读取时长
|
63
|
+
├── requirements.txt # Python 依赖列表
|
64
|
+
└── pyproject.toml # Python 包安装配置
|
65
|
+
```
|
66
|
+
|
67
|
+
``` workin'dir
|
68
|
+
working_dir_example/ # 工作目录结构,请将这个目录重命名和复制到你想要的位置(推荐放到~/.config/neuro-simulator)
|
69
|
+
├── assets/ # 媒体文件夹,如缺失会使用自带资源覆盖
|
70
|
+
│ └── neuro_start.mp4 # 用来计算Start Soon长度,仅读取时长,请和客户端的视频保持一致
|
71
|
+
├── config.yaml # 由用户手工创建的配置文件
|
72
|
+
├── config.yaml.example # 自动生成的配置文件模板,必须手动重命名和填写
|
73
|
+
└── agent/ # Agent相关文件夹
|
74
|
+
└── memory/ # Agent记忆文件夹
|
75
|
+
├── context.json # 上下文记忆文件
|
76
|
+
├── core_memory.json # 核心记忆文件
|
77
|
+
├── init_memory.json # 初始化记忆文件
|
78
|
+
└── temp_memory.json # 临时记忆文件
|
79
|
+
```
|
80
|
+
|
81
|
+
## 安装与配置
|
82
|
+
|
83
|
+
1. 复制一份 `../docs/working_dir_example` 到你想要的位置,作为配置文件目录.
|
84
|
+
- 程序会在未指定 `--dir` 的情况下自动生成一个工作目录,路径为 `~/.config/neuro-simulator/`
|
85
|
+
2. 然后进入配置文件目录,复制 `config.yaml.example` 到 `config.yaml`
|
86
|
+
3. 编辑 `config.yaml` 文件,填入必要的 API 密钥和配置项:
|
87
|
+
- 如果使用 Letta Agent,需要配置 Letta Token 和 Agent ID
|
88
|
+
- Gemini/OpenAI API Key(用于观众聊天生成和 Agent)
|
89
|
+
- Azure TTS Key 和 Region
|
90
|
+
|
91
|
+
可以自行替换 `$dir/assets/neuro_start.mp4` 为其它视频文件,但记得手动替换 client 中的同名文件
|
92
|
+
|
93
|
+
### Agent配置
|
94
|
+
|
95
|
+
服务端支持两种Agent类型:
|
96
|
+
1. **Letta Agent**:需要配置 Letta Cloud 或自托管的 Letta Server
|
97
|
+
2. **内建 Agent**:使用服务端自带的 Agent,支持 Gemini 和OpenAI API
|
98
|
+
|
99
|
+
在 `config.yaml` 中通过 `agent_type` 字段选择使用的 Agent 类型:
|
100
|
+
- `agent_type: "letta"`:使用 Letta Agent
|
101
|
+
- `agent_type: "builtin"`:使用内建 Agent
|
102
|
+
|
103
|
+
当使用内建Agent时,还需要配置:
|
104
|
+
- `agent.agent_provider`:选择"gemini"或"openai"
|
105
|
+
- `agent.agent_model`:指定具体的模型名称
|
106
|
+
|
107
|
+
### 直接安装方式(无需二次开发)
|
108
|
+
|
109
|
+
若无需二次开发,可以直接使用 pip 安装:
|
110
|
+
```bash
|
111
|
+
python3 -m venv venv
|
112
|
+
# Windows
|
113
|
+
venv/Scripts/pip install neuro-simulator
|
114
|
+
# macOS/Linux
|
115
|
+
venv/bin/pip install neuro-simulator
|
116
|
+
```
|
117
|
+
|
118
|
+
### 二次开发方式
|
119
|
+
|
120
|
+
若需要二次开发,请克隆项目:
|
121
|
+
```bash
|
122
|
+
git clone https://github.com/your-username/Neuro-Simulator.git
|
123
|
+
cd Neuro-Simulator/server
|
124
|
+
python3 -m venv venv
|
125
|
+
# Windows
|
126
|
+
venv/Scripts/pip install -e .
|
127
|
+
# macOS/Linux
|
128
|
+
venv/bin/pip install -e .
|
129
|
+
```
|
130
|
+
|
131
|
+
### 运行服务
|
132
|
+
|
133
|
+
```bash
|
134
|
+
# 使用默认配置 (位于~/.config/neuro-simulator/)
|
135
|
+
neuro
|
136
|
+
|
137
|
+
# 指定工作目录
|
138
|
+
neuro -D /path/to/your/config
|
139
|
+
|
140
|
+
# 指定主机和端口
|
141
|
+
neuro -H 0.0.0.0 -P 8080
|
142
|
+
|
143
|
+
# 组合使用
|
144
|
+
neuro -D /path/to/your/config -H 0.0.0.0 -P 8080
|
145
|
+
```
|
146
|
+
|
147
|
+
服务默认运行在 `http://127.0.0.1:8000`
|
148
|
+
|
149
|
+
## API 接口
|
150
|
+
|
151
|
+
后端提供丰富的 API 接口用于控制和管理:
|
152
|
+
|
153
|
+
- `/api/stream/*` - 直播控制接口(启动/停止/重启/状态)
|
154
|
+
- `/api/configs/*` - 配置管理接口(获取/更新/重载配置)
|
155
|
+
- `api_keys` `server` 等敏感配置项无法从接口获取和修改
|
156
|
+
- `/api/logs` - 日志获取接口
|
157
|
+
- `/api/tts/synthesize` - TTS 合成接口
|
158
|
+
- `/api/system/health` - 健康检查接口
|
159
|
+
- `/ws/stream` - 客户端使用的直播接口
|
160
|
+
- `/ws/admin` - 日志和内建 Agent的 Context 流接口
|
161
|
+
|
162
|
+
详细接口说明可通过 `http://127.0.0.1:8000/docs` 访问 API 文档查看
|
163
|
+
|
164
|
+
## 配置说明
|
165
|
+
|
166
|
+
配置文件 `config.yaml` 包含以下主要配置项:
|
167
|
+
|
168
|
+
- `api_keys` - 各种服务的 API 密钥
|
169
|
+
- `stream_metadata` - 直播元数据(标题、分类、标签等)
|
170
|
+
- `neuro_behavior` - Neuro 行为设置
|
171
|
+
- `audience_simulation` - 观众模拟设置
|
172
|
+
- `tts` - TTS 语音合成设置
|
173
|
+
- `performance` - 性能相关设置
|
174
|
+
- `server` - 服务器设置(主机、端口、CORS 等)
|
175
|
+
|
176
|
+
有关配置文件的完整示例,请参阅项目根目录下的 `docs/working_dir_example/` 文件夹
|
177
|
+
|
178
|
+
## 安全说明
|
179
|
+
|
180
|
+
1. 通过 `panel_password` 配置项可以设置控制面板访问密码
|
181
|
+
2. 敏感配置项(如 API 密钥)不会通过 API 接口暴露
|
182
|
+
3. 支持 CORS,仅允许预配置的来源访问
|
183
|
+
|
184
|
+
## 故障排除
|
185
|
+
|
186
|
+
- 确保所有必需的 API 密钥都已正确配置
|
187
|
+
- 检查网络连接是否正常
|
188
|
+
- 查看日志文件获取错误信息
|
189
|
+
- 确保端口未被其他程序占用
|
@@ -0,0 +1 @@
|
|
1
|
+
# neuro_simulator package root
|
@@ -0,0 +1 @@
|
|
1
|
+
# neuro_simulator.agent package
|
@@ -0,0 +1,43 @@
|
|
1
|
+
# agent/base.py
"""Base classes for Neuro Simulator Agent."""

from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional


class BaseAgent(ABC):
    """Abstract base class for all agents.

    Concrete agents (e.g. the built-in agent or a Letta-backed agent)
    implement this interface so the rest of the server can drive them
    interchangeably.
    """

    @abstractmethod
    async def initialize(self):
        """Initialize the agent (load persistent state, connect clients, etc.)."""

    @abstractmethod
    async def reset_memory(self):
        """Reset agent memory."""

    @abstractmethod
    async def get_response(self, chat_messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Get response from the agent.

        Args:
            chat_messages: List of message dictionaries with 'username' and 'text' keys.

        Returns:
            Dictionary containing processing details including tool executions
            and final response.
        """

    @abstractmethod
    async def process_messages(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Process messages and generate a response.

        Args:
            messages: List of message dictionaries with 'username' and 'text' keys.

        Returns:
            Dictionary containing processing details including tool executions
            and final response.
        """
|
@@ -0,0 +1,201 @@
|
|
1
|
+
# neuro_simulator/agent/core.py
"""
Core module for the Neuro Simulator's built-in agent.

Wires together the LLM client, the memory manager, and the tool manager,
and streams context updates to connected admin clients over websockets.
"""

import asyncio
import json
import logging
import re
import sys
from pathlib import Path
from datetime import datetime
from typing import Any, Dict, List, Optional

# Updated imports for the new structure
from ..utils.logging import QueueLogHandler, agent_log_queue
from ..utils.websocket import connection_manager

# --- Agent-specific imports ---
from .llm import LLMClient
from .memory.manager import MemoryManager
from .tools.core import ToolManager

# Dedicated logger so agent output can be routed separately from server logs.
agent_logger = logging.getLogger("neuro_agent")
agent_logger.setLevel(logging.DEBUG)


def configure_agent_logging():
    """Configure agent logging to use the shared agent_log_queue."""
    # Drop any previously attached handlers so reconfiguration is idempotent.
    if agent_logger.hasHandlers():
        agent_logger.handlers.clear()

    queue_handler = QueueLogHandler(agent_log_queue)
    # Use the same format as the server for consistency.
    fmt = logging.Formatter('%(asctime)s - [%(name)-24s] - %(levelname)-8s - %(message)s', datefmt='%H:%M:%S')
    queue_handler.setFormatter(fmt)
    agent_logger.addHandler(queue_handler)
    agent_logger.propagate = False
    agent_logger.info("Agent logging configured to use agent_log_queue.")


configure_agent_logging()


class Agent:
    """Main Agent class that integrates LLM, memory, and tools. This is the concrete implementation."""

    def __init__(self, working_dir: str = None):
        # Memory manager owns all persisted agent state under working_dir.
        self.memory_manager = MemoryManager(working_dir)
        self.tool_manager = ToolManager(self.memory_manager)
        self.llm_client = LLMClient()
        self._initialized = False
        agent_logger.info("Agent instance created.")
        agent_logger.debug(f"Agent working directory: {working_dir}")

    async def initialize(self):
        """Initialize the agent, loading any persistent memory."""
        if not self._initialized:
            agent_logger.info("Initializing agent memory manager...")
            await self.memory_manager.initialize()
            self._initialized = True
            agent_logger.info("Agent initialized successfully.")

    async def reset_all_memory(self):
        """Reset all agent memory types."""
        await self.memory_manager.reset_temp_memory()
        await self.memory_manager.reset_context()
        agent_logger.info("All agent memory has been reset.")

    async def process_messages(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Process incoming messages and generate a response with tool usage."""
        await self.initialize()
        agent_logger.info(f"Processing {len(messages)} messages.")

        # Record the incoming chat in the context memory.
        for msg in messages:
            content = f"{msg['username']}: {msg['text']}"
            await self.memory_manager.add_context_entry("user", content)

        # Push the refreshed context to any connected admin clients.
        context_messages = await self.memory_manager.get_recent_context()
        await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": context_messages})

        # One detailed context entry tracks this whole processing pass;
        # it is updated in place (via entry_id) as the stages complete.
        entry_id = await self.memory_manager.add_detailed_context_entry(
            input_messages=messages, prompt="Processing started", llm_response="",
            tool_executions=[], final_response="Processing started"
        )

        context = await self.memory_manager.get_full_context()
        tool_descriptions = self.tool_manager.get_tool_descriptions()

        # --- CORRECTED HISTORY GATHERING ---
        # Collect prior "speak" results (newest first) to show the model
        # what it has already said.
        history = await self.memory_manager.get_detailed_context_history()
        spoken = []
        for past in reversed(history):
            if past.get("type") == "llm_interaction":
                for tool in past.get("tool_executions", []):
                    if tool.get("name") == "speak" and tool.get("result"):
                        spoken.append(tool["result"])

        # Build the LLM prompt from the on-disk template.
        # NOTE(review): raises FileNotFoundError if prompt_template.txt is
        # missing next to the memory dir — confirm a caller guarantees it.
        template_path = Path(self.memory_manager.memory_dir).parent / "prompt_template.txt"
        with open(template_path, 'r', encoding='utf-8') as f:
            template_text = f.read()

        speak_history_text = "\n".join([f"- {response}" for response in spoken[:5]]) if spoken else "You haven't said anything yet."
        user_messages_text = "\n".join([f"{msg['username']}: {msg['text']}" for msg in messages])

        prompt = template_text.format(
            full_context=context,
            tool_descriptions=tool_descriptions,
            recent_speak_history=speak_history_text,
            user_messages=user_messages_text
        )

        await self.memory_manager.add_detailed_context_entry(
            input_messages=messages, prompt=prompt, llm_response="", tool_executions=[],
            final_response="Prompt sent to LLM", entry_id=entry_id
        )

        response_text = await self.llm_client.generate(prompt)
        agent_logger.debug(f"LLM raw response: {response_text[:100] if response_text else 'None'}...")

        await self.memory_manager.add_detailed_context_entry(
            input_messages=messages, prompt=prompt, llm_response=response_text, tool_executions=[],
            final_response="LLM response received", entry_id=entry_id
        )

        result = {
            "input_messages": messages, "llm_response": response_text,
            "tool_executions": [], "final_response": ""
        }

        # Parse and execute any tool calls the model emitted.
        if response_text:
            for call in self._parse_tool_calls(response_text):
                agent_logger.info(f"Executing tool: {call['name']}")
                await self._execute_parsed_tool(call, result)

        await self.memory_manager.add_detailed_context_entry(
            input_messages=messages, prompt=prompt, llm_response=response_text,
            tool_executions=result["tool_executions"],
            final_response=result["final_response"], entry_id=entry_id
        )

        # Broadcast the post-processing context so clients stay in sync.
        final_context = await self.memory_manager.get_recent_context()
        await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": final_context})

        agent_logger.info("Message processing completed.")
        return result

    async def _execute_parsed_tool(self, tool_call: Dict[str, Any], processing_result: Dict[str, Any]):
        """Execute a parsed tool call and update processing result."""
        try:
            outcome = await self.execute_tool(tool_call["name"], tool_call["params"])
            tool_call["result"] = outcome
            # "speak" output doubles as the user-facing final response.
            if tool_call["name"] == "speak":
                processing_result["final_response"] = tool_call["params"].get("text", "")
            processing_result["tool_executions"].append(tool_call)
        except Exception as e:
            # Record the failure on the call itself rather than aborting the pass.
            tool_call["error"] = str(e)
            processing_result["tool_executions"].append(tool_call)
            agent_logger.error(f"Error executing tool {tool_call['name']}: {e}")

    def _parse_tool_calls(self, text: str) -> List[Dict[str, Any]]:
        """Parse tool calls using ast.literal_eval for robustness."""
        import ast
        parsed = []
        text = text.strip()
        # Only the speak(text=...) form is recognized here.
        if text.startswith("speak(") and text.endswith(")"):
            try:
                # Extract the content inside speak(...), e.g. "text='Hello'".
                args_src = text[len("speak("):-1].strip()

                # Ensure it's a text=... call.
                if not args_src.startswith("text="):
                    return []

                # Get the quoted string part.
                literal_src = args_src[len("text="):].strip()

                # Use ast.literal_eval to safely parse the Python string literal
                # (never eval: the text comes from the model and is untrusted).
                value = ast.literal_eval(literal_src)

                if isinstance(value, str):
                    parsed.append({
                        "name": "speak",
                        "params": {"text": value}
                    })

            except (ValueError, SyntaxError, TypeError) as e:
                agent_logger.warning(f"Could not parse tool call using ast.literal_eval: {text}. Error: {e}")

        return parsed

    async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Any:
        """Execute a registered tool."""
        await self.initialize()
        agent_logger.debug(f"Executing tool: {tool_name} with params: {params}")
        result = await self.tool_manager.execute_tool(tool_name, params)
        agent_logger.debug(f"Tool execution result: {result}")
        return result
|
@@ -0,0 +1,30 @@
|
|
1
|
+
# agent/factory.py
"""Factory for creating agent instances."""

from .base import BaseAgent
# NOTE(review): this diff moves config.py to core/config.py, so from agent/
# the path would be ..core.config — confirm ..config still resolves in 0.2.1.
from ..config import config_manager


async def create_agent() -> BaseAgent:
    """Create an agent instance based on the configuration."""
    agent_type = config_manager.settings.agent_type

    if agent_type == "builtin":
        # NOTE(review): builtin_agent.py was removed in 0.2.1 (now
        # services/builtin.py) — verify this import path is still valid.
        from ..builtin_agent import local_agent, BuiltinAgentWrapper, initialize_builtin_agent
        if local_agent is None:
            # Try to initialize the builtin agent
            await initialize_builtin_agent()
            # Re-import local_agent after initialization
            from ..builtin_agent import local_agent
            if local_agent is None:
                raise RuntimeError("Failed to initialize Builtin agent")
        return BuiltinAgentWrapper(local_agent)

    if agent_type == "letta":
        # NOTE(review): letta.py was moved to services/letta.py in 0.2.1 —
        # verify this import path is still valid.
        from ..letta import get_letta_agent, initialize_letta_client
        # Try to initialize the letta client
        initialize_letta_client()
        agent = get_letta_agent()
        await agent.initialize()
        return agent

    raise ValueError(f"Unknown agent type: {agent_type}")
|