neuro-simulator 0.4.3-py3-none-any.whl → 0.5.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuro_simulator/chatbot/__init__.py +0 -0
- neuro_simulator/chatbot/core.py +235 -0
- neuro_simulator/chatbot/llm.py +105 -0
- neuro_simulator/chatbot/memory/__init__.py +0 -0
- neuro_simulator/chatbot/memory/manager.py +90 -0
- neuro_simulator/chatbot/nickname_gen/__init__.py +0 -0
- neuro_simulator/chatbot/nickname_gen/generator.py +144 -0
- neuro_simulator/chatbot/prompts/__init__.py +0 -0
- neuro_simulator/chatbot/tools/__init__.py +0 -0
- neuro_simulator/chatbot/tools/add_temp_memory.py +51 -0
- neuro_simulator/chatbot/tools/base.py +47 -0
- neuro_simulator/chatbot/tools/manager.py +100 -0
- neuro_simulator/chatbot/tools/post_chat_message.py +49 -0
- neuro_simulator/config.yaml.example +16 -56
- neuro_simulator/core/application.py +38 -33
- neuro_simulator/core/config.py +15 -30
- neuro_simulator/core/path_manager.py +30 -9
- neuro_simulator/services/stream.py +4 -0
- neuro_simulator/utils/queue.py +13 -1
- {neuro_simulator-0.4.3.dist-info → neuro_simulator-0.5.0.dist-info}/METADATA +1 -1
- {neuro_simulator-0.4.3.dist-info → neuro_simulator-0.5.0.dist-info}/RECORD +24 -12
- neuro_simulator/services/audience.py +0 -102
- {neuro_simulator-0.4.3.dist-info → neuro_simulator-0.5.0.dist-info}/WHEEL +0 -0
- {neuro_simulator-0.4.3.dist-info → neuro_simulator-0.5.0.dist-info}/entry_points.txt +0 -0
- {neuro_simulator-0.4.3.dist-info → neuro_simulator-0.5.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,47 @@
+# neuro_simulator/chatbot/tools/base.py
+"""Base classes and definitions for the chatbot tool system."""
+
+from abc import ABC, abstractmethod
+from typing import Dict, Any, List
+
+class BaseChatbotTool(ABC):
+    """
+    Abstract base class for all chatbot tools.
+    """
+
+    @property
+    @abstractmethod
+    def name(self) -> str:
+        """The unique name of the tool."""
+        pass
+
+    @property
+    @abstractmethod
+    def description(self) -> str:
+        """A concise description of what the tool does, intended for use by an LLM."""
+        pass
+
+    @property
+    @abstractmethod
+    def parameters(self) -> List[Dict[str, Any]]:
+        """
+        A list of dictionaries describing the tool's parameters.
+        """
+        pass
+
+    @abstractmethod
+    async def execute(self, **kwargs: Any) -> Dict[str, Any]:
+        """
+        The method that executes the tool's logic.
+        """
+        pass
+
+    def get_schema(self) -> Dict[str, Any]:
+        """
+        Returns a serializable dictionary representing the tool's public schema.
+        """
+        return {
+            "name": self.name,
+            "description": self.description,
+            "parameters": self.parameters
+        }
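For orientation, a concrete subclass of the BaseChatbotTool added above only has to fill in the three abstract properties and execute(); get_schema() then assembles the dictionary handed to the LLM. A minimal sketch (the EchoTool class and its single parameter are illustrative and not part of the package; the memory_manager argument mirrors how the tool manager instantiates tools):

from typing import Any, Dict, List

from neuro_simulator.chatbot.tools.base import BaseChatbotTool


class EchoTool(BaseChatbotTool):
    """Hypothetical example tool that simply echoes its input back."""

    def __init__(self, memory_manager=None):
        # ChatbotToolManager passes a ChatbotMemoryManager here; unused in this sketch.
        self.memory_manager = memory_manager

    @property
    def name(self) -> str:
        return "echo"

    @property
    def description(self) -> str:
        return "Echoes the given text back to the caller."

    @property
    def parameters(self) -> List[Dict[str, Any]]:
        return [{"name": "text", "type": "string", "description": "Text to echo.", "required": True}]

    async def execute(self, **kwargs: Any) -> Dict[str, Any]:
        return {"status": "success", "echo": kwargs.get("text", "")}

# EchoTool().get_schema() returns:
# {"name": "echo",
#  "description": "Echoes the given text back to the caller.",
#  "parameters": [{"name": "text", "type": "string", "description": "Text to echo.", "required": True}]}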
@@ -0,0 +1,100 @@
+# neuro_simulator/chatbot/tools/manager.py
+"""The central tool manager for the chatbot agent."""
+
+import importlib
+import inspect
+import json
+import logging
+import os
+from pathlib import Path
+from typing import Any, Dict, List
+
+from neuro_simulator.core.path_manager import path_manager
+from .base import BaseChatbotTool
+from ..memory.manager import ChatbotMemoryManager
+
+logger = logging.getLogger(__name__.replace("neuro_simulator", "chatbot", 1))
+
+class ChatbotToolManager:
+    """
+    Acts as a central registry and executor for all available chatbot tools.
+    """
+
+    def __init__(self, memory_manager: ChatbotMemoryManager):
+        self.memory_manager = memory_manager
+        self.tools_dir = path_manager.chatbot_builtin_tools_dir
+        self.tools: Dict[str, BaseChatbotTool] = {}
+        self.agent_tool_allocations: Dict[str, List[str]] = {}
+
+    def load_tools(self):
+        """Dynamically scans the tools directory, imports modules, and registers tool instances."""
+        logger.info(f"Loading chatbot tools from: {self.tools_dir}")
+        self.tools = {}
+        if not self.tools_dir.exists():
+            logger.warning(f"Chatbot tools directory not found: {self.tools_dir}")
+            return
+
+        for filename in os.listdir(self.tools_dir):
+            if filename.endswith('.py') and not filename.startswith('__'):
+                module_path = self.tools_dir / filename
+                spec = importlib.util.spec_from_file_location(module_path.stem, module_path)
+                if spec and spec.loader:
+                    try:
+                        module = importlib.util.module_from_spec(spec)
+                        spec.loader.exec_module(module)
+                        for _, cls in inspect.getmembers(module, inspect.isclass):
+                            if issubclass(cls, BaseChatbotTool) and cls is not BaseChatbotTool:
+                                tool_instance = cls(memory_manager=self.memory_manager)
+                                if tool_instance.name in self.tools:
+                                    logger.warning(f"Duplicate chatbot tool name '{tool_instance.name}' found. Overwriting.")
+                                self.tools[tool_instance.name] = tool_instance
+                                logger.info(f"Successfully loaded chatbot tool: {tool_instance.name}")
+                    except Exception as e:
+                        logger.error(f"Failed to load chatbot tool from {module_path}: {e}", exc_info=True)
+        self._load_allocations()
+
+    def _load_allocations(self):
+        """Loads tool allocations from JSON files, creating defaults if they don't exist."""
+        default_allocations = {
+            "chatbot_agent": ["post_chat_message"],
+            "chatbot_memory_agent": ["add_temp_memory"]  # Add more memory tools later
+        }
+
+        # Load actor agent allocations
+        if path_manager.chatbot_agent_tools_path.exists():
+            with open(path_manager.chatbot_agent_tools_path, 'r', encoding='utf-8') as f:
+                self.agent_tool_allocations['chatbot_agent'] = json.load(f)
+        else:
+            self.agent_tool_allocations['chatbot_agent'] = default_allocations['chatbot_agent']
+            with open(path_manager.chatbot_agent_tools_path, 'w', encoding='utf-8') as f:
+                json.dump(default_allocations['chatbot_agent'], f, indent=2)
+
+        # Load thinker agent allocations
+        if path_manager.chatbot_memory_agent_tools_path.exists():
+            with open(path_manager.chatbot_memory_agent_tools_path, 'r', encoding='utf-8') as f:
+                self.agent_tool_allocations['chatbot_memory_agent'] = json.load(f)
+        else:
+            self.agent_tool_allocations['chatbot_memory_agent'] = default_allocations['chatbot_memory_agent']
+            with open(path_manager.chatbot_memory_agent_tools_path, 'w', encoding='utf-8') as f:
+                json.dump(default_allocations['chatbot_memory_agent'], f, indent=2)
+
+        logger.info(f"Chatbot tool allocations loaded: {self.agent_tool_allocations}")
+
+    def get_tool_schemas_for_agent(self, agent_name: str) -> List[Dict[str, Any]]:
+        """Gets the tool schemas for a specific agent based on its allocation."""
+        allowed_names = set(self.agent_tool_allocations.get(agent_name, []))
+        if not allowed_names:
+            return []
+        return [tool.get_schema() for tool in self.tools.values() if tool.name in allowed_names]
+
+    async def execute_tool(self, tool_name: str, **kwargs: Any) -> Dict[str, Any]:
+        if tool_name not in self.tools:
+            logger.error(f"Attempted to execute non-existent chatbot tool: {tool_name}")
+            return {"error": f"Tool '{tool_name}' not found."}
+        tool = self.tools[tool_name]
+        try:
+            result = await tool.execute(**kwargs)
+            return result
+        except Exception as e:
+            logger.error(f"Error executing chatbot tool '{tool_name}' with params {kwargs}: {e}", exc_info=True)
+            return {"error": f"An unexpected error occurred: {str(e)}"}
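The manager's lifecycle is: construct it with a memory manager, call load_tools() once, then query schemas per agent and dispatch calls by name. A rough usage sketch, assuming a ChatbotMemoryManager can be constructed directly (its real constructor lives in chatbot/memory/manager.py and may take arguments):

import asyncio

from neuro_simulator.chatbot.memory.manager import ChatbotMemoryManager
from neuro_simulator.chatbot.tools.manager import ChatbotToolManager

async def main() -> None:
    memory_manager = ChatbotMemoryManager()   # assumed constructible without arguments
    manager = ChatbotToolManager(memory_manager)
    manager.load_tools()                      # scans the builtin_tools dir and loads tools.json allocations

    # Schemas exposed to the "chatbot_agent" role (default allocation: ["post_chat_message"])
    print(manager.get_tool_schemas_for_agent("chatbot_agent"))

    # Dispatch by name; failures come back as {"error": ...} rather than being raised
    result = await manager.execute_tool("post_chat_message", text="hello chat!")
    print(result)                             # {"status": "success", "text_to_post": "hello chat!"}

asyncio.run(main())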
@@ -0,0 +1,49 @@
+# neuro_simulator/chatbot/tools/post_chat_message.py
+"""The Post Chat Message tool for the chatbot agent."""
+
+import logging
+from typing import Dict, Any, List
+
+from neuro_simulator.chatbot.tools.base import BaseChatbotTool
+from neuro_simulator.chatbot.memory.manager import ChatbotMemoryManager
+
+logger = logging.getLogger(__name__.replace("neuro_simulator", "chatbot", 1))
+
+class PostChatMessageTool(BaseChatbotTool):
+    """Tool for the chatbot to post a message to the stream chat."""
+
+    def __init__(self, memory_manager: ChatbotMemoryManager):
+        self.memory_manager = memory_manager
+
+    @property
+    def name(self) -> str:
+        return "post_chat_message"
+
+    @property
+    def description(self) -> str:
+        return "Posts a text message to the stream chat, as if you are a viewer."
+
+    @property
+    def parameters(self) -> List[Dict[str, Any]]:
+        return [
+            {
+                "name": "text",
+                "type": "string",
+                "description": "The content of the chat message to post.",
+                "required": True,
+            }
+        ]
+
+    async def execute(self, **kwargs: Any) -> Dict[str, Any]:
+        """
+        Executes the action. This tool doesn't *actually* send the message,
+        it just structures the output for the core agent logic to handle.
+        """
+        text = kwargs.get("text")
+        if not isinstance(text, str) or not text:
+            raise ValueError("The 'text' parameter must be a non-empty string.")
+
+        logger.info(f"Chatbot decided to say: {text}")
+        # The result is the text to be posted. The core agent will combine this
+        # with a generated nickname before sending it to the stream.
+        return {"status": "success", "text_to_post": text}
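Because execute() only validates and returns the text, the tool above can be exercised in isolation; the nickname is attached later by the core agent. A quick sketch (passing memory_manager=None works only because execute() never touches it):

import asyncio

from neuro_simulator.chatbot.tools.post_chat_message import PostChatMessageTool

async def demo() -> None:
    tool = PostChatMessageTool(memory_manager=None)  # the real caller passes a ChatbotMemoryManager
    result = await tool.execute(text="Pog that was clean")
    print(result)  # {"status": "success", "text_to_post": "Pog that was clean"}

asyncio.run(demo())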
neuro_simulator/config.yaml.example CHANGED
@@ -53,6 +53,22 @@ agent:
   # Model used by the Agent; remember to change it when switching between gemini/openai
   agent_model: "gemini-2.5-flash-lite"
 
+# --- Built-in Chatbot Agent settings ---
+# Configure this only when enabling the new stateful Chatbot Agent
+chatbot_agent:
+  # API provider for the Agent; supports "gemini" and "openai"
+  agent_provider: "gemini"
+  # Model used by the Agent; a fast, low-cost model is recommended
+  agent_model: "gemini-1.5-flash-latest"
+  # Chatbot Agent generation interval (seconds)
+  generation_interval_sec: 3
+  # Number of messages generated per batch
+  chats_per_batch: 2
+  # Nickname generation settings
+  nickname_generation:
+    enable_dynamic_pool: true # Toggle: whether to dynamically generate the word pool at startup
+    dynamic_pool_size: 50 # Size of the dynamically generated pool (50 adjectives and 50 nouns)
+
 # --- Neuro behavior and pacing control ---
 neuro_behavior:
   # Input chat sample size - number of audience chat messages sampled each time a Neuro reply is generated; should not be too long
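The chatbot_agent block added above maps one-to-one onto the ChatbotAgentSettings and NicknameGenerationSettings models introduced in core/config.py later in this diff. A standalone validation sketch (using PyYAML and pydantic directly instead of the package's ConfigManager, and assuming chatbot_agent is a top-level key in config.yaml):

import yaml
from pydantic import BaseModel

class NicknameGenerationSettings(BaseModel):
    enable_dynamic_pool: bool
    dynamic_pool_size: int

class ChatbotAgentSettings(BaseModel):
    agent_provider: str
    agent_model: str
    generation_interval_sec: int
    chats_per_batch: int
    nickname_generation: NicknameGenerationSettings

with open("config.yaml", encoding="utf-8") as f:
    raw = yaml.safe_load(f)

settings = ChatbotAgentSettings(**raw["chatbot_agent"])
print(settings.generation_interval_sec)  # 3 with the example values above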
@@ -64,62 +80,6 @@ neuro_behavior:
   # Initial greeting - the system prompt given to Neuro when the stream starts
   initial_greeting: "The stream has just started. Greet your audience and say hello!"
 
-# --- Chatbot configuration ---
-audience_simulation:
-  # LLM provider - the AI service used to generate audience chat; must be 'gemini' or 'openai'
-  llm_provider: "gemini"
-
-  # Gemini model - the specific model name when using the Gemini service
-  # gemma-3-27b-it is recommended; it allows 14,000 free calls per day (quota resets at 15:00 GMT+8)
-  gemini_model: "gemma-3-27b-it"
-
-  # OpenAI model - the specific model name when using the OpenAI service
-  # SiliconFlow is recommended; models of 9B and below are free with unlimited calls (mind the TPM limit)
-  openai_model: "THUDM/GLM-4-9B-0414"
-
-  # LLM temperature - controls the randomness of generated content; higher is more random (between 0 and 2)
-  llm_temperature: 0.7
-
-  # Chat generation interval (seconds) - how often the Chatbot is called to generate new audience chat
-  chat_generation_interval_sec: 2
-
-  # Chats per batch - the number of chat messages generated per Chatbot call
-  chats_per_batch: 3
-
-  # Max output tokens - the maximum number of tokens allowed per Chatbot call
-  max_output_tokens: 300
-
-  # Chatbot prompt template - the prompt used to guide the AI in generating audience chat
-  # {neuro_speech} and {num_chats_to_generate} are replaced dynamically
-  prompt_template: |
-    You are a Twitch live stream viewer. Your goal is to generate short, realistic, and relevant chat messages.
-    The streamer, Neuro-Sama, just said the following:
-    ---
-    {neuro_speech}
-    ---
-    Based on what Neuro-Sama said, generate a variety of chat messages. Your messages should be:
-    - Directly reacting to her words.
-    - Asking follow-up questions.
-    - Using relevant Twitch emotes (like LUL, Pog, Kappa, etc.).
-    - General banter related to the topic.
-    - Short and punchy, like real chat messages.
-    Do NOT act as the streamer. Do NOT generate full conversations.
-    Generate exactly {num_chats_to_generate} distinct chat messages. Each message must be prefixed with a DIFFERENT fictional username, like 'ChatterBoy: message text', 'EmoteFan: message text'.
-
-  # Username blocklist - when these usernames are detected they are replaced with names from username_pool
-  username_blocklist: ["ChatterBoy", "EmoteFan", "Username", "User"]
-
-  # Username pool - candidates used to replace blocklisted usernames or to supply one when the Chatbot LLM omits it
-  username_pool:
-    - "ChatterBox"
-    - "EmoteLord"
-    - "QuestionMark"
-    - "StreamFan"
-    - "PixelPundit"
-    - "CodeSage"
-    - "DataDiver"
-    - "ByteBard"
-
 # --- Audio synthesis (TTS) configuration ---
 tts:
   # Voice name - do not change this setting
neuro_simulator/core/application.py CHANGED
@@ -18,6 +18,7 @@ from starlette.websockets import WebSocketState
 from .config import config_manager, AppSettings
 from ..core.agent_factory import create_agent
 from ..agent.core import Agent as LocalAgent
+from ..chatbot.core import ChatbotAgent
 from ..services.letta import LettaAgent
 from ..services.builtin import BuiltinAgentWrapper
 
@@ -25,7 +26,6 @@ from ..services.builtin import BuiltinAgentWrapper
 from ..api.system import router as system_router
 
 # --- Services and Utilities ---
-from ..services.audience import AudienceChatbotManager, get_dynamic_audience_prompt
 from ..services.audio import synthesize_audio_segment
 from ..services.stream import live_stream_manager
 from ..utils.logging import configure_server_logging, server_log_queue, agent_log_queue
@@ -37,6 +37,7 @@ from ..utils.queue import (
     is_neuro_input_queue_empty,
     get_all_neuro_input_chats,
     initialize_queues,
+    get_recent_audience_chats_for_chatbot,
 )
 from ..utils.state import app_state
 from ..utils.websocket import connection_manager
@@ -64,7 +65,7 @@ app.include_router(system_router)
 
 # --- Background Task Definitions ---
 
-
+chatbot_agent: ChatbotAgent = None
 
 async def broadcast_events_task():
     """Broadcasts events from the live_stream_manager's queue to all clients."""
@@ -79,48 +80,52 @@ async def broadcast_events_task():
         logger.error(f"Error in broadcast_events_task: {e}", exc_info=True)
 
 async def fetch_and_process_audience_chats():
-    """Generates a batch of audience chat messages."""
-    if not
+    """Generates a batch of audience chat messages using the new ChatbotAgent."""
+    if not chatbot_agent:
         return
     try:
-
-
-
-
+        # Get context for the chatbot
+        current_neuro_speech = app_state.neuro_last_speech
+        context_message = current_neuro_speech if current_neuro_speech else "The stream is starting! Let's say hello and get the hype going!"
+        recent_history = get_recent_audience_chats_for_chatbot(limit=10)
+
+        # Generate messages
+        generated_messages = await chatbot_agent.generate_chat_messages(
+            neuro_speech=context_message,
+            recent_history=recent_history
         )
 
-
-
-
-
-
-            username = username_raw.strip()
-            if username in config_manager.settings.audience_simulation.username_blocklist:
-                username = random.choice(config_manager.settings.audience_simulation.username_pool)
-            if username and text.strip():
-                parsed_chats.append({"username": username, "text": text.strip()})
-            elif line:
-                parsed_chats.append({"username": random.choice(config_manager.settings.audience_simulation.username_pool), "text": line})
-
-        chats_to_broadcast = parsed_chats[:config_manager.settings.audience_simulation.chats_per_batch]
-
-        for chat in chats_to_broadcast:
+        if not generated_messages:
+            return
+
+        # Process and broadcast generated messages
+        for chat in generated_messages:
             add_to_audience_buffer(chat)
             add_to_neuro_input_queue(chat)
             broadcast_message = {"type": "chat_message", **chat, "is_user_message": False}
             await connection_manager.broadcast(broadcast_message)
-
+            # Stagger the messages slightly to feel more natural
+            await asyncio.sleep(random.uniform(0.2, 0.8))
+
     except Exception as e:
-        logger.error(f"Error in fetch_and_process_audience_chats: {e}", exc_info=True)
+        logger.error(f"Error in new fetch_and_process_audience_chats: {e}", exc_info=True)
 
 async def generate_audience_chat_task():
     """Periodically triggers the audience chat generation task."""
     while True:
         try:
+            # Wait until the live phase starts
+            # await app_state.live_phase_started_event.wait()
+
             asyncio.create_task(fetch_and_process_audience_chats())
-
+
+            # Use the interval from the new chatbot_agent config
+            await asyncio.sleep(config_manager.settings.chatbot_agent.generation_interval_sec)
         except asyncio.CancelledError:
             break
+        except Exception as e:
+            logger.error(f"Error in generate_audience_chat_task: {e}", exc_info=True)
+            await asyncio.sleep(10)  # Avoid fast-looping on persistent errors
 
 async def neuro_response_cycle():
     """The core response loop for the agent."""
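The rewritten loop above relies on only two methods of ChatbotAgent, whose implementation lives in chatbot/core.py and is not shown in this hunk. Judging from how the results are consumed here and in utils/queue.py, the assumed contract looks roughly like this (a sketch, not the actual class):

from typing import Dict, List, Protocol

class ChatbotAgentLike(Protocol):
    """Interface the audience-chat loop appears to assume."""

    async def initialize(self) -> None:
        ...

    async def generate_chat_messages(
        self, neuro_speech: str, recent_history: List[Dict[str, str]]
    ) -> List[Dict[str, str]]:
        # Expected to return dicts shaped like {"username": "...", "text": "..."},
        # ready to be buffered, queued for Neuro, and broadcast to clients.
        ...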
@@ -223,18 +228,18 @@ async def startup_event():
     # 2. Initialize queues now that config is loaded
     initialize_queues()
 
-    #
-    global
-
+    # 3. Initialize the new Chatbot Agent
+    global chatbot_agent
+    chatbot_agent = ChatbotAgent()
+    await chatbot_agent.initialize()
 
-    #
+    # 4. Register callbacks
     async def metadata_callback(settings: AppSettings):
         await live_stream_manager.broadcast_stream_metadata()
 
     config_manager.register_update_callback(metadata_callback)
-    config_manager.register_update_callback(chatbot_manager.handle_config_update)
 
-    #
+    # 5. Initialize main agent (which will load its own configs)
     try:
         await create_agent()
         logger.info(f"Successfully initialized agent type: {config_manager.settings.agent_type}")
neuro_simulator/core/config.py CHANGED
@@ -1,5 +1,6 @@
 # backend/config.py
 import shutil
+import sys
 from pathlib import Path
 import yaml
 from pydantic import BaseModel, Field
@@ -39,17 +40,7 @@ class NeuroBehaviorSettings(BaseModel):
     post_speech_cooldown_sec: float
     initial_greeting: str
 
-
-    llm_provider: str
-    gemini_model: str
-    openai_model: str
-    llm_temperature: float
-    chat_generation_interval_sec: int
-    chats_per_batch: int
-    max_output_tokens: int
-    prompt_template: str = Field(default="")
-    username_blocklist: List[str] = Field(default_factory=list)
-    username_pool: List[str] = Field(default_factory=list)
+
 
 class TTSSettings(BaseModel):
     voice_name: str
@@ -66,13 +57,25 @@ class ServerSettings(BaseModel):
     client_origins: List[str] = Field(default_factory=list)
     panel_password: Optional[str] = None
 
+class NicknameGenerationSettings(BaseModel):
+    enable_dynamic_pool: bool
+    dynamic_pool_size: int
+
+class ChatbotAgentSettings(BaseModel):
+    """Settings for the new chatbot agent"""
+    agent_provider: str
+    agent_model: str
+    generation_interval_sec: int
+    chats_per_batch: int
+    nickname_generation: NicknameGenerationSettings
+
 class AppSettings(BaseModel):
     api_keys: ApiKeysSettings = Field(default_factory=ApiKeysSettings)
     stream_metadata: StreamMetadataSettings
     agent_type: str  # either "letta" or "builtin"
     agent: AgentSettings
+    chatbot_agent: ChatbotAgentSettings
     neuro_behavior: NeuroBehaviorSettings
-    audience_simulation: AudienceSimSettings
     tts: TTSSettings
     performance: PerformanceSettings
     server: ServerSettings
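Since chatbot_agent is a required field on AppSettings with no default, configuration files written for 0.4.x will need the new section (config.yaml.example above shows the expected shape) unless the ConfigManager backfills it from the bundled template. Code reads the values through the existing singleton, e.g.:

from neuro_simulator.core.config import config_manager

interval = config_manager.settings.chatbot_agent.generation_interval_sec
pool_size = config_manager.settings.chatbot_agent.nickname_generation.dynamic_pool_size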
@@ -91,24 +94,6 @@ def _deep_update(source: dict, overrides: dict) -> dict:
             source[key] = overrides[key]
     return source
 
-class ConfigManager:
-    _instance = None
-
-    def __new__(cls):
-        if cls._instance is None:
-            cls._instance = super(ConfigManager, cls).__new__(cls)
-            cls._instance._initialized = False
-        return cls._instance
-
-    def __init__(self):
-        if self._initialized:
-            return
-        self.settings: AppSettings = self._load_settings()
-        self._update_callbacks = []
-        self._initialized = True
-
-    import sys
-
 class ConfigManager:
     _instance = None
 
neuro_simulator/core/path_manager.py CHANGED
@@ -11,33 +11,47 @@ class PathManager:
         """Initializes the PathManager and defines the directory structure."""
         self.working_dir = Path(working_dir).resolve()
 
-        #
+        # --- Main Agent Paths ---
        self.agents_dir = self.working_dir / "agents"
         self.assets_dir = self.working_dir / "assets"
-
-        # Agents subdirectories
         self.neuro_agent_dir = self.agents_dir / "neuro"
         self.memory_agent_dir = self.agents_dir / "memory_manager"
         self.shared_memories_dir = self.agents_dir / "memories"
         self.user_tools_dir = self.agents_dir / "tools"
         self.builtin_tools_dir = self.user_tools_dir / "builtin_tools"
 
-        # Agent-specific config files
-        self.neuro_config_path = self.neuro_agent_dir / "config.yaml"
         self.neuro_tools_path = self.neuro_agent_dir / "tools.json"
         self.neuro_history_path = self.neuro_agent_dir / "history.jsonl"
         self.neuro_prompt_path = self.neuro_agent_dir / "neuro_prompt.txt"
 
-        self.memory_agent_config_path = self.memory_agent_dir / "config.yaml"
         self.memory_agent_tools_path = self.memory_agent_dir / "tools.json"
         self.memory_agent_history_path = self.memory_agent_dir / "history.jsonl"
         self.memory_agent_prompt_path = self.memory_agent_dir / "memory_prompt.txt"
-
-        # Shared memory files
         self.init_memory_path = self.shared_memories_dir / "init_memory.json"
         self.core_memory_path = self.shared_memories_dir / "core_memory.json"
         self.temp_memory_path = self.shared_memories_dir / "temp_memory.json"
 
+        # --- Chatbot Agent Paths ---
+        self.chatbot_root_dir = self.working_dir / "chatbot"
+        self.chatbot_agent_dir = self.chatbot_root_dir / "chatbot"
+        self.chatbot_memory_agent_dir = self.chatbot_root_dir / "memory_agent"
+        self.chatbot_memories_dir = self.chatbot_root_dir / "memories"
+        self.chatbot_tools_dir = self.chatbot_root_dir / "tools"
+        self.chatbot_builtin_tools_dir = self.chatbot_tools_dir / "builtin_tools"
+        self.chatbot_nickname_data_dir = self.chatbot_root_dir / "nickname_gen" / "data"
+
+        self.chatbot_agent_prompt_path = self.chatbot_agent_dir / "chatbot_prompt.txt"
+        self.chatbot_agent_tools_path = self.chatbot_agent_dir / "tools.json"
+        self.chatbot_agent_history_path = self.chatbot_agent_dir / "history.jsonl"
+
+        self.chatbot_memory_agent_prompt_path = self.chatbot_memory_agent_dir / "memory_prompt.txt"
+        self.chatbot_memory_agent_tools_path = self.chatbot_memory_agent_dir / "tools.json"
+        self.chatbot_memory_agent_history_path = self.chatbot_memory_agent_dir / "history.jsonl"
+
+        self.chatbot_init_memory_path = self.chatbot_memories_dir / "init_memory.json"
+        self.chatbot_core_memory_path = self.chatbot_memories_dir / "core_memory.json"
+        self.chatbot_temp_memory_path = self.chatbot_memories_dir / "temp_memory.json"
+
     def initialize_directories(self):
         """Creates all necessary directories if they don't exist."""
         dirs_to_create = [
@@ -47,7 +61,14 @@
             self.memory_agent_dir,
             self.shared_memories_dir,
             self.user_tools_dir,
-            self.builtin_tools_dir
+            self.builtin_tools_dir,
+            self.chatbot_root_dir,
+            self.chatbot_agent_dir,
+            self.chatbot_memory_agent_dir,
+            self.chatbot_memories_dir,
+            self.chatbot_tools_dir,
+            self.chatbot_builtin_tools_dir,
+            self.chatbot_nickname_data_dir,
         ]
         for dir_path in dirs_to_create:
             os.makedirs(dir_path, exist_ok=True)
neuro_simulator/services/stream.py CHANGED
@@ -128,6 +128,10 @@ class LiveStreamManager:
             return time.time() - self._stream_start_global_time
         return 0.0
 
+    def get_current_phase(self) -> str:
+        """Gets the current stream phase."""
+        return self._current_phase
+
     def get_initial_state_for_client(self) -> dict:
         """Generates the initial state event for a newly connected client."""
         elapsed_time = self.get_elapsed_time()
neuro_simulator/utils/queue.py CHANGED
@@ -64,4 +64,16 @@ def get_all_neuro_input_chats() -> list[dict]:
 
 def is_neuro_input_queue_empty() -> bool:
     """Checks if the agent's input queue is empty."""
-    return not bool(neuro_input_queue)
+    return not bool(neuro_input_queue)
+
+def get_recent_audience_chats_for_chatbot(limit: int) -> list[dict]:
+    """Returns a list of recent chats formatted for the chatbot agent."""
+    recent_chats = list(audience_chat_buffer)[-limit:]
+    formatted_chats = []
+    for chat in recent_chats:
+        role = "user" if chat.get("is_user_message") else "assistant"
+        formatted_chats.append({
+            "role": role,
+            "content": f"{chat.get('username', 'unknown')}: {chat.get('text', '')}"
+        })
+    return formatted_chats
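The new helper turns raw audience-buffer entries into the role/content pairs the chatbot's LLM layer consumes: user-sent messages become "user" turns and generated ones become "assistant" turns. Illustratively (the buffer contents shown are made up):

from neuro_simulator.utils.queue import get_recent_audience_chats_for_chatbot

# Given buffer entries such as:
#   {"username": "viewer123", "text": "hi chat", "is_user_message": True}
#   {"username": "EmoteLord", "text": "LUL", "is_user_message": False}
recent = get_recent_audience_chats_for_chatbot(limit=10)
# recent would be:
#   [{"role": "user", "content": "viewer123: hi chat"},
#    {"role": "assistant", "content": "EmoteLord: LUL"}]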