neuro-simulator 0.5.3-py3-none-any.whl → 0.6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neuro_simulator/agent/llm.py +23 -19
- neuro_simulator/chatbot/core.py +10 -10
- neuro_simulator/chatbot/llm.py +22 -19
- neuro_simulator/chatbot/nickname_gen/generator.py +3 -3
- neuro_simulator/chatbot/tools/manager.py +10 -8
- neuro_simulator/cli.py +7 -12
- neuro_simulator/core/agent_factory.py +9 -18
- neuro_simulator/core/application.py +59 -56
- neuro_simulator/core/config.py +88 -301
- neuro_simulator/core/path_manager.py +7 -7
- neuro_simulator/dashboard/assets/{AgentView-0fzVkKxU.js → AgentView-DBq2msN_.js} +2 -2
- neuro_simulator/dashboard/assets/{ChatBotView-DifkK6-9.js → ChatBotView-BqQsuJUv.js} +2 -2
- neuro_simulator/dashboard/assets/ConfigView-CPYMgl_d.js +2 -0
- neuro_simulator/dashboard/assets/ConfigView-aFribfyR.css +1 -0
- neuro_simulator/dashboard/assets/{ContextTab-BAcGlJgU.js → ContextTab-BSROkcd2.js} +1 -1
- neuro_simulator/dashboard/assets/{ControlView-DnOBRK05.js → ControlView-BvflkxO-.js} +1 -1
- neuro_simulator/dashboard/assets/FieldRenderer-DyPAEyOT.js +1 -0
- neuro_simulator/dashboard/assets/LogsTab-C-SZhHdN.js +1 -0
- neuro_simulator/dashboard/assets/LogsView-82wOs2Pp.js +1 -0
- neuro_simulator/dashboard/assets/{MemoryTab-CT6mH7oh.js → MemoryTab-p3Q-Wa4e.js} +3 -3
- neuro_simulator/dashboard/assets/{ToolsTab-BAt1r6ui.js → ToolsTab-BxbFZhXs.js} +1 -1
- neuro_simulator/dashboard/assets/index-Ba5ZG3QB.js +52 -0
- neuro_simulator/dashboard/assets/{index-w9eUSFF9.css → index-CcYt9OR6.css} +1 -1
- neuro_simulator/dashboard/index.html +2 -2
- neuro_simulator/services/audio.py +55 -47
- neuro_simulator/services/builtin.py +3 -0
- neuro_simulator/services/stream.py +1 -1
- neuro_simulator/utils/queue.py +2 -2
- {neuro_simulator-0.5.3.dist-info → neuro_simulator-0.6.0.dist-info}/METADATA +1 -2
- {neuro_simulator-0.5.3.dist-info → neuro_simulator-0.6.0.dist-info}/RECORD +34 -35
- requirements.txt +1 -1
- neuro_simulator/config.yaml.example +0 -117
- neuro_simulator/dashboard/assets/ConfigView-BTWWYQah.js +0 -2
- neuro_simulator/dashboard/assets/FieldRenderer-CCuLNhHG.js +0 -1
- neuro_simulator/dashboard/assets/LogsTab-ylrlgzPh.js +0 -1
- neuro_simulator/dashboard/assets/LogsView-BtzSwkDM.js +0 -1
- neuro_simulator/dashboard/assets/index-BGoCthX_.js +0 -34
- neuro_simulator/services/letta.py +0 -254
- {neuro_simulator-0.5.3.dist-info → neuro_simulator-0.6.0.dist-info}/WHEEL +0 -0
- {neuro_simulator-0.5.3.dist-info → neuro_simulator-0.6.0.dist-info}/entry_points.txt +0 -0
- {neuro_simulator-0.5.3.dist-info → neuro_simulator-0.6.0.dist-info}/licenses/LICENSE +0 -0
neuro_simulator/agent/llm.py
CHANGED
@@ -33,33 +33,37 @@ class LLMClient:

        logger.info("First use of built-in agent's LLMClient, performing initialization...")
        settings = config_manager.settings
-       …
-       if …
-       …
+
+       provider_id = settings.neuro.llm_provider_id
+       if not provider_id:
+           raise ValueError("LLM Provider ID is not set for the agent.")
+
+       provider_config = next((p for p in settings.llm_providers if p.provider_id == provider_id), None)
+       if not provider_config:
+           raise ValueError(f"LLM Provider with ID '{provider_id}' not found in configuration.")
+
+       provider_type = provider_config.provider_type.lower()
+       self.model_name = provider_config.model_name
+
+       if provider_type == "gemini":
+           if not provider_config.api_key:
+               raise ValueError(f"API key for Gemini provider '{provider_config.display_name}' is not set.")
+           self.client = genai.Client(api_key=provider_config.api_key)
            self._generate_func = self._generate_gemini

-       elif …
-           raise ValueError("OPENAI_API_KEY is not set in configuration for the agent.")
-           self.model_name = settings.agent.agent_model
+       elif provider_type == "openai":
+           if not provider_config.api_key:
+               raise ValueError(f"API key for OpenAI provider '{provider_config.display_name}' is not set.")
            self.client = AsyncOpenAI(
-               api_key=api_key,
-               base_url=…
+               api_key=provider_config.api_key,
+               base_url=provider_config.base_url
            )
            self._generate_func = self._generate_openai
        else:
-           raise ValueError(f"Unsupported …
+           raise ValueError(f"Unsupported provider type in agent config: {provider_type}")

        self._initialized = True
-       logger.info(f"Agent LLM client initialized. Provider: {…
+       logger.info(f"Agent LLM client initialized. Provider: {provider_type.upper()}, Model: {self.model_name}")

    async def _generate_gemini(self, prompt: str, max_tokens: int) -> str:
        """Generates text using the Gemini model."""
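Both LLM clients in 0.6.0 resolve their provider by ID from a shared settings.llm_providers list instead of reading flat agent settings. Below is a minimal sketch of the entry shape that lookup assumes; the field names come from the diff above, while the class name, defaults, and sample values are illustrative guesses:

# Hypothetical sketch of an llm_providers entry (field names from the diff;
# the LLMProviderConfig class name and the sample values are assumptions).
from dataclasses import dataclass
from typing import Optional

@dataclass
class LLMProviderConfig:
    provider_id: str                 # matched against settings.neuro.llm_provider_id
    display_name: str                # used in error messages
    provider_type: str               # "gemini" or "openai"
    model_name: str
    api_key: Optional[str] = None
    base_url: Optional[str] = None   # only used by the OpenAI-compatible branch

llm_providers = [
    LLMProviderConfig(
        provider_id="main-gemini",
        display_name="Main Gemini",
        provider_type="gemini",
        model_name="gemini-1.5-flash",
        api_key="YOUR_API_KEY",
    ),
]

# The resolution step both clients perform:
provider_id = "main-gemini"
provider_config = next((p for p in llm_providers if p.provider_id == provider_id), None)
if provider_config is None:
    raise ValueError(f"LLM Provider with ID '{provider_id}' not found in configuration.")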
neuro_simulator/chatbot/core.py
CHANGED
@@ -63,7 +63,7 @@ class ChatbotAgent:
        package_source_dir = Path(__file__).parent.parent

        files_to_copy = {
-           "chatbot/prompts/chatbot_prompt.txt": path_manager.…
+           "chatbot/prompts/chatbot_prompt.txt": path_manager.chatbot_prompt_path,
            "chatbot/prompts/memory_prompt.txt": path_manager.chatbot_memory_agent_prompt_path,
            "chatbot/memory/init_memory.json": path_manager.chatbot_init_memory_path,
            "chatbot/memory/core_memory.json": path_manager.chatbot_core_memory_path,
@@ -132,17 +132,17 @@ class ChatbotAgent:

    async def _build_chatbot_prompt(self, neuro_speech: str, recent_history: List[Dict[str, str]]) -> str:
        """Builds the prompt for the Chatbot (Actor) LLM."""
-       with open(path_manager.…
+       with open(path_manager.chatbot_prompt_path, 'r', encoding='utf-8') as f:
            prompt_template = f.read()

-       tool_descriptions = self._format_tool_schemas_for_prompt('…
+       tool_descriptions = self._format_tool_schemas_for_prompt('chatbot')
        init_memory_text = json.dumps(self.memory_manager.init_memory, indent=2)
        core_memory_text = json.dumps(self.memory_manager.core_memory, indent=2)
        temp_memory_text = json.dumps(self.memory_manager.temp_memory, indent=2)
        recent_history_text = "\n".join([f"{msg.get('role')}: {msg.get('content')}" for msg in recent_history])

        from ..core.config import config_manager
-       chats_per_batch = config_manager.settings.…
+       chats_per_batch = config_manager.settings.chatbot.chats_per_batch

        return prompt_template.format(
            tool_descriptions=tool_descriptions,
@@ -186,20 +186,20 @@ class ChatbotAgent:
            params = tool_call.get("params", {})
            result = await self.tool_manager.execute_tool(tool_name, **params)

-           if agent_name == '…
+           if agent_name == 'chatbot' and tool_name == "post_chat_message" and result.get("status") == "success":
                text_to_post = result.get("text_to_post", "")
                if text_to_post:
                    nickname = self.nickname_generator.generate_nickname()
                    message = {"username": nickname, "text": text_to_post}
                    generated_messages.append(message)
-                   await self._append_to_history(path_manager.…
+                   await self._append_to_history(path_manager.chatbot_history_path, {'role': 'assistant', 'content': f"{nickname}: {text_to_post}"})
        logger.info(f"Returning generated messages: {generated_messages}")
        return generated_messages

    async def generate_chat_messages(self, neuro_speech: str, recent_history: List[Dict[str, str]]) -> List[Dict[str, str]]:
        """The main actor loop to generate chat messages."""
        for entry in recent_history:
-           await self._append_to_history(path_manager.…
+           await self._append_to_history(path_manager.chatbot_history_path, entry)

        prompt = await self._build_chatbot_prompt(neuro_speech, recent_history)
        response_text = await self.chatbot_llm.generate(prompt)
@@ -208,7 +208,7 @@ class ChatbotAgent:
        tool_calls = self._parse_tool_calls(response_text)
        if not tool_calls: return []

-       messages = await self._execute_tool_calls(tool_calls, '…
+       messages = await self._execute_tool_calls(tool_calls, 'chatbot')

        self.turn_counter += 1
        if self.turn_counter >= self.reflection_threshold:
@@ -220,7 +220,7 @@ class ChatbotAgent:
        """The main thinker loop to consolidate memories."""
        logger.info("Chatbot is reflecting on recent conversations...")
        self.turn_counter = 0
-       history = await self._read_history(path_manager.…
+       history = await self._read_history(path_manager.chatbot_history_path, limit=50)
        if len(history) < self.reflection_threshold:
            return

@@ -232,4 +232,4 @@ class ChatbotAgent:
        if not tool_calls: return

        await self._execute_tool_calls(tool_calls, 'chatbot_memory_agent')
-       logger.info("Chatbot memory consolidation complete.")
\ No newline at end of file
+       logger.info("Chatbot memory consolidation complete.")
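The hunks above also show the actor/thinker cadence: each generate_chat_messages() call bumps turn_counter, and once it reaches reflection_threshold a reflection pass consolidates memory and resets the counter. A standalone sketch of that gating logic, with a hypothetical class name and threshold value:

# Minimal model of the reflection gating implied by the diff (names and
# threshold value are illustrative, not taken from the package).
class ReflectionGate:
    def __init__(self, reflection_threshold: int = 3):
        self.turn_counter = 0
        self.reflection_threshold = reflection_threshold

    def record_turn(self) -> bool:
        """Count one actor turn; return True when a reflection pass is due."""
        self.turn_counter += 1
        if self.turn_counter >= self.reflection_threshold:
            self.turn_counter = 0
            return True
        return False

gate = ReflectionGate()
print([gate.record_turn() for _ in range(7)])
# -> [False, False, True, False, False, True, False]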
neuro_simulator/chatbot/llm.py
CHANGED
@@ -31,34 +31,37 @@ class ChatbotLLMClient:

        logger.info("First use of Chatbot's LLMClient, performing initialization...")
        settings = config_manager.settings
-       # Use the new chatbot_agent config block
-       provider = settings.chatbot_agent.agent_provider.lower()

-       …
+       provider_id = settings.chatbot.llm_provider_id
+       if not provider_id:
+           raise ValueError("LLM Provider ID is not set for the chatbot.")
+
+       provider_config = next((p for p in settings.llm_providers if p.provider_id == provider_id), None)
+       if not provider_config:
+           raise ValueError(f"LLM Provider with ID '{provider_id}' not found in configuration.")
+
+       provider_type = provider_config.provider_type.lower()
+       self.model_name = provider_config.model_name
+
+       if provider_type == "gemini":
+           if not provider_config.api_key:
+               raise ValueError(f"API key for Gemini provider '{provider_config.display_name}' is not set.")
+           self.client = genai.Client(api_key=provider_config.api_key)
            self._generate_func = self._generate_gemini

-       elif …
-           raise ValueError("OPENAI_API_KEY is not set in configuration for the chatbot agent.")
-           self.model_name = settings.chatbot_agent.agent_model
+       elif provider_type == "openai":
+           if not provider_config.api_key:
+               raise ValueError(f"API key for OpenAI provider '{provider_config.display_name}' is not set.")
            self.client = AsyncOpenAI(
-               api_key=api_key,
-               base_url=…
+               api_key=provider_config.api_key,
+               base_url=provider_config.base_url
            )
            self._generate_func = self._generate_openai
        else:
-           raise ValueError(f"Unsupported …
+           raise ValueError(f"Unsupported provider type in chatbot config: {provider_type}")

        self._initialized = True
-       logger.info(f"Chatbot LLM client initialized. Provider: {…
+       logger.info(f"Chatbot LLM client initialized. Provider: {provider_type.upper()}, Model: {self.model_name}")

    async def _generate_gemini(self, prompt: str, max_tokens: int) -> str:
        """Generates text using the Gemini model."""
neuro_simulator/chatbot/nickname_gen/generator.py
CHANGED
@@ -50,7 +50,7 @@ class NicknameGenerator:
        if not self.base_adjectives or not self.base_nouns:
            logger.warning("Base adjective or noun pools are empty. Nickname generation quality will be affected.")

-       if config_manager.settings.…
+       if config_manager.settings.chatbot.nickname_generation.enable_dynamic_pool:
            await self._populate_dynamic_pools()

        logger.info("NicknameGenerator initialized.")
@@ -58,7 +58,7 @@ class NicknameGenerator:
    async def _populate_dynamic_pools(self):
        """Uses an LLM to generate and populate the dynamic word pools."""
        logger.info("Attempting to populate dynamic nickname pools using LLM...")
-       pool_size = config_manager.settings.…
+       pool_size = config_manager.settings.chatbot.nickname_generation.dynamic_pool_size
        try:
            adj_prompt = f"Generate a list of {pool_size} diverse, cool-sounding English adjectives for online usernames. Output only the words, one per line."
            noun_prompt = f"Generate a list of {pool_size} diverse, cool-sounding English nouns for online usernames. Output only the words, one per line."
@@ -141,4 +141,4 @@ class NicknameGenerator:
                k=1
            )[0]

-       return chosen_strategy()
\ No newline at end of file
+       return chosen_strategy()
neuro_simulator/chatbot/tools/manager.py
CHANGED
@@ -1,5 +1,7 @@
 # neuro_simulator/chatbot/tools/manager.py
-"""…
+"""
+The central tool manager for the chatbot agent.
+"""

 import importlib
 import inspect
@@ -56,18 +58,18 @@ class ChatbotToolManager:
    def _load_allocations(self):
        """Loads tool allocations from JSON files, creating defaults if they don't exist."""
        default_allocations = {
-           "…
+           "chatbot": ["post_chat_message"],
            "chatbot_memory_agent": ["add_temp_memory"] # Add more memory tools later
        }

        # Load actor agent allocations
-       if path_manager.…
-           with open(path_manager.…
-               self.agent_tool_allocations['…
+       if path_manager.chatbot_tools_path.exists():
+           with open(path_manager.chatbot_tools_path, 'r', encoding='utf-8') as f:
+               self.agent_tool_allocations['chatbot'] = json.load(f)
        else:
-           self.agent_tool_allocations['…
-           with open(path_manager.…
-               json.dump(default_allocations['…
+           self.agent_tool_allocations['chatbot'] = default_allocations['chatbot']
+           with open(path_manager.chatbot_tools_path, 'w', encoding='utf-8') as f:
+               json.dump(default_allocations['chatbot'], f, indent=2)

        # Load thinker agent allocations
        if path_manager.chatbot_memory_agent_tools_path.exists():
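Per this hunk, each allocation file persists a plain JSON list of tool names for one agent role. A quick round-trip sketch, using a placeholder filename in place of whatever path_manager.chatbot_tools_path resolves to:

# Round-trip of an allocation file as _load_allocations reads and writes it
# (the filename here is a placeholder; the real path comes from path_manager).
import json
from pathlib import Path

tools_path = Path("chatbot_tools.json")
tools_path.write_text(json.dumps(["post_chat_message"], indent=2), encoding="utf-8")
print(json.loads(tools_path.read_text(encoding="utf-8")))  # ['post_chat_message']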
neuro_simulator/cli.py
CHANGED
@@ -48,8 +48,8 @@ def main():
                shutil.copy(src, dest)
                logging.info(f"Copied default file to {dest}")

-       # Copy config.yaml
-       copy_if_not_exists(package_source_path / "config.yaml…
+       # Copy config.yaml if it doesn't exist
+       copy_if_not_exists(package_source_path / "config.yaml", work_dir / "config.yaml")

        # Copy prompts
        copy_if_not_exists(package_source_path / "agent" / "neuro_prompt.txt", path_manager.path_manager.neuro_prompt_path)
@@ -60,12 +60,11 @@ def main():
        copy_if_not_exists(package_source_path / "agent" / "memory" / "init_memory.json", path_manager.path_manager.init_memory_path)
        copy_if_not_exists(package_source_path / "agent" / "memory" / "temp_memory.json", path_manager.path_manager.temp_memory_path)

-       # Copy default …
-       …
-       logging.info(f"Copied default asset directory to {destination_assets_dir}")
+       # Copy default video asset if it doesn't exist
+       copy_if_not_exists(
+           package_source_path / "assets" / "neuro_start.mp4",
+           path_manager.path_manager.assets_dir / "neuro_start.mp4"
+       )

    except Exception as e:
        logging.warning(f"Could not copy all default files: {e}")
@@ -78,10 +77,6 @@ def main():
    main_config_path = path_manager.path_manager.working_dir / "config.yaml"
    try:
        config_manager.load(str(main_config_path))
-   except FileNotFoundError:
-       logging.error(f"FATAL: Configuration file '{main_config_path.name}' not found.")
-       logging.error(f"If this is your first time, please rename 'config.yaml.example' to 'config.yaml' after filling it out.")
-       sys.exit(1)
    except ValidationError as e:
        logging.error(f"FATAL: Configuration error in '{main_config_path.name}':")
        logging.error(e)
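The first-run bootstrap above leans on a small copy_if_not_exists helper. Judging from the context lines (shutil.copy plus the "Copied default file" log), it plausibly looks like the sketch below; the parent-directory creation step is an assumption, not shown in the diff:

# Plausible shape of cli.py's copy_if_not_exists helper (mkdir step assumed).
import logging
import shutil
from pathlib import Path

def copy_if_not_exists(src: Path, dest: Path) -> None:
    if dest.exists():
        return  # never clobber a file the user already customized
    dest.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(src, dest)
    logging.info(f"Copied default file to {dest}")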
neuro_simulator/core/agent_factory.py
CHANGED
@@ -19,32 +19,23 @@ config_manager.register_update_callback(_reset_agent_on_config_update)

async def create_agent() -> BaseAgent:
    """
-   Factory function to create and initialize …
+   Factory function to create and initialize the agent instance.
    Returns a cached instance unless the configuration has changed.
    """
    global _agent_instance
    if _agent_instance is not None:
        return _agent_instance

-   …
-   logger.info(f"Creating new agent instance of type: {agent_type}")
+   logger.info(f"Creating new agent instance...")

-   …
-       from ..services.builtin import BuiltinAgentWrapper, initialize_builtin_agent
-       agent_impl = await initialize_builtin_agent()
-       if agent_impl is None:
-           raise RuntimeError("Failed to initialize the Builtin agent implementation.")
-       _agent_instance = BuiltinAgentWrapper(agent_impl)
-   elif agent_type == "letta":
-       from ..services.letta import LettaAgent
-       _agent_instance = LettaAgent()
+   from ..services.builtin import BuiltinAgentWrapper, initialize_builtin_agent

-   …
+   agent_impl = await initialize_builtin_agent()
+
+   if agent_impl is None:
+       raise RuntimeError("Failed to initialize the Builtin agent implementation.")
+
+   _agent_instance = BuiltinAgentWrapper(agent_impl)

    await _agent_instance.initialize()

neuro_simulator/core/application.py
CHANGED
@@ -18,7 +18,6 @@ from .config import config_manager, AppSettings
 from ..core.agent_factory import create_agent
 from ..agent.core import Agent as LocalAgent
 from ..chatbot.core import ChatbotAgent
-from ..services.letta import LettaAgent
 from ..services.builtin import BuiltinAgentWrapper

 # --- API Routers ---
@@ -71,7 +70,7 @@ app.include_router(system_router)

 # --- Background Task Definitions ---

-…
+chatbot: ChatbotAgent = None

 async def broadcast_events_task():
    """Broadcasts events from the live_stream_manager's queue to all clients."""
@@ -87,7 +86,7 @@

 async def fetch_and_process_audience_chats():
    """Generates a batch of audience chat messages using the new ChatbotAgent."""
-   if not …
+   if not chatbot:
        return
    try:
        # Get context for the chatbot
@@ -96,7 +95,7 @@
        recent_history = get_recent_audience_chats_for_chatbot(limit=10)

        # Generate messages
-       generated_messages = await …
+       generated_messages = await chatbot.generate_chat_messages(
            neuro_speech=context_message,
            recent_history=recent_history
        )
@@ -125,8 +124,8 @@ async def generate_audience_chat_task():

            asyncio.create_task(fetch_and_process_audience_chats())

-           # Use the interval from the new …
-           await asyncio.sleep(config_manager.settings.…
+           # Use the interval from the new chatbot config
+           await asyncio.sleep(config_manager.settings.chatbot.generation_interval_sec)
        except asyncio.CancelledError:
            break
        except Exception as e:
@@ -148,20 +147,14 @@ async def neuro_response_cycle():
                app_state.last_superchat_time = time.time()
                await connection_manager.broadcast({"type": "processing_superchat", "data": sc})

-               # …
-               …
-                   selected_chats = [
-                       {"role": "system", "content": "=== RANDOM 10 MSG IN CHATROOM ===\nNO MSG FETCH DUE TO UNPROCESSED HIGHLIGHTED MESSAGE"},
-                       {"role": "system", "content": f"=== HIGHLIGHTED MESSAGE ===\n{sc['username']}: {sc['text']}"}
-                   ]
-               else: # For BuiltinAgent and any other future agents
-                   selected_chats = [{'username': sc['username'], 'text': sc['text']}]
+               # For BuiltinAgent and any other future agents
+               selected_chats = [{'username': sc['username'], 'text': sc['text']}]

                # Clear the regular input queue to prevent immediate follow-up with normal chats
                get_all_neuro_input_chats()
            else:
                if is_first_response:
-                   add_to_neuro_input_queue({"username": "System", "text": config_manager.settings.…
+                   add_to_neuro_input_queue({"username": "System", "text": config_manager.settings.neuro.initial_greeting})
                    is_first_response = False
                elif is_neuro_input_queue_empty():
                    await asyncio.sleep(1)
@@ -170,7 +163,7 @@
            current_queue_snapshot = get_all_neuro_input_chats()
            if not current_queue_snapshot:
                continue
-           sample_size = min(config_manager.settings.…
+           sample_size = min(config_manager.settings.neuro.input_chat_sample_size, len(current_queue_snapshot))
            selected_chats = random.sample(current_queue_snapshot, sample_size)

            if not selected_chats:
@@ -192,7 +185,12 @@
            sentences = [s.strip() for s in re.split(r'(?<=[.!?])\s+', response_text.replace('\n', ' ')) if s.strip()]
            if not sentences: continue

-           …
+           tts_id = config_manager.settings.neuro.tts_provider_id
+           if not tts_id:
+               logger.warning("TTS Provider ID is not set for the agent. Skipping speech synthesis.")
+               continue
+
+           synthesis_tasks = [synthesize_audio_segment(s, tts_provider_id=tts_id) for s in sentences]
            synthesis_results = await asyncio.gather(*synthesis_tasks, return_exceptions=True)

            speech_packages = [
@@ -210,7 +208,7 @@
            await connection_manager.broadcast({"type": "neuro_speech_segment", "is_end": True})
            live_stream_manager.set_neuro_speaking_status(False)

-           await asyncio.sleep(config_manager.settings.…
+           await asyncio.sleep(config_manager.settings.neuro.post_speech_cooldown_sec)

        except asyncio.TimeoutError:
            logger.warning("Agent response timed out, skipping this cycle.")
@@ -271,10 +269,7 @@ async def startup_event():
    # 2. Initialize queues now that config is loaded
    initialize_queues()

-   # 3. …
-   global chatbot_agent
-   chatbot_agent = ChatbotAgent()
-   await chatbot_agent.initialize()
+   # 3. Chatbot Agent will be initialized on stream start.

    # 4. Register callbacks
    async def metadata_callback(settings: AppSettings):
@@ -285,7 +280,7 @@ async def startup_event():
    # 5. Initialize main agent (which will load its own configs)
    try:
        await create_agent()
-       logger.info(f"Successfully initialized agent …
+       logger.info(f"Successfully initialized agent.")
    except Exception as e:
        logger.critical(f"Agent initialization failed on startup: {e}", exc_info=True)

@@ -305,9 +300,9 @@ async def websocket_stream_endpoint(websocket: WebSocket):
    await connection_manager.connect(websocket)
    try:
        await connection_manager.send_personal_message(live_stream_manager.get_initial_state_for_client(), websocket)
-       await connection_manager.send_personal_message({"type": "update_stream_metadata", **config_manager.settings.…
+       await connection_manager.send_personal_message({"type": "update_stream_metadata", **config_manager.settings.stream.model_dump()}, websocket)

-       initial_chats = get_recent_audience_chats(config_manager.settings.…
+       initial_chats = get_recent_audience_chats(config_manager.settings.server.initial_chat_backlog_limit)
        for chat in initial_chats:
            await connection_manager.send_personal_message({"type": "chat_message", **chat, "is_user_message": False}, websocket)
            await asyncio.sleep(0.01)
@@ -494,6 +489,23 @@ async def handle_admin_ws_message(websocket: WebSocket, data: dict):

        # Stream Control Actions
        elif action == "start_stream":
+           # Validate that required providers are set before starting
+           agent_cfg = config_manager.settings.neuro
+           chatbot_cfg = config_manager.settings.chatbot
+           if not agent_cfg.llm_provider_id:
+               raise ValueError("Agent (Neuro) does not have an LLM Provider configured.")
+           if not agent_cfg.tts_provider_id:
+               raise ValueError("Agent (Neuro) does not have a TTS Provider configured.")
+           if not chatbot_cfg.llm_provider_id:
+               raise ValueError("Chatbot does not have an LLM Provider configured.")
+
+           # Initialize chatbot agent on first stream start if not already initialized
+           global chatbot
+           if chatbot is None:
+               logger.info("Initializing ChatbotAgent for the first time...")
+               chatbot = ChatbotAgent()
+               await chatbot.initialize()
+
            logger.info("Start stream action received. Resetting agent memory before starting processes...")
            await agent.reset_memory()
            if not process_manager.is_running:
@@ -549,34 +561,28 @@
            response["payload"] = context

        elif action == "get_last_prompt":
-           …
-               prompt = await agent_instance._build_neuro_prompt(messages_for_prompt)
-               response["payload"] = {"prompt": prompt}
-           except Exception as e:
-               logger.error(f"Error generating last prompt: {e}", exc_info=True)
-               response["payload"] = {"prompt": f"Failed to generate prompt: {e}"}
-
+           try:
+               # 1. Get the recent history from the agent itself
+               history = await agent.get_message_history(limit=10)
+
+               # 2. Reconstruct the 'messages' list that _build_neuro_prompt expects
+               messages_for_prompt = []
+               for entry in history:
+                   if entry.get('role') == 'user':
+                       # Content is in the format "username: text"
+                       content = entry.get('content', '')
+                       parts = content.split(':', 1)
+                       if len(parts) == 2:
+                           messages_for_prompt.append({'username': parts[0].strip(), 'text': parts[1].strip()})
+                       elif content: # Handle cases where there's no colon
+                           messages_for_prompt.append({'username': 'user', 'text': content})
+
+               # 3. Build the prompt using the agent's own internal logic
+               prompt = await agent.build_neuro_prompt(messages_for_prompt)
+               response["payload"] = {"prompt": prompt}
+           except Exception as e:
+               logger.error(f"Error generating last prompt: {e}", exc_info=True)
+               response["payload"] = {"prompt": f"Failed to generate prompt: {e}"}
        elif action == "reset_agent_memory":
            await agent.reset_memory()
            response["payload"] = {"status": "success"}
@@ -598,6 +604,3 @@ async def handle_admin_ws_message(websocket: WebSocket, data: dict):
            if request_id:
                response["payload"] = {"status": "error", "message": str(e)}
            await websocket.send_json(response)
-
-
-