neuro-simulator 0.2.2-py3-none-any.whl → 0.3.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
neuro_simulator/agent/core.py CHANGED
@@ -1,22 +1,19 @@
  # neuro_simulator/agent/core.py
  """
  Core module for the Neuro Simulator's built-in agent.
+ Implements a dual-LLM "Actor/Thinker" architecture for responsive interaction
+ and asynchronous memory consolidation.
  """

  import asyncio
  import json
  import logging
  import re
- import sys
  from pathlib import Path
- from datetime import datetime
- from typing import Any, Dict, List, Optional
+ from typing import Any, Dict, List

- # Updated imports for the new structure
  from ..utils.logging import QueueLogHandler, agent_log_queue
  from ..utils.websocket import connection_manager
-
- # --- Agent-specific imports ---
  from .llm import LLMClient
  from .memory.manager import MemoryManager
  from .tools.core import ToolManager
@@ -32,8 +29,7 @@ def configure_agent_logging():
      agent_logger.handlers.clear()

      agent_queue_handler = QueueLogHandler(agent_log_queue)
-     # Use the same format as the server for consistency
-     formatter = logging.Formatter('%(asctime)s - [%(name)-24s] - %(levelname)-8s - %(message)s', datefmt='%H:%M:%S')
+     formatter = logging.Formatter('%(asctime)s - [%(name)-32s] - %(levelname)-8s - %(message)s', datefmt='%H:%M:%S')
      agent_queue_handler.setFormatter(formatter)
      agent_logger.addHandler(agent_queue_handler)
      agent_logger.propagate = False
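
To see what the widened name column (24 → 32 characters) does to log output, here is a quick standalone check; the logger name below is a made-up example, not one from the package:

```python
import logging

# Exact format string from configure_agent_logging() above.
formatter = logging.Formatter(
    '%(asctime)s - [%(name)-32s] - %(levelname)-8s - %(message)s',
    datefmt='%H:%M:%S',
)
record = logging.LogRecord(
    name="agent.core",  # hypothetical logger name
    level=logging.INFO, pathname=__file__, lineno=1,
    msg="Agent instance created with dual-LLM architecture.",
    args=None, exc_info=None,
)
print(formatter.format(record))
# e.g. 12:34:56 - [agent.core                      ] - INFO     - Agent instance created with dual-LLM architecture.
```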
@@ -42,14 +38,25 @@ def configure_agent_logging():
  configure_agent_logging()

  class Agent:
-     """Main Agent class that integrates LLM, memory, and tools. This is the concrete implementation."""
+     """
+     Main Agent class implementing the Actor/Thinker model.
+     - The "Neuro" part (Actor) handles real-time interaction.
+     - The "Memory" part (Thinker) handles background memory consolidation.
+     """

      def __init__(self, working_dir: str = None):
          self.memory_manager = MemoryManager(working_dir)
          self.tool_manager = ToolManager(self.memory_manager)
-         self.llm_client = LLMClient()
+
+         # Dual LLM clients
+         self.neuro_llm = LLMClient()
+         self.memory_llm = LLMClient()
+
          self._initialized = False
-         agent_logger.info("Agent instance created.")
+         self.turn_counter = 0
+         self.reflection_threshold = 3  # Trigger reflection every 3 turns
+
+         agent_logger.info("Agent instance created with dual-LLM architecture.")
          agent_logger.debug(f"Agent working directory: {working_dir}")

      async def initialize(self):
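
A quick arithmetic check of the reflection cadence these new fields set up (pure Python, no LLM involved): with `reflection_threshold = 3`, reflection fires on every third processed batch, and each reflection pulls `threshold * 2 = 6` recent chat entries, as `process_and_respond` does further down.

```python
# Simulate the turn counter from Agent.__init__ / process_and_respond.
reflection_threshold = 3
turn_counter = 0
for turn in range(1, 10):
    turn_counter += 1
    if turn_counter >= reflection_threshold:
        print(f"turn {turn}: reflect on last {reflection_threshold * 2} chat entries")
        turn_counter = 0
# turn 3: reflect on last 6 chat entries
# turn 6: reflect on last 6 chat entries
# turn 9: reflect on last 6 chat entries
```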
@@ -63,139 +70,165 @@ class Agent:
      async def reset_all_memory(self):
          """Reset all agent memory types."""
          await self.memory_manager.reset_temp_memory()
-         await self.memory_manager.reset_context()
+         await self.memory_manager.reset_chat_history()
          agent_logger.info("All agent memory has been reset.")
-
-     async def process_messages(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
-         """Process incoming messages and generate a response with tool usage."""
-         await self.initialize()
-         agent_logger.info(f"Processing {len(messages)} messages.")

-         for msg in messages:
-             content = f"{msg['username']}: {msg['text']}"
-             await self.memory_manager.add_context_entry("user", content)
-
-         context_messages = await self.memory_manager.get_recent_context()
-         await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": context_messages})
-
-         processing_entry_id = await self.memory_manager.add_detailed_context_entry(
-             input_messages=messages, prompt="Processing started", llm_response="",
-             tool_executions=[], final_response="Processing started"
-         )
-
-         context = await self.memory_manager.get_full_context()
-         tool_descriptions = self.tool_manager.get_tool_descriptions()
-
-         # --- CORRECTED HISTORY GATHERING ---
-         recent_history = await self.memory_manager.get_detailed_context_history()
-         assistant_responses = []
-         for entry in reversed(recent_history):
-             if entry.get("type") == "llm_interaction":
-                 for tool in entry.get("tool_executions", []):
-                     if tool.get("name") == "speak" and tool.get("result"):
-                         assistant_responses.append(tool["result"])
-
-         # Create LLM prompt from template
-         template_path = Path(self.memory_manager.memory_dir).parent / "prompt_template.txt"
+     async def _build_neuro_prompt(self, messages: List[Dict[str, str]]) -> str:
+         """Builds the prompt for the Neuro (Actor) LLM."""
+         template_path = Path(self.memory_manager.memory_dir).parent / "neuro_prompt.txt"
          with open(template_path, 'r', encoding='utf-8') as f:
              prompt_template = f.read()

-         recent_speak_history_text = "\n".join([f"- {response}" for response in assistant_responses[:5]]) if assistant_responses else "You haven't said anything yet."
+         # Gather context
+         tool_descriptions = self.tool_manager.get_tool_descriptions()
+
+         # Format Core Memory from blocks
+         core_memory_blocks = await self.memory_manager.get_core_memory_blocks()
+         core_memory_parts = []
+         if core_memory_blocks:
+             for block_id, block in core_memory_blocks.items():
+                 core_memory_parts.append(f"\nBlock: {block.get('title', '')} ({block_id})")
+                 core_memory_parts.append(f"Description: {block.get('description', '')}")
+                 content_items = block.get("content", [])
+                 if content_items:
+                     core_memory_parts.append("Content:")
+                     for item in content_items:
+                         core_memory_parts.append(f"  - {item}")
+         core_memory_text = "\n".join(core_memory_parts) if core_memory_parts else "Not set."
+
+         # Format Temp Memory
+         temp_memory_items = self.memory_manager.temp_memory
+         temp_memory_text = "\n".join(
+             [f"[{item.get('role', 'system')}] {item.get('content', '')}" for item in temp_memory_items]
+         ) if temp_memory_items else "Empty."
+
+         recent_history = await self.memory_manager.get_recent_chat(entries=10)
+
          user_messages_text = "\n".join([f"{msg['username']}: {msg['text']}" for msg in messages])
+         recent_history_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in recent_history])

-         prompt = prompt_template.format(
-             full_context=context,
+         return prompt_template.format(
              tool_descriptions=tool_descriptions,
-             recent_speak_history=recent_speak_history_text,
+             core_memory=core_memory_text,
+             temp_memory=temp_memory_text,
+             recent_history=recent_history_text,
              user_messages=user_messages_text
          )
+
+     async def _build_memory_prompt(self, conversation_history: List[Dict[str, str]]) -> str:
+         """Builds the prompt for the Memory (Thinker) LLM."""
+         template_path = Path(self.memory_manager.memory_dir).parent / "memory_prompt.txt"
+         with open(template_path, 'r', encoding='utf-8') as f:
+             prompt_template = f.read()

-         await self.memory_manager.add_detailed_context_entry(
-             input_messages=messages, prompt=prompt, llm_response="", tool_executions=[],
-             final_response="Prompt sent to LLM", entry_id=processing_entry_id
-         )
-
-         response_text = await self.llm_client.generate(prompt)
-         agent_logger.debug(f"LLM raw response: {response_text[:100] if response_text else 'None'}...")
-
-         await self.memory_manager.add_detailed_context_entry(
-             input_messages=messages, prompt=prompt, llm_response=response_text, tool_executions=[],
-             final_response="LLM response received", entry_id=processing_entry_id
-         )
-
-         processing_result = {
-             "input_messages": messages, "llm_response": response_text,
-             "tool_executions": [], "final_response": ""
-         }
-
-         if response_text:
-             tool_calls = self._parse_tool_calls(response_text)
-             for tool_call in tool_calls:
-                 agent_logger.info(f"Executing tool: {tool_call['name']}")
-                 await self._execute_parsed_tool(tool_call, processing_result)
-
-         await self.memory_manager.add_detailed_context_entry(
-             input_messages=messages, prompt=prompt, llm_response=response_text,
-             tool_executions=processing_result["tool_executions"],
-             final_response=processing_result["final_response"], entry_id=processing_entry_id
-         )
-
-         final_context = await self.memory_manager.get_recent_context()
-         await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": final_context})
-
-         agent_logger.info("Message processing completed.")
-         return processing_result
+         history_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in conversation_history])

-     async def _execute_parsed_tool(self, tool_call: Dict[str, Any], processing_result: Dict[str, Any]):
-         """Execute a parsed tool call and update processing result."""
+         return prompt_template.format(conversation_history=history_text)
+
+     def _parse_tool_calls(self, response_text: str) -> List[Dict[str, Any]]:
+         """Parses LLM response for JSON tool calls."""
          try:
-             tool_result = await self.execute_tool(tool_call["name"], tool_call["params"])
-             tool_call["result"] = tool_result
-             if tool_call["name"] == "speak":
-                 processing_result["final_response"] = tool_call["params"].get("text", "")
-             processing_result["tool_executions"].append(tool_call)
-         except Exception as e:
-             tool_call["error"] = str(e)
-             processing_result["tool_executions"].append(tool_call)
-             agent_logger.error(f"Error executing tool {tool_call['name']}: {e}")
+             # The LLM is prompted to return a JSON array of tool calls.
+             # Find the JSON block, which might be wrapped in markdown.
+             match = re.search(r'''```json\s*([\s\S]*?)\s*```|(\[[\s\S]*\])''', response_text)
+             if not match:
+                 agent_logger.warning(f"No valid JSON tool call block found in response: {response_text}")
+                 return []
+
+             json_str = match.group(1) or match.group(2)
+             tool_calls = json.loads(json_str)

-     def _parse_tool_calls(self, text: str) -> List[Dict[str, Any]]:
-         """Parse tool calls using ast.literal_eval for robustness."""
-         import ast
-         calls = []
-         text = text.strip()
-         if text.startswith("speak(") and text.endswith(")"):
+             if isinstance(tool_calls, list):
+                 return tool_calls
+             return []
+         except json.JSONDecodeError as e:
+             agent_logger.error(f"Failed to decode JSON from LLM response: {e}\nResponse text: {response_text}")
+             return []
+         except Exception as e:
+             agent_logger.error(f"An unexpected error occurred while parsing tool calls: {e}")
+             return []
+
+     async def _execute_tool_calls(self, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+         """Executes a list of parsed tool calls."""
+         execution_results = []
+         final_response = ""
+         for tool_call in tool_calls:
+             tool_name = tool_call.get("name")
+             params = tool_call.get("params", {})
+             if not tool_name:
+                 continue
+
+             agent_logger.info(f"Executing tool: {tool_name} with params: {params}")
              try:
-                 # Extract the content inside speak(...)
-                 # e.g., "text='Hello, I'm here'"
-                 inner_content = text[len("speak("):-1].strip()
-
-                 # Ensure it's a text=... call
-                 if not inner_content.startswith("text="):
-                     return []
-
-                 # Get the quoted string part
-                 quoted_string = inner_content[len("text="):].strip()
-
-                 # Use ast.literal_eval to safely parse the Python string literal
-                 parsed_text = ast.literal_eval(quoted_string)
-
-                 if isinstance(parsed_text, str):
-                     calls.append({
-                         "name": "speak",
-                         "params": {"text": parsed_text}
-                     })
-
-             except (ValueError, SyntaxError, TypeError) as e:
-                 agent_logger.warning(f"Could not parse tool call using ast.literal_eval: {text}. Error: {e}")
-
-         return calls
+                 result = await self.tool_manager.execute_tool(tool_name, params)
+                 execution_results.append({"name": tool_name, "params": params, "result": result})
+                 if tool_name == "speak":
+                     final_response = params.get("text", "")
+             except Exception as e:
+                 agent_logger.error(f"Error executing tool {tool_name}: {e}")
+                 execution_results.append({"name": tool_name, "params": params, "error": str(e)})

-     async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Any:
-         """Execute a registered tool."""
+         return {"tool_executions": execution_results, "final_response": final_response}
+
+     async def process_and_respond(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
+         """
+         The main entry point for the "Neuro" (Actor) flow.
+         Handles real-time interaction and triggers background reflection.
+         """
          await self.initialize()
-         agent_logger.debug(f"Executing tool: {tool_name} with params: {params}")
-         result = await self.tool_manager.execute_tool(tool_name, params)
-         agent_logger.debug(f"Tool execution result: {result}")
-         return result
+         agent_logger.info(f"Processing {len(messages)} messages in Actor flow.")
+
+         # Add user messages to context
+         for msg in messages:
+             await self.memory_manager.add_chat_entry("user", f"{msg['username']}: {msg['text']}")
+
+         # Build prompt and get response from Neuro LLM
+         prompt = await self._build_neuro_prompt(messages)
+         response_text = await self.neuro_llm.generate(prompt)
+         agent_logger.debug(f"Neuro LLM raw response: {response_text[:150] if response_text else 'None'}...")
+
+         # Parse and execute tools
+         tool_calls = self._parse_tool_calls(response_text)
+         processing_result = await self._execute_tool_calls(tool_calls)
+
+         # Add agent's response to context
+         if processing_result["final_response"]:
+             await self.memory_manager.add_chat_entry("assistant", processing_result["final_response"])
+
+         # Update dashboard/UI
+         final_context = await self.memory_manager.get_recent_chat()
+         # Broadcast to stream clients
+         await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": final_context})
+         # Broadcast to admin clients (Dashboard)
+         await connection_manager.broadcast_to_admins({"type": "agent_context", "action": "update", "messages": final_context})
+
+         # Handle reflection trigger
+         self.turn_counter += 1
+         if self.turn_counter >= self.reflection_threshold:
+             agent_logger.info(f"Reflection threshold reached ({self.turn_counter}/{self.reflection_threshold}). Scheduling background reflection.")
+             history_for_reflection = await self.memory_manager.get_recent_chat(entries=self.reflection_threshold * 2)  # Get a bit more context
+             asyncio.create_task(self.reflect_on_context(history_for_reflection))
+             self.turn_counter = 0
+
+         agent_logger.info("Actor flow completed.")
+         return processing_result
+
+     async def reflect_on_context(self, conversation_history: List[Dict[str, str]]):
+         """
+         The main entry point for the "Memory" (Thinker) flow.
+         Runs in the background to consolidate memories.
+         """
+         agent_logger.info("Thinker flow started: Reflecting on recent context.")
+
+         prompt = await self._build_memory_prompt(conversation_history)
+         response_text = await self.memory_llm.generate(prompt)
+         agent_logger.debug(f"Memory LLM raw response: {response_text[:150] if response_text else 'None'}...")
+
+         tool_calls = self._parse_tool_calls(response_text)
+         if not tool_calls:
+             agent_logger.info("Thinker flow: No memory operations were suggested by the LLM.")
+             return
+
+         agent_logger.info(f"Thinker flow: Executing {len(tool_calls)} memory operations.")
+         await self._execute_tool_calls(tool_calls)
+         agent_logger.info("Thinker flow completed, memory has been updated.")
neuro_simulator/agent/memory/manager.py CHANGED
@@ -31,12 +31,12 @@ class MemoryManager:

          self.init_memory_file = os.path.join(self.memory_dir, "init_memory.json")
          self.core_memory_file = os.path.join(self.memory_dir, "core_memory.json")
-         self.context_file = os.path.join(self.memory_dir, "context.json")
+         self.chat_history_file = os.path.join(self.memory_dir, "chat_history.json")
          self.temp_memory_file = os.path.join(self.memory_dir, "temp_memory.json")

          self.init_memory: Dict[str, Any] = {}
          self.core_memory: Dict[str, Any] = {}
-         self.context_history: List[Dict[str, Any]] = []
+         self.chat_history: List[Dict[str, Any]] = []
          self.temp_memory: List[Dict[str, Any]] = []

      async def initialize(self):
@@ -61,12 +61,12 @@ class MemoryManager:
              self.core_memory = {"blocks": {}}
              await self._save_core_memory()

-         # Load context history
-         if os.path.exists(self.context_file):
-             with open(self.context_file, 'r', encoding='utf-8') as f:
-                 self.context_history = json.load(f)
+         # Load chat history
+         if os.path.exists(self.chat_history_file):
+             with open(self.chat_history_file, 'r', encoding='utf-8') as f:
+                 self.chat_history = json.load(f)
          else:
-             self.context_history = []
+             self.chat_history = []

          # Load temp memory
          if os.path.exists(self.temp_memory_file):
@@ -89,20 +89,20 @@ class MemoryManager:
          with open(self.core_memory_file, 'w', encoding='utf-8') as f:
              json.dump(self.core_memory, f, ensure_ascii=False, indent=2)

-     async def _save_context(self):
-         with open(self.context_file, 'w', encoding='utf-8') as f:
-             json.dump(self.context_history, f, ensure_ascii=False, indent=2)
+     async def _save_chat_history(self):
+         with open(self.chat_history_file, 'w', encoding='utf-8') as f:
+             json.dump(self.chat_history, f, ensure_ascii=False, indent=2)

      async def _save_temp_memory(self):
          with open(self.temp_memory_file, 'w', encoding='utf-8') as f:
              json.dump(self.temp_memory, f, ensure_ascii=False, indent=2)

-     async def add_context_entry(self, role: str, content: str):
+     async def add_chat_entry(self, role: str, content: str):
          entry = {"id": generate_id(), "role": role, "content": content, "timestamp": datetime.now().isoformat()}
-         self.context_history.append(entry)
-         await self._save_context()
+         self.chat_history.append(entry)
+         await self._save_chat_history()

-     async def add_detailed_context_entry(self, input_messages: List[Dict[str, str]],
+     async def add_detailed_chat_entry(self, input_messages: List[Dict[str, str]],
                                           prompt: str, llm_response: str,
                                           tool_executions: List[Dict[str, Any]],
                                           final_response: str, entry_id: str = None):
@@ -112,25 +112,25 @@ class MemoryManager:
              "timestamp": datetime.now().isoformat()
          }
          if entry_id:
-             for entry in self.context_history:
+             for entry in self.chat_history:
                  if entry.get("id") == entry_id:
                      entry.update(update_data)
-                     await self._save_context()
+                     await self._save_chat_history()
                      return entry_id

          new_entry = {"id": entry_id or generate_id(), "type": "llm_interaction", "role": "assistant", **update_data}
-         self.context_history.append(new_entry)
-         await self._save_context()
+         self.chat_history.append(new_entry)
+         await self._save_chat_history()
          return new_entry["id"]

-     async def get_recent_context(self, entries: int = 10) -> List[Dict[str, Any]]:
-         return self.context_history[-entries:]
+     async def get_recent_chat(self, entries: int = 10) -> List[Dict[str, Any]]:
+         return self.chat_history[-entries:]

-     async def get_detailed_context_history(self) -> List[Dict[str, Any]]:
-         return self.context_history
+     async def get_detailed_chat_history(self) -> List[Dict[str, Any]]:
+         return self.chat_history

      async def get_last_agent_response(self) -> Optional[str]:
-         for entry in reversed(self.context_history):
+         for entry in reversed(self.chat_history):
              if entry.get("type") == "llm_interaction":
                  final_response = entry.get("final_response", "")
                  if final_response and final_response not in ["Processing started", "Prompt sent to LLM", "LLM response received"]:
@@ -141,9 +141,9 @@ class MemoryManager:
                  return content
          return None

-     async def reset_context(self):
-         self.context_history = []
-         await self._save_context()
+     async def reset_chat_history(self):
+         self.chat_history = []
+         await self._save_chat_history()

      async def reset_temp_memory(self):
          """Reset temp memory to a default empty state."""
neuro_simulator/api/stream.py CHANGED
@@ -1,55 +1 @@
- # neuro_simulator/api/stream.py
- """API endpoints for controlling the live stream lifecycle."""
-
- import asyncio
- import logging
- from pathlib import Path
-
- from fastapi import APIRouter, Depends
-
- from ..core.agent_factory import create_agent
- from ..utils.process import process_manager
- from .agent import get_api_token  # Re-using the auth dependency from agent API
-
- logger = logging.getLogger(__name__.replace("neuro_simulator", "server", 1))
- router = APIRouter(prefix="/api/stream", tags=["Stream Control"])
-
- @router.post("/start", dependencies=[Depends(get_api_token)])
- async def start_stream():
-     """Starts the live stream processes."""
-     try:
-         agent = await create_agent()
-         await agent.reset_memory()
-     except Exception as e:
-         logger.error(f"Could not reset agent memory on stream start: {e}", exc_info=True)
-
-     if not process_manager.is_running:
-         process_manager.start_live_processes()
-         return {"status": "success", "message": "Stream started"}
-     else:
-         return {"status": "info", "message": "Stream is already running"}
-
- @router.post("/stop", dependencies=[Depends(get_api_token)])
- async def stop_stream():
-     """Stops the live stream processes."""
-     if process_manager.is_running:
-         await process_manager.stop_live_processes()
-         return {"status": "success", "message": "Stream stopped"}
-     else:
-         return {"status": "info", "message": "Stream is not running"}
-
- @router.post("/restart", dependencies=[Depends(get_api_token)])
- async def restart_stream():
-     """Restarts the live stream processes."""
-     await process_manager.stop_live_processes()
-     await asyncio.sleep(1)  # Give time for tasks to cancel
-     process_manager.start_live_processes()
-     return {"status": "success", "message": "Stream restarted"}
-
- @router.get("/status", dependencies=[Depends(get_api_token)])
- async def get_stream_status():
-     """Gets the current status of the stream."""
-     return {
-         "is_running": process_manager.is_running,
-         "backend_status": "running" if process_manager.is_running else "stopped"
-     }
+ # This file is now empty as all stream control API endpoints have been migrated to WebSockets.
neuro_simulator/api/system.py CHANGED
@@ -1,71 +1,41 @@
  # neuro_simulator/api/system.py
  """API endpoints for system, config, and utility functions."""

- from fastapi import APIRouter, Depends, HTTPException
- from pydantic import BaseModel
+ from fastapi import APIRouter, Depends, HTTPException, status, Request
  import time

- from ..core.config import config_manager, AppSettings
- from ..services.audio import synthesize_audio_segment
- from .agent import get_api_token  # Re-using the auth dependency
+ from ..core.config import config_manager

  router = APIRouter(tags=["System & Utilities"])

- # --- TTS Endpoint ---

- class SpeechRequest(BaseModel):
-     text: str
-     voice_name: str | None = None
-     pitch: float | None = None
-
- @router.post("/api/tts/synthesize", dependencies=[Depends(get_api_token)])
- async def synthesize_speech_endpoint(request: SpeechRequest):
-     """Synthesizes text to speech using the configured TTS service."""
-     try:
-         audio_base64, _ = await synthesize_audio_segment(
-             text=request.text, voice_name=request.voice_name, pitch=request.pitch
-         )
-         return {"audio_base64": audio_base64}
-     except Exception as e:
-         raise HTTPException(status_code=500, detail=str(e))
-
- # --- Config Management Endpoints ---
-
- # This helper can be moved to a more central location if needed
- def filter_config_for_frontend(settings: AppSettings):
-     """Filters the settings to return only the fields safe for the frontend."""
-     # Using .model_dump() with include is a more robust Pydantic approach
-     return settings.model_dump(include={
-         'stream_metadata': {'stream_title', 'stream_category', 'stream_tags'},
-         'agent': {'agent_provider', 'agent_model'},
-         'neuro_behavior': {'input_chat_sample_size', 'post_speech_cooldown_sec', 'initial_greeting'},
-         'audience_simulation': {'llm_provider', 'gemini_model', 'openai_model', 'llm_temperature', 'chat_generation_interval_sec', 'chats_per_batch', 'max_output_tokens', 'username_blocklist', 'username_pool'},
-         'performance': {'neuro_input_queue_max_size', 'audience_chat_buffer_max_size', 'initial_chat_backlog_limit'}
-     })
-
- @router.get("/api/configs", dependencies=[Depends(get_api_token)])
- async def get_configs():
-     """Gets the current, frontend-safe configuration."""
-     return filter_config_for_frontend(config_manager.settings)
-
- @router.patch("/api/configs", dependencies=[Depends(get_api_token)])
- async def update_configs(new_settings: dict):
-     """Updates the configuration with new values from the frontend."""
-     try:
-         await config_manager.update_settings(new_settings)
-         return filter_config_for_frontend(config_manager.settings)
-     except Exception as e:
-         raise HTTPException(status_code=500, detail=f"Failed to update settings: {str(e)}")
-
- @router.post("/api/configs/reload", dependencies=[Depends(get_api_token)])
- async def reload_configs():
-     """Triggers a reload of the configuration from the config.yaml file."""
-     try:
-         # Passing an empty dict forces a reload and triggers callbacks
-         await config_manager.update_settings({})
-         return {"status": "success", "message": "Configuration reloaded"}
-     except Exception as e:
-         raise HTTPException(status_code=500, detail=f"Failed to reload settings: {str(e)}")
+ # --- Utility function to filter config for frontend ---
+ def filter_config_for_frontend(settings):
+     """Filters the full settings object to remove sensitive fields before sending to the frontend."""
+     # Create a dictionary representation of the settings
+     config_dict = settings.model_dump()
+
+     # Remove sensitive fields
+     config_dict.pop('api_keys', None)
+
+     return config_dict
+
+
+ # --- Auth Dependency ---
+
+ async def get_api_token(request: Request):
+     """FastAPI dependency to check for the API token in headers."""
+     password = config_manager.settings.server.panel_password
+     if not password:
+         return True
+     header_token = request.headers.get("X-API-Token")
+     if header_token and header_token == password:
+         return True
+     raise HTTPException(
+         status_code=status.HTTP_401_UNAUTHORIZED,
+         detail="Invalid API token",
+         headers={"WWW-Authenticate": "Bearer"},
+     )

  # --- System Endpoints ---

@@ -87,4 +57,4 @@ async def root():
          "message": "Neuro-Sama Simulator Backend",
          "version": "2.0",
          "api_docs": "/docs",
-     }
+     }
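
Two behavioral notes on this rewrite, sketched below. First, `filter_config_for_frontend` switched from a `model_dump(include=...)` allowlist to popping `api_keys`, so it now fails open for any future sensitive field rather than failing closed. Second, `get_api_token` now lives here and checks the `X-API-Token` header against `server.panel_password`, with auth effectively disabled when no password is configured. The settings model and route below are toy stand-ins, not the package's real `AppSettings` or endpoints:

```python
from fastapi import Depends, FastAPI, HTTPException, Request, status
from pydantic import BaseModel

class ToySettings(BaseModel):  # stand-in; only api_keys mirrors a real field name
    api_keys: dict = {"openai": "sk-..."}
    stream_title: str = "Neuro stream"

def filter_config_for_frontend(settings: ToySettings) -> dict:
    config_dict = settings.model_dump()
    config_dict.pop('api_keys', None)  # blocklist: strips only this one field
    return config_dict

PANEL_PASSWORD = "change-me"  # stand-in for config_manager.settings.server.panel_password

async def get_api_token(request: Request):
    if not PANEL_PASSWORD:  # no password configured -> auth disabled
        return True
    if request.headers.get("X-API-Token") == PANEL_PASSWORD:
        return True
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API token",
        headers={"WWW-Authenticate": "Bearer"},
    )

app = FastAPI()

@app.get("/api/example", dependencies=[Depends(get_api_token)])  # hypothetical route
async def example():
    return filter_config_for_frontend(ToySettings())
```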
neuro_simulator/cli.py CHANGED
@@ -60,12 +60,13 @@ def main():
      # Ensure agent directory and its contents exist
      agent_dir = work_dir / "agent"
      agent_dir.mkdir(parents=True, exist_ok=True)
-     copy_resource('neuro_simulator', 'agent/prompt_template.txt', agent_dir / 'prompt_template.txt')
+     copy_resource('neuro_simulator', 'agent/neuro_prompt.txt', agent_dir / 'neuro_prompt.txt')
+     copy_resource('neuro_simulator', 'agent/memory_prompt.txt', agent_dir / 'memory_prompt.txt')

      # Ensure agent memory directory and its contents exist
      agent_memory_dir = agent_dir / "memory"
      agent_memory_dir.mkdir(parents=True, exist_ok=True)
-     for filename in ["context.json", "core_memory.json", "dialog_history.json", "init_memory.json"]:
+     for filename in ["chat_history.json", "core_memory.json", "init_memory.json"]:
          copy_resource('neuro_simulator', f'agent/memory/{filename}', agent_memory_dir / filename)

      # 3. Validate essential files
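
`copy_resource` itself is not shown in this diff; for orientation, a plausible minimal implementation using `importlib.resources` (the real helper may differ):

```python
from importlib import resources
from pathlib import Path

def copy_resource(package: str, resource_path: str, dest: Path) -> None:
    """Copy a packaged data file to the working directory, keeping any existing copy."""
    if dest.exists():  # don't clobber user edits on re-run
        return
    data = resources.files(package).joinpath(resource_path).read_bytes()
    dest.write_bytes(data)
```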
@@ -16,7 +16,7 @@ class BaseAgent(ABC):
          pass

      @abstractmethod
-     async def process_messages(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
+     async def process_and_respond(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
          """Process messages and generate a response."""
          pass
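
Downstream agents must rename their `process_messages` override to match; a minimal conforming subclass, with `BaseAgent` trimmed to just the abstract method shown above:

```python
from abc import ABC, abstractmethod
from typing import Any, Dict, List

class BaseAgent(ABC):  # trimmed stand-in for the real interface
    @abstractmethod
    async def process_and_respond(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        """Process messages and generate a response."""

class EchoAgent(BaseAgent):
    async def process_and_respond(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
        text = " | ".join(f"{m['username']}: {m['text']}" for m in messages)
        # Same result shape the built-in Agent returns from _execute_tool_calls.
        return {"tool_executions": [], "final_response": text}
```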