neuro-simulator 0.1.3__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. neuro_simulator/__init__.py +1 -10
  2. neuro_simulator/agent/__init__.py +1 -8
  3. neuro_simulator/agent/base.py +43 -0
  4. neuro_simulator/agent/core.py +111 -397
  5. neuro_simulator/agent/factory.py +30 -0
  6. neuro_simulator/agent/llm.py +34 -31
  7. neuro_simulator/agent/memory/__init__.py +1 -4
  8. neuro_simulator/agent/memory/manager.py +61 -203
  9. neuro_simulator/agent/tools/__init__.py +1 -4
  10. neuro_simulator/agent/tools/core.py +8 -18
  11. neuro_simulator/api/__init__.py +1 -0
  12. neuro_simulator/api/agent.py +163 -0
  13. neuro_simulator/api/stream.py +55 -0
  14. neuro_simulator/api/system.py +90 -0
  15. neuro_simulator/cli.py +53 -142
  16. neuro_simulator/core/__init__.py +1 -0
  17. neuro_simulator/core/agent_factory.py +52 -0
  18. neuro_simulator/core/agent_interface.py +91 -0
  19. neuro_simulator/core/application.py +278 -0
  20. neuro_simulator/services/__init__.py +1 -0
  21. neuro_simulator/{chatbot.py → services/audience.py} +24 -24
  22. neuro_simulator/{audio_synthesis.py → services/audio.py} +18 -15
  23. neuro_simulator/services/builtin.py +87 -0
  24. neuro_simulator/services/letta.py +206 -0
  25. neuro_simulator/{stream_manager.py → services/stream.py} +39 -47
  26. neuro_simulator/utils/__init__.py +1 -0
  27. neuro_simulator/utils/logging.py +90 -0
  28. neuro_simulator/utils/process.py +67 -0
  29. neuro_simulator/{stream_chat.py → utils/queue.py} +17 -4
  30. neuro_simulator/utils/state.py +14 -0
  31. neuro_simulator/{websocket_manager.py → utils/websocket.py} +18 -14
  32. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/METADATA +176 -176
  33. neuro_simulator-0.2.0.dist-info/RECORD +37 -0
  34. neuro_simulator/agent/api.py +0 -737
  35. neuro_simulator/agent/memory.py +0 -137
  36. neuro_simulator/agent/tools.py +0 -69
  37. neuro_simulator/builtin_agent.py +0 -83
  38. neuro_simulator/config.yaml.example +0 -157
  39. neuro_simulator/letta.py +0 -164
  40. neuro_simulator/log_handler.py +0 -43
  41. neuro_simulator/main.py +0 -673
  42. neuro_simulator/media/neuro_start.mp4 +0 -0
  43. neuro_simulator/process_manager.py +0 -70
  44. neuro_simulator/shared_state.py +0 -11
  45. neuro_simulator-0.1.3.dist-info/RECORD +0 -31
  46. /neuro_simulator/{config.py → core/config.py} +0 -0
  47. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/WHEEL +0 -0
  48. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/entry_points.txt +0 -0
  49. {neuro_simulator-0.1.3.dist-info → neuro_simulator-0.2.0.dist-info}/top_level.txt +0 -0
@@ -1,18 +1,24 @@
1
- # agent/core.py
1
+ # neuro_simulator/agent/core.py
2
2
  """
3
- Core module for the Neuro Simulator Agent
3
+ Core module for the Neuro Simulator's built-in agent.
4
4
  """
5
5
 
6
- import os
7
- import json
8
6
  import asyncio
9
- from typing import Dict, List, Any, Optional
10
- from datetime import datetime
11
- import sys
7
+ import json
12
8
  import logging
9
+ import re
10
+ import sys
11
+ from datetime import datetime
12
+ from typing import Any, Dict, List, Optional
13
13
 
14
- # Import the shared log queue from the main log_handler
15
- from ..log_handler import agent_log_queue, QueueLogHandler
14
+ # Updated imports for the new structure
15
+ from ..utils.logging import QueueLogHandler, agent_log_queue
16
+ from ..utils.websocket import connection_manager
17
+
18
+ # --- Agent-specific imports ---
19
+ from .llm import LLMClient
20
+ from .memory.manager import MemoryManager
21
+ from .tools.core import ToolManager
16
22
 
17
23
  # Create a logger for the agent
18
24
  agent_logger = logging.getLogger("neuro_agent")
@@ -20,475 +26,183 @@ agent_logger.setLevel(logging.DEBUG)
20
26
 
21
27
  # Configure agent logging to use the shared queue
22
28
  def configure_agent_logging():
23
- """Configure agent logging to use the shared agent_log_queue"""
24
- # Create a handler for the agent queue
25
- agent_queue_handler = QueueLogHandler(agent_log_queue)
26
- formatter = logging.Formatter('%(asctime)s - [AGENT] - %(levelname)s - %(message)s', datefmt='%H:%M:%S')
27
- agent_queue_handler.setFormatter(formatter)
28
-
29
- # Clear any existing handlers
29
+ """Configure agent logging to use the shared agent_log_queue."""
30
30
  if agent_logger.hasHandlers():
31
31
  agent_logger.handlers.clear()
32
32
 
33
- # Add the queue handler
33
+ agent_queue_handler = QueueLogHandler(agent_log_queue)
34
+ # Use the same format as the server for consistency
35
+ formatter = logging.Formatter('%(asctime)s - [%(name)-24s] - %(levelname)-8s - %(message)s', datefmt='%H:%M:%S')
36
+ agent_queue_handler.setFormatter(formatter)
34
37
  agent_logger.addHandler(agent_queue_handler)
35
- agent_logger.propagate = False # Prevent logs from propagating to root logger
36
-
37
- print("Agent日志系统已配置,将日志输出到 agent_log_queue。")
38
+ agent_logger.propagate = False
39
+ agent_logger.info("Agent logging configured to use agent_log_queue.")
38
40
 
39
- # Configure agent logging when module is imported
40
41
  configure_agent_logging()
41
42
 
42
43
  class Agent:
43
- """Main Agent class that integrates LLM, memory, and tools"""
44
+ """Main Agent class that integrates LLM, memory, and tools. This is the concrete implementation."""
44
45
 
45
46
  def __init__(self, working_dir: str = None):
46
- # Lazy imports to avoid circular dependencies
47
- from .memory.manager import MemoryManager
48
- from .tools.core import ToolManager
49
- from .llm import LLMClient
50
-
51
47
  self.memory_manager = MemoryManager(working_dir)
52
48
  self.tool_manager = ToolManager(self.memory_manager)
53
49
  self.llm_client = LLMClient()
54
50
  self._initialized = False
55
-
56
- # Log agent initialization
57
- agent_logger.info("Agent initialized")
51
+ agent_logger.info("Agent instance created.")
58
52
  agent_logger.debug(f"Agent working directory: {working_dir}")
59
53
 
60
54
  async def initialize(self):
61
- """Initialize the agent, loading any persistent memory"""
55
+ """Initialize the agent, loading any persistent memory."""
62
56
  if not self._initialized:
63
- agent_logger.info("Initializing agent memory manager")
57
+ agent_logger.info("Initializing agent memory manager...")
64
58
  await self.memory_manager.initialize()
65
59
  self._initialized = True
66
- agent_logger.info("Agent initialized successfully")
60
+ agent_logger.info("Agent initialized successfully.")
67
61
 
68
62
  async def reset_all_memory(self):
69
- """Reset all agent memory types"""
70
- # Reset temp memory
63
+ """Reset all agent memory types."""
71
64
  await self.memory_manager.reset_temp_memory()
72
-
73
- # Reset context (dialog history)
74
65
  await self.memory_manager.reset_context()
75
-
76
- agent_logger.info("All agent memory reset successfully")
77
- print("All agent memory reset successfully")
78
-
79
- async def reset_memory(self):
80
- """Reset agent temp memory (alias for backward compatibility)"""
81
- await self.reset_all_memory()
66
+ agent_logger.info("All agent memory has been reset.")
82
67
 
83
68
  async def process_messages(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
84
- """
85
- Process incoming messages and generate a response
86
-
87
- Args:
88
- messages: List of message dictionaries with 'username' and 'text' keys
89
-
90
- Returns:
91
- Dictionary containing processing details including tool executions and final response
92
- """
93
- # Ensure agent is initialized
69
+ """Process incoming messages and generate a response with tool usage."""
94
70
  await self.initialize()
95
-
96
- agent_logger.info(f"Processing {len(messages)} messages")
97
-
98
- # Add messages to context
71
+ agent_logger.info(f"Processing {len(messages)} messages.")
72
+
99
73
  for msg in messages:
100
74
  content = f"{msg['username']}: {msg['text']}"
101
75
  await self.memory_manager.add_context_entry("user", content)
102
- agent_logger.debug(f"Added message to context: {content}")
103
-
104
- # Send context update via WebSocket after adding user messages
105
- from ..websocket_manager import connection_manager
76
+
106
77
  context_messages = await self.memory_manager.get_recent_context()
107
- await connection_manager.broadcast({
108
- "type": "agent_context",
109
- "action": "update",
110
- "messages": context_messages
111
- })
78
+ await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": context_messages})
112
79
 
113
- # Add detailed context entry for the start of processing
114
80
  processing_entry_id = await self.memory_manager.add_detailed_context_entry(
115
- input_messages=messages,
116
- prompt="Processing started",
117
- llm_response="",
118
- tool_executions=[],
119
- final_response="Processing started"
81
+ input_messages=messages, prompt="Processing started", llm_response="",
82
+ tool_executions=[], final_response="Processing started"
120
83
  )
121
84
 
122
- # Get full context for LLM
123
85
  context = await self.memory_manager.get_full_context()
124
86
  tool_descriptions = self.tool_manager.get_tool_descriptions()
125
87
 
126
- # Get last agent response to avoid repetition
127
- last_response = await self.memory_manager.get_last_agent_response()
128
-
129
- # Create LLM prompt with context and tools
130
- prompt = f"""You are {self.memory_manager.init_memory.get('name', 'Neuro-Sama')}, an AI VTuber.
131
- Your personality: {self.memory_manager.init_memory.get('personality', 'Friendly and curious')}
132
-
133
- === CONTEXT ===
134
- {context}
135
-
136
- === AVAILABLE TOOLS ===
137
- {tool_descriptions}
138
-
139
- === INSTRUCTIONS ===
140
- Process the user messages and respond appropriately. You can use tools to manage memory or output responses.
141
- When you want to speak to the user, use the 'speak' tool with your response as the text parameter.
142
- When you want to update memory, use the appropriate memory management tools.
143
- You are fully responsible for managing your own memory. Use the memory tools proactively when you need to:
144
- - Remember important information from the conversation
145
- - Update your knowledge or personality
146
- - Store observations about users or events
147
- - Retrieve relevant information to inform your responses
148
- Always think about whether you need to use tools before responding.
88
+ # --- CORRECTED HISTORY GATHERING ---
89
+ recent_history = await self.memory_manager.get_detailed_context_history()
90
+ assistant_responses = []
91
+ for entry in reversed(recent_history):
92
+ if entry.get("type") == "llm_interaction":
93
+ for tool in entry.get("tool_executions", []):
94
+ if tool.get("name") == "speak" and tool.get("result"):
95
+ assistant_responses.append(tool["result"])
149
96
 
150
- IMPORTANT GUIDELINES:
151
- - Be creative and engaging in your responses
152
- - Keep responses concise and conversational
153
- - Maintain your character's personality
154
- - Pay close attention to the conversation history in the context to understand the flow of the dialogue
155
- - Respond to the most recent user messages while considering the overall context of the conversation
97
+ # Create LLM prompt
98
+ prompt_parts = [
99
+ f"You are {self.memory_manager.init_memory.get('name', 'Neuro-Sama')}, an AI VTuber.",
100
+ f"Your personality: {self.memory_manager.init_memory.get('personality', 'Friendly and curious')}",
101
+ "\n=== CONTEXT ===", context,
102
+ "\n=== AVAILABLE TOOLS ===", tool_descriptions,
103
+ "\n=== YOUR RECENT SPEAK HISTORY (for context) ==="
104
+ ]
105
+ for response in assistant_responses[:5]: # Get last 5 responses
106
+ prompt_parts.append(f"- {response}")
156
107
 
157
- === YOUR SPEAK HISTORY ===
158
- """
108
+ prompt_parts.extend([
109
+ "\n=== INSTRUCTIONS ===",
110
+ "Process the user messages and respond. Use the 'speak' tool to talk to the user.",
111
+ "You are fully responsible for managing your own memory using the available tools.",
112
+ "\nUser messages to respond to:",
113
+ ])
159
114
 
160
- # Add assistant's recent responses to the prompt
161
- # Get the recent context and extract speak results from llm_interaction entries
162
- recent_context = await self.memory_manager.get_recent_context(100) # Get more entries to filter
163
- assistant_responses = []
164
-
165
- # Filter for llm_interaction entries with role: "assistant" and extract speak results
166
- for entry in reversed(recent_context): # Reverse to get newest first
167
- if entry.get("type") == "llm_interaction" and entry.get("role") == "assistant":
168
- tool_executions = entry.get("tool_executions", [])
169
- for tool_execution in tool_executions:
170
- if tool_execution.get("name") == "speak":
171
- result = tool_execution.get("result")
172
- if result:
173
- assistant_responses.append(result)
174
-
175
- # Add up to 64 most recent assistant responses
176
- for i, response in enumerate(assistant_responses[:64]):
177
- prompt += f"{i+1}. {response}\n"
178
-
179
- prompt += f"\nUser messages to respond to:\n"
180
-
181
115
  for msg in messages:
182
- prompt += f"{msg['username']}: {msg['text']}\n"
183
-
184
- prompt += "\nYour response (use tools as needed):"
116
+ prompt_parts.append(f"{msg['username']}: {msg['text']}")
117
+ prompt_parts.append("\nYour response (use tools as needed):")
118
+ prompt = "\n".join(prompt_parts)
185
119
 
186
- agent_logger.debug("Sending prompt to LLM")
187
-
188
- # Add detailed context entry for the prompt
189
120
  await self.memory_manager.add_detailed_context_entry(
190
- input_messages=messages,
191
- prompt=prompt,
192
- llm_response="",
193
- tool_executions=[],
194
- final_response="Prompt sent to LLM",
195
- entry_id=processing_entry_id
121
+ input_messages=messages, prompt=prompt, llm_response="", tool_executions=[],
122
+ final_response="Prompt sent to LLM", entry_id=processing_entry_id
196
123
  )
197
124
 
198
- # Generate response using LLM
199
- response = await self.llm_client.generate(prompt)
200
- agent_logger.debug(f"LLM response received: {response[:100] if response else 'None'}...")
125
+ response_text = await self.llm_client.generate(prompt)
126
+ agent_logger.debug(f"LLM raw response: {response_text[:100] if response_text else 'None'}...")
201
127
 
202
- # Add detailed context entry for the LLM response
203
128
  await self.memory_manager.add_detailed_context_entry(
204
- input_messages=messages,
205
- prompt=prompt,
206
- llm_response=response,
207
- tool_executions=[],
208
- final_response="LLM response received",
209
- entry_id=processing_entry_id
129
+ input_messages=messages, prompt=prompt, llm_response=response_text, tool_executions=[],
130
+ final_response="LLM response received", entry_id=processing_entry_id
210
131
  )
211
132
 
212
- # Parse the response to handle tool calls
213
- # This is a simplified parser - in a full implementation, you would use a more robust method
214
133
  processing_result = {
215
- "input_messages": messages,
216
- "llm_response": response,
217
- "tool_executions": [],
218
- "final_response": ""
134
+ "input_messages": messages, "llm_response": response_text,
135
+ "tool_executions": [], "final_response": ""
219
136
  }
220
137
 
221
- # Extract tool calls from the response
222
- # Look for tool calls in the response
223
- lines = response.split('\n') if response else []
224
- i = 0
225
- json_buffer = "" # Buffer to accumulate multi-line JSON
226
- in_json_block = False # Flag to track if we're inside a JSON block
227
-
228
- while i < len(lines):
229
- line = lines[i].strip()
230
- agent_logger.debug(f"Parsing line: {line}")
231
-
232
- # Handle JSON blocks
233
- if line.startswith('```json'):
234
- in_json_block = True
235
- json_buffer = line + '\n'
236
- elif line == '```' and in_json_block:
237
- # End of JSON block
238
- json_buffer += line
239
- in_json_block = False
240
- # Process the complete JSON block
241
- tool_call = self._parse_tool_call(json_buffer)
242
- if tool_call:
243
- agent_logger.info(f"Executing tool: {tool_call['name']}")
244
- await self._execute_parsed_tool(tool_call, processing_result)
245
- # Update detailed context entry for tool execution
246
- await self.memory_manager.add_detailed_context_entry(
247
- input_messages=messages,
248
- prompt=prompt,
249
- llm_response=response,
250
- tool_executions=processing_result["tool_executions"].copy(), # Pass a copy of current tool executions
251
- final_response=f"Executed tool: {tool_call['name']}",
252
- entry_id=processing_entry_id
253
- )
254
- else:
255
- agent_logger.warning(f"Failed to parse tool call from JSON block: {json_buffer}")
256
- elif in_json_block:
257
- # Accumulate lines for JSON block
258
- json_buffer += line + '\n'
259
- else:
260
- # Check if line contains a tool call
261
- if any(line.startswith(prefix) for prefix in ["get_", "create_", "update_", "delete_", "add_", "remove_", "speak("]):
262
- # Parse tool call
263
- tool_call = self._parse_tool_call(line)
264
- if tool_call:
265
- agent_logger.info(f"Executing tool: {tool_call['name']}")
266
- await self._execute_parsed_tool(tool_call, processing_result)
267
- # Update detailed context entry for tool execution
268
- await self.memory_manager.add_detailed_context_entry(
269
- input_messages=messages,
270
- prompt=prompt,
271
- llm_response=response,
272
- tool_executions=processing_result["tool_executions"].copy(), # Pass a copy of current tool executions
273
- final_response=f"Executed tool: {tool_call['name']}",
274
- entry_id=processing_entry_id
275
- )
276
- else:
277
- agent_logger.warning(f"Failed to parse tool call from line: {line}")
278
- i += 1
279
-
280
- # If we're still in a JSON block at the end, process it
281
- if in_json_block and json_buffer:
282
- tool_call = self._parse_tool_call(json_buffer)
283
- if tool_call:
138
+ if response_text:
139
+ tool_calls = self._parse_tool_calls(response_text)
140
+ for tool_call in tool_calls:
284
141
  agent_logger.info(f"Executing tool: {tool_call['name']}")
285
142
  await self._execute_parsed_tool(tool_call, processing_result)
286
- # Update detailed context entry for tool execution
287
- await self.memory_manager.add_detailed_context_entry(
288
- input_messages=messages,
289
- prompt=prompt,
290
- llm_response=response,
291
- tool_executions=processing_result["tool_executions"].copy(), # Pass a copy of current tool executions
292
- final_response=f"Executed tool: {tool_call['name']}",
293
- entry_id=processing_entry_id
294
- )
295
- else:
296
- agent_logger.warning(f"Failed to parse tool call from incomplete JSON block: {json_buffer}")
297
-
298
- # If we have a final response, we don't need to add it to context here
299
- # because add_detailed_context_entry will add a short entry for us
300
- # if processing_result["final_response"]:
301
- # await self.memory_manager.add_context_entry("assistant", processing_result["final_response"])
302
-
303
- # Update the detailed context entry with final LLM interaction details
143
+
304
144
  await self.memory_manager.add_detailed_context_entry(
305
- input_messages=messages,
306
- prompt=prompt,
307
- llm_response=response,
145
+ input_messages=messages, prompt=prompt, llm_response=response_text,
308
146
  tool_executions=processing_result["tool_executions"],
309
- final_response=processing_result["final_response"],
310
- entry_id=processing_entry_id
147
+ final_response=processing_result["final_response"], entry_id=processing_entry_id
311
148
  )
312
149
 
313
- # Send context update via WebSocket
314
- from ..websocket_manager import connection_manager
315
- context_messages = await self.memory_manager.get_recent_context()
316
- await connection_manager.broadcast({
317
- "type": "agent_context",
318
- "action": "update",
319
- "messages": context_messages
320
- })
150
+ final_context = await self.memory_manager.get_recent_context()
151
+ await connection_manager.broadcast({"type": "agent_context", "action": "update", "messages": final_context})
321
152
 
322
- agent_logger.info("Message processing completed")
153
+ agent_logger.info("Message processing completed.")
323
154
  return processing_result
324
155
 
325
156
  async def _execute_parsed_tool(self, tool_call: Dict[str, Any], processing_result: Dict[str, Any]):
326
- """Execute a parsed tool call and update processing result"""
327
- # Only prevent duplicate speak tool executions to avoid repeated responses
328
- if tool_call["name"] == "speak":
329
- for executed_tool in processing_result["tool_executions"]:
330
- if (executed_tool["name"] == "speak" and
331
- executed_tool["params"].get("text") == tool_call["params"].get("text")):
332
- agent_logger.debug(f"Skipping duplicate speak tool execution: {tool_call['params'].get('text')}")
333
- return
334
-
335
- # Execute the tool
157
+ """Execute a parsed tool call and update processing result."""
336
158
  try:
337
159
  tool_result = await self.execute_tool(tool_call["name"], tool_call["params"])
338
160
  tool_call["result"] = tool_result
339
-
340
- # If this is the speak tool, capture the final response
341
161
  if tool_call["name"] == "speak":
342
162
  processing_result["final_response"] = tool_call["params"].get("text", "")
343
- agent_logger.info(f"Speak tool executed with text: {processing_result['final_response']}")
344
- else:
345
- agent_logger.debug(f"Tool execution result: {tool_result}")
346
-
347
163
  processing_result["tool_executions"].append(tool_call)
348
164
  except Exception as e:
349
165
  tool_call["error"] = str(e)
350
166
  processing_result["tool_executions"].append(tool_call)
351
167
  agent_logger.error(f"Error executing tool {tool_call['name']}: {e}")
352
168
 
353
- def _parse_tool_call(self, line: str) -> Optional[Dict[str, Any]]:
354
- """Parse a tool call from a line of text"""
355
- import re
356
- import json
357
-
358
- # First try to parse as JSON if it looks like JSON
359
- line = line.strip()
360
- if line.startswith('```json'):
169
+ def _parse_tool_calls(self, text: str) -> List[Dict[str, Any]]:
170
+ """Parse tool calls using ast.literal_eval for robustness."""
171
+ import ast
172
+ calls = []
173
+ text = text.strip()
174
+ if text.startswith("speak(") and text.endswith(")"):
361
175
  try:
362
- # Extract JSON content
363
- json_content = line[7:] # Remove ```json
364
- if json_content.endswith('```'):
365
- json_content = json_content[:-3] # Remove trailing ```
366
- json_content = json_content.strip()
367
-
368
- # Parse the JSON
369
- tool_call_data = json.loads(json_content)
176
+ # Extract the content inside speak(...)
177
+ # e.g., "text='Hello, I'm here'"
178
+ inner_content = text[len("speak("):-1].strip()
179
+
180
+ # Ensure it's a text=... call
181
+ if not inner_content.startswith("text="):
182
+ return []
370
183
 
371
- # Handle different JSON formats
372
- if isinstance(tool_call_data, dict):
373
- # Check if it's a tool_code format
374
- if 'tool_code' in tool_call_data:
375
- # Extract the tool call from tool_code
376
- tool_code = tool_call_data['tool_code']
377
- # Remove any wrapper functions like print()
378
- tool_code = re.sub(r'^\w+\((.*)\)$', r'\1', tool_code)
379
- # Now parse the tool call normally
380
- pattern = r'(\w+)\((.*)\)'
381
- match = re.match(pattern, tool_code)
382
- if match:
383
- tool_name = match.group(1)
384
- params_str = match.group(2)
385
-
386
- # Parse parameters
387
- params = {}
388
- param_pattern = r'(\w+)\s*=\s*(".*?"|\'.*?\'|[^,]+?)(?:,|$)'
389
- for param_match in re.finditer(param_pattern, params_str):
390
- key, value = param_match.groups()
391
- # Remove quotes if present
392
- if (value.startswith('"') and value.endswith('"')) or \
393
- (value.startswith("'") and value.endswith("'")):
394
- value = value[1:-1]
395
- params[key] = value
396
-
397
- return {
398
- "name": tool_name,
399
- "params": params
400
- }
401
- # Check if it's a name/arguments format
402
- elif 'name' in tool_call_data and 'arguments' in tool_call_data:
403
- return {
404
- "name": tool_call_data['name'],
405
- "params": tool_call_data['arguments']
406
- }
407
- elif isinstance(tool_call_data, list) and len(tool_call_data) > 0:
408
- # Handle array format - take the first item
409
- first_item = tool_call_data[0]
410
- if isinstance(first_item, dict):
411
- if 'tool_code' in first_item:
412
- # Extract the tool call from tool_code
413
- tool_code = first_item['tool_code']
414
- # Remove any wrapper functions like print()
415
- tool_code = re.sub(r'^\w+\((.*)\)$', r'\1', tool_code)
416
- # Now parse the tool call normally
417
- pattern = r'(\w+)\((.*)\)'
418
- match = re.match(pattern, tool_code)
419
- if match:
420
- tool_name = match.group(1)
421
- params_str = match.group(2)
422
-
423
- # Parse parameters
424
- params = {}
425
- param_pattern = r'(\w+)\s*=\s*(".*?"|\'.*?\'|[^,]+?)(?:,|$)'
426
- for param_match in re.finditer(param_pattern, params_str):
427
- key, value = param_match.groups()
428
- # Remove quotes if present
429
- if (value.startswith('"') and value.endswith('"')) or \
430
- (value.startswith("'") and value.endswith("'")):
431
- value = value[1:-1]
432
- params[key] = value
433
-
434
- return {
435
- "name": tool_name,
436
- "params": params
437
- }
438
- elif 'name' in first_item and 'arguments' in first_item:
439
- return {
440
- "name": first_item['name'],
441
- "params": first_item['arguments']
442
- }
443
-
444
- except (json.JSONDecodeError, KeyError, IndexError):
445
- pass # Fall back to regex parsing
446
-
447
- # Handle multi-line JSON that might be split across several lines
448
- if line == '```json' or line == '{' or line == '}':
449
- # Skip these lines as they're part of JSON structure
450
- return None
451
-
452
- # Pattern to match tool_name(param1=value1, param2=value2, ...)
453
- pattern = r'(\w+)\((.*)\)'
454
- match = re.match(pattern, line)
455
-
456
- if match:
457
- tool_name = match.group(1)
458
- params_str = match.group(2)
459
-
460
- # Parse parameters more robustly
461
- params = {}
462
-
463
- # Handle parameters one by one
464
- # This handles quoted strings correctly, including special characters
465
- param_pattern = r'(\w+)\s*=\s*(".*?"|\'.*?\'|[^,]+?)(?:,|$)'
466
- for param_match in re.finditer(param_pattern, params_str):
467
- key, value = param_match.groups()
468
- # Remove quotes if present
469
- if (value.startswith('"') and value.endswith('"')) or \
470
- (value.startswith("'") and value.endswith("'")):
471
- value = value[1:-1]
472
- params[key] = value
184
+ # Get the quoted string part
185
+ quoted_string = inner_content[len("text="):
186
+ ].strip()
187
+
188
+ # Use ast.literal_eval to safely parse the Python string literal
189
+ parsed_text = ast.literal_eval(quoted_string)
473
190
 
474
- return {
475
- "name": tool_name,
476
- "params": params
477
- }
478
-
479
- return None
191
+ if isinstance(parsed_text, str):
192
+ calls.append({
193
+ "name": "speak",
194
+ "params": {"text": parsed_text}
195
+ })
196
+
197
+ except (ValueError, SyntaxError, TypeError) as e:
198
+ agent_logger.warning(f"Could not parse tool call using ast.literal_eval: {text}. Error: {e}")
199
+
200
+ return calls
480
201
 
481
202
  async def execute_tool(self, tool_name: str, params: Dict[str, Any]) -> Any:
482
- """Execute a registered tool"""
483
- # Ensure agent is initialized
203
+ """Execute a registered tool."""
484
204
  await self.initialize()
485
205
  agent_logger.debug(f"Executing tool: {tool_name} with params: {params}")
486
206
  result = await self.tool_manager.execute_tool(tool_name, params)
487
207
  agent_logger.debug(f"Tool execution result: {result}")
488
208
  return result
489
-
490
- # Function to get agent logs (now uses the shared queue)
491
- def get_agent_logs(lines: int = 50) -> List[str]:
492
- """Get recent agent logs from the shared queue"""
493
- logs_list = list(agent_log_queue)
494
- return logs_list[-lines:] if len(logs_list) > lines else logs_list
@@ -0,0 +1,30 @@
1
+ # agent/factory.py
2
+ """Factory for creating agent instances"""
3
+
4
+ from .base import BaseAgent
5
+ from ..config import config_manager
6
+
7
+
8
+ async def create_agent() -> BaseAgent:
9
+ """Create an agent instance based on the configuration"""
10
+ agent_type = config_manager.settings.agent_type
11
+
12
+ if agent_type == "builtin":
13
+ from ..builtin_agent import local_agent, BuiltinAgentWrapper, initialize_builtin_agent
14
+ if local_agent is None:
15
+ # Try to initialize the builtin agent
16
+ await initialize_builtin_agent()
17
+ # Re-import local_agent after initialization
18
+ from ..builtin_agent import local_agent
19
+ if local_agent is None:
20
+ raise RuntimeError("Failed to initialize Builtin agent")
21
+ return BuiltinAgentWrapper(local_agent)
22
+ elif agent_type == "letta":
23
+ from ..letta import get_letta_agent, initialize_letta_client
24
+ # Try to initialize the letta client
25
+ initialize_letta_client()
26
+ agent = get_letta_agent()
27
+ await agent.initialize()
28
+ return agent
29
+ else:
30
+ raise ValueError(f"Unknown agent type: {agent_type}")