solana-agent 27.4.3__py3-none-any.whl → 28.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- solana_agent/__init__.py +7 -2
- solana_agent/adapters/openai_adapter.py +17 -21
- solana_agent/factories/agent_factory.py +60 -79
- solana_agent/guardrails/pii.py +107 -0
- solana_agent/interfaces/guardrails/guardrails.py +26 -0
- solana_agent/interfaces/providers/llm.py +1 -1
- solana_agent/services/agent.py +577 -308
- solana_agent/services/query.py +140 -58
- {solana_agent-27.4.3.dist-info → solana_agent-28.0.0.dist-info}/METADATA +108 -52
- {solana_agent-27.4.3.dist-info → solana_agent-28.0.0.dist-info}/RECORD +12 -10
- {solana_agent-27.4.3.dist-info → solana_agent-28.0.0.dist-info}/LICENSE +0 -0
- {solana_agent-27.4.3.dist-info → solana_agent-28.0.0.dist-info}/WHEEL +0 -0
solana_agent/services/agent.py
CHANGED
@@ -9,6 +9,8 @@ import asyncio
 import datetime as main_datetime
 from datetime import datetime
 import json
+import logging  # Add logging
+import re
 from typing import AsyncGenerator, Dict, List, Literal, Optional, Any, Union

 from solana_agent.interfaces.services.agent import AgentService as AgentServiceInterface
@@ -16,6 +18,11 @@ from solana_agent.interfaces.providers.llm import LLMProvider
 from solana_agent.plugins.manager import PluginManager
 from solana_agent.plugins.registry import ToolRegistry
 from solana_agent.domains.agent import AIAgent, BusinessMission
+from solana_agent.interfaces.guardrails.guardrails import (
+    OutputGuardrail,
+)
+
+logger = logging.getLogger(__name__)  # Add logger


 class AgentService(AgentServiceInterface):
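The headline change in this hunk is module-level logging: the release replaces the service's print calls with this logger. As a side note (not part of the diff), surfacing the new output only needs standard-library logging configuration; the logger name follows from `__name__` for this module:

import logging

# Show the INFO messages added throughout this release; raise the service
# module to DEBUG to also see the new placeholder-substitution traces.
logging.basicConfig(level=logging.INFO)
logging.getLogger("solana_agent.services.agent").setLevel(logging.DEBUG)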
@@ -29,6 +36,9 @@ class AgentService(AgentServiceInterface):
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         model: Optional[str] = None,
+        output_guardrails: List[
+            OutputGuardrail
+        ] = None,  # <-- Add output_guardrails parameter
     ):
         """Initialize the agent service.

@@ -36,6 +46,10 @@ class AgentService(AgentServiceInterface):
             llm_provider: Provider for language model interactions
             business_mission: Optional business mission and values
             config: Optional service configuration
+            api_key: API key for the LLM provider
+            base_url: Base URL for the LLM provider
+            model: Model name for the LLM provider
+            output_guardrails: List of output guardrail instances
         """
         self.llm_provider = llm_provider
         self.business_mission = business_mission
@@ -46,6 +60,7 @@ class AgentService(AgentServiceInterface):
         self.api_key = api_key
         self.base_url = base_url
         self.model = model
+        self.output_guardrails = output_guardrails or []  # <-- Store guardrails

         self.plugin_manager = PluginManager(
             config=self.config,
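The new output_guardrails list is applied to the final response text before it is yielded (or sent to TTS); as the generate_response hunk below shows, the service simply awaits each guardrail's process coroutine. A minimal sketch of a compatible guardrail follows — it is not part of the diff, and the class name is invented here (the release also ships a concrete PII guardrail in solana_agent/guardrails/pii.py, whose exact API is not visible in this file):

import asyncio
import re


class RedactEmailGuardrail:
    """Toy output guardrail exposing the async process() hook the service awaits."""

    async def process(self, text: str) -> str:
        # Mask anything that looks like an email address in the final response.
        return re.sub(r"[\w.+-]+@[\w-]+\.[\w.]+", "[redacted email]", text)


async def demo() -> None:
    guardrail = RedactEmailGuardrail()
    print(await guardrail.process("Contact bob@bob.com for details."))
    # -> "Contact [redacted email] for details."
    # Passing output_guardrails=[RedactEmailGuardrail()] to AgentService would
    # apply the same transformation to every final response.


asyncio.run(demo())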
@@ -71,6 +86,7 @@ class AgentService(AgentServiceInterface):
             specialization=specialization,
         )
         self.agents.append(agent)
+        logger.info(f"Registered AI agent: {name}")

     def get_agent_system_prompt(self, agent_name: str) -> str:
         """Get the system prompt for an agent.
@@ -152,30 +168,174 @@ class AgentService(AgentServiceInterface):
         """Execute a tool on behalf of an agent."""

         if not self.tool_registry:
+            logger.error("Tool registry not available during tool execution.")
             return {"status": "error", "message": "Tool registry not available"}

         tool = self.tool_registry.get_tool(tool_name)
         if not tool:
+            logger.warning(f"Tool '{tool_name}' not found for execution.")
             return {"status": "error", "message": f"Tool '{tool_name}' not found"}

         # Check if agent has access to this tool
         agent_tools = self.tool_registry.get_agent_tools(agent_name)

         if not any(t.get("name") == tool_name for t in agent_tools):
+            logger.warning(
+                f"Agent '{agent_name}' attempted to use unassigned tool '{tool_name}'."
+            )
             return {
                 "status": "error",
                 "message": f"Agent '{agent_name}' doesn't have access to tool '{tool_name}'",
             }

         try:
+            logger.info(
+                f"Executing tool '{tool_name}' for agent '{agent_name}' with params: {parameters}"
+            )
             result = await tool.execute(**parameters)
+            logger.info(
+                f"Tool '{tool_name}' execution result status: {result.get('status')}"
+            )
             return result
         except Exception as e:
             import traceback

-
+            logger.error(
+                f"Error executing tool '{tool_name}': {e}\n{traceback.format_exc()}"
+            )
             return {"status": "error", "message": f"Error executing tool: {str(e)}"}

+    # --- Helper function to recursively substitute placeholders ---
+    def _substitute_placeholders(self, data: Any, results_map: Dict[str, str]) -> Any:
+        """Recursively substitutes placeholders like {{tool_name.result}} or {output_of_tool_name} in strings."""
+        if isinstance(data, str):
+            # Regex to find placeholders like {{tool_name.result}} or {output_of_tool_name}
+            placeholder_pattern = re.compile(
+                r"\{\{(?P<name1>[a-zA-Z0-9_]+)\.result\}\}|\{output_of_(?P<name2>[a-zA-Z0-9_]+)\}"
+            )
+
+            def replace_match(match):
+                tool_name = match.group("name1") or match.group("name2")
+                if tool_name and tool_name in results_map:
+                    logger.debug(f"Substituting placeholder for '{tool_name}'")
+                    return results_map[tool_name]
+                else:
+                    # If placeholder not found, leave it as is but log warning
+                    logger.warning(
+                        f"Could not find result for placeholder tool '{tool_name}'. Leaving placeholder."
+                    )
+                    return match.group(0)  # Return original placeholder
+
+            # Use re.sub with the replacement function
+            return placeholder_pattern.sub(replace_match, data)
+        elif isinstance(data, dict):
+            # Recursively process dictionary values
+            return {
+                k: self._substitute_placeholders(v, results_map)
+                for k, v in data.items()
+            }
+        elif isinstance(data, list):
+            # Recursively process list items
+            return [self._substitute_placeholders(item, results_map) for item in data]
+        else:
+            # Return non-string/dict/list types as is
+            return data
+
+    # --- Helper to parse tool calls ---
+    def _parse_tool_calls(self, text: str) -> List[Dict[str, Any]]:
+        """Parses all [TOOL]...[/TOOL] blocks in the text."""
+        tool_calls = []
+        # Regex to find all tool blocks, non-greedy match for content
+        pattern = re.compile(r"\[TOOL\](.*?)\[/TOOL\]", re.DOTALL | re.IGNORECASE)
+        matches = pattern.finditer(text)
+
+        for match in matches:
+            tool_content = match.group(1).strip()
+            tool_name = None
+            parameters = {}
+            try:
+                for line in tool_content.split("\n"):
+                    line = line.strip()
+                    if not line:
+                        continue
+                    if line.lower().startswith("name:"):
+                        tool_name = line[5:].strip()
+                    elif line.lower().startswith("parameters:"):
+                        params_text = line[11:].strip()
+                        try:
+                            # Prefer JSON parsing
+                            parameters = json.loads(params_text)
+                        except json.JSONDecodeError:
+                            logger.warning(
+                                f"Failed to parse parameters as JSON, falling back: {params_text}"
+                            )
+                            # Fallback: Treat as simple key=value (less robust)
+                            try:
+                                # Basic eval might work for {"key": "value"} but is risky
+                                # parameters = eval(params_text)  # Avoid eval if possible
+                                # Safer fallback: Assume simple string if not JSON-like
+                                if not params_text.startswith("{"):
+                                    # Try splitting key=value pairs? Very brittle.
+                                    # For now, log warning and skip complex fallback parsing
+                                    logger.error(
+                                        f"Cannot parse non-JSON parameters reliably: {params_text}"
+                                    )
+                                    parameters = {
+                                        "_raw_params": params_text
+                                    }  # Store raw string
+                                else:
+                                    # If it looks like a dict but isn't valid JSON, log error
+                                    logger.error(
+                                        f"Invalid dictionary format for parameters: {params_text}"
+                                    )
+                                    parameters = {"_raw_params": params_text}
+
+                            except Exception as parse_err:
+                                logger.error(
+                                    f"Fallback parameter parsing failed: {parse_err}"
+                                )
+                                parameters = {
+                                    "_raw_params": params_text
+                                }  # Store raw string on error
+
+                if tool_name:
+                    tool_calls.append({"name": tool_name, "parameters": parameters})
+                else:
+                    logger.warning(f"Parsed tool block missing name: {tool_content}")
+            except Exception as e:
+                logger.error(f"Error parsing tool content: {tool_content} - {e}")

+        logger.info(f"Parsed {len(tool_calls)} tool calls from response.")
+        return tool_calls
+
+    # --- Helper to execute a single parsed tool call ---
+    async def _execute_single_tool(
+        self, agent_name: str, tool_call: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Executes a single tool call dictionary and returns its result."""
+        tool_name = tool_call.get("name")
+        parameters = tool_call.get("parameters", {})
+        if not tool_name:
+            return {
+                "tool_name": "unknown",
+                "status": "error",
+                "message": "Tool name missing in parsed call",
+            }
+        # Ensure parameters is a dict, even if parsing failed
+        if not isinstance(parameters, dict):
+            logger.warning(
+                f"Parameters for tool '{tool_name}' is not a dict: {parameters}. Attempting execution with empty params."
+            )
+            parameters = {}
+
+        logger.debug(
+            f"Preparing to execute tool '{tool_name}' with params: {parameters}"
+        )
+        result = await self.execute_tool(agent_name, tool_name, parameters)
+        # Add tool name to result for easier aggregation
+        result["tool_name"] = tool_name
+        return result
+
     async def generate_response(
         self,
         agent_name: str,
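Aside (not part of the diff): taken together, the new _parse_tool_calls and _substitute_placeholders helpers let the model emit several [TOOL] blocks whose later parameters reference earlier results. The following standalone sketch mirrors only the happy path of those helpers — one-line JSON parameters, no fallback or error handling — to show the expected inputs and outputs:

import json
import re

TOOL_BLOCK = re.compile(r"\[TOOL\](.*?)\[/TOOL\]", re.DOTALL | re.IGNORECASE)
PLACEHOLDER = re.compile(
    r"\{\{(?P<name1>[a-zA-Z0-9_]+)\.result\}\}|\{output_of_(?P<name2>[a-zA-Z0-9_]+)\}"
)


def parse_tool_calls(text: str) -> list:
    """Simplified mirror of _parse_tool_calls: pull name/parameters from each [TOOL] block."""
    calls = []
    for match in TOOL_BLOCK.finditer(text):
        name, params = None, {}
        for line in match.group(1).strip().splitlines():
            line = line.strip()
            if line.lower().startswith("name:"):
                name = line[5:].strip()
            elif line.lower().startswith("parameters:"):
                params = json.loads(line[11:].strip())
        if name:
            calls.append({"name": name, "parameters": params})
    return calls


def substitute(text: str, results_map: dict) -> str:
    """Simplified mirror of _substitute_placeholders for plain strings."""
    def repl(m):
        tool = m.group("name1") or m.group("name2")
        return results_map.get(tool, m.group(0))
    return PLACEHOLDER.sub(repl, text)


llm_output = """[TOOL]
name: search_internet
parameters: {"query": "latest news on Canada"}
[/TOOL]
[TOOL]
name: mcp
parameters: {"query": "Email a summary of {output_of_search_internet} to bob@bob.com"}
[/TOOL]"""

calls = parse_tool_calls(llm_output)
results = {"search_internet": "Headline: ..."}
print(substitute(calls[1]["parameters"]["query"], results))
# -> "Email a summary of Headline: ... to bob@bob.com"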
@@ -201,11 +361,17 @@ class AgentService(AgentServiceInterface):
         ] = "aac",
         prompt: Optional[str] = None,
     ) -> AsyncGenerator[Union[str, bytes], None]:  # pragma: no cover
-        """Generate a response
+        """Generate a response, supporting multiple sequential tool calls with placeholder substitution.
+
+        Text responses are always generated as a single block.
+        Audio responses always buffer text before TTS.
+        """
         agent = next((a for a in self.agents if a.name == agent_name), None)
         if not agent:
             error_msg = f"Agent '{agent_name}' not found."
+            logger.warning(error_msg)
             if output_format == "audio":
+                # Assuming tts returns an async generator
                 async for chunk in self.llm_provider.tts(
                     error_msg,
                     instructions=audio_instructions,
@@ -214,264 +380,315 @@ class AgentService(AgentServiceInterface):
                 ):
                     yield chunk
             else:
-                yield error_msg
+                yield error_msg  # Yield the single error string
             return

+        logger.debug(
+            f"Generating response for agent '{agent_name}'. Output format: {output_format}."
+        )
+
         try:
-            # ---
+            # --- System Prompt Assembly ---
             system_prompt_parts = [self.get_agent_system_prompt(agent_name)]
-
-
-
-
-            tool_usage_prompt_text = self._get_tool_usage_prompt(agent_name)
-            if tool_usage_prompt_text:
-                system_prompt_parts.append(
-                    f"\n\n--- TOOL USAGE INSTRUCTIONS ---{tool_usage_prompt_text}"
-                )
-                print(
-                    f"Tools available to agent {agent_name}: {[t.get('name') for t in self.get_agent_tools(agent_name)]}"
-                )
-
-            # --- 3. Add User ID ---
-            system_prompt_parts.append("\n\n--- USER & SESSION INFO ---")
-            system_prompt_parts.append(f"User ID: {user_id}")
-
-            # --- 4. Add Memory Context ---
+            tool_instructions = self._get_tool_usage_prompt(agent_name)
+            if tool_instructions:
+                system_prompt_parts.append(tool_instructions)
+            system_prompt_parts.append(f"USER IDENTIFIER: {user_id}")
             if memory_context:
-
-                system_prompt_parts.append(
-                    "\n\n--- CONVERSATION HISTORY (Memory Context) ---"
-                )
-                system_prompt_parts.append(memory_context)
-
-            # --- 5. Add Additional Prompt (if provided) ---
+                system_prompt_parts.append(f"\nCONVERSATION HISTORY:\n{memory_context}")
             if prompt:
-
-
-                    "\n\n--- ADDITIONAL INSTRUCTIONS FOR THIS TURN ---"
-                )
-                system_prompt_parts.append(prompt)
-
-            # --- Assemble the final system prompt ---
-            final_system_prompt = "\n".join(system_prompt_parts)
+                system_prompt_parts.append(f"\nADDITIONAL PROMPT:\n{prompt}")
+            final_system_prompt = "\n\n".join(filter(None, system_prompt_parts))

-            #
-
-
-
-            # Variables for robust handling of tool call markers that may be split across chunks
-            tool_buffer = ""
-            pending_chunk = ""  # To hold text that might contain partial markers
-            is_tool_call = False
-
-            # Define start and end markers
+            # --- Initial Response Generation (No Streaming) ---
+            initial_llm_response_buffer = ""
+            tool_calls_detected = False
             start_marker = "[TOOL]"
-            end_marker = "[/TOOL]"

-
-
-
-                prompt=query,
+            logger.info(f"Generating initial response for agent '{agent_name}'...")
+            # Call generate_text and await the string result
+            initial_llm_response_buffer = await self.llm_provider.generate_text(
+                prompt=str(query),
                 system_prompt=final_system_prompt,
                 api_key=self.api_key,
                 base_url=self.base_url,
                 model=self.model,
+            )
+
+            # Check for errors returned as string by the adapter
+            if isinstance(
+                initial_llm_response_buffer, str
+            ) and initial_llm_response_buffer.startswith(
+                "I apologize, but I encountered an error"
             ):
-
-
-
-
+                logger.error(
+                    f"LLM provider failed during initial generation: {initial_llm_response_buffer}"
+                )
+                # Yield the error and exit
+                if output_format == "audio":
+                    async for chunk in self.llm_provider.tts(
+                        initial_llm_response_buffer,
+                        voice=audio_voice,
+                        response_format=audio_output_format,
+                        instructions=audio_instructions,
+                    ):
+                        yield chunk
                 else:
-
-
-
-
-
-
+                    yield initial_llm_response_buffer
+                return
+
+            # Check for tool markers in the complete response
+            if start_marker.lower() in initial_llm_response_buffer.lower():
+                tool_calls_detected = True
+                logger.info("Tool call marker detected in initial response.")
+
+            logger.debug(
+                f"Full initial LLM response buffer:\n--- START ---\n{initial_llm_response_buffer}\n--- END ---"
+            )
+            logger.info(
+                f"Initial LLM response received (length: {len(initial_llm_response_buffer)}). Tools detected: {tool_calls_detected}"
+            )
+
+            # --- Tool Execution Phase (if tools were detected) ---
+            final_response_text = ""
+            if tool_calls_detected:
+                parsed_calls = self._parse_tool_calls(initial_llm_response_buffer)
+
+                if parsed_calls:
+                    # --- Execute tools SEQUENTIALLY with Placeholder Substitution ---
+                    executed_tool_results = []  # Store full result dicts
+                    # Map tool names to their string results for substitution
+                    tool_results_map: Dict[str, str] = {}
+
+                    logger.info(
+                        f"Executing {len(parsed_calls)} tools sequentially with substitution..."
                     )
-
-
-
-
-                        before_marker = combined_chunk[:start_pos]
-                        after_marker = combined_chunk[start_pos:]
-
-                        # Yield text that appeared before the marker
-                        if before_marker and output_format == "text":
-                            yield before_marker
-
-                        # Start collecting the tool call
-                        tool_buffer = after_marker
-                        continue  # Skip to next chunk
-
-                # STEP 2: Handle ongoing tool call collection
-                if is_tool_call:
-                    tool_buffer += combined_chunk
-
-                    # Check if the tool call is complete
-                    if end_marker in tool_buffer:
-                        print(f"Tool call complete, buffer size: {len(tool_buffer)}")
-
-                        # Process the tool call
-                        response_text = await self._handle_tool_call(
-                            agent_name=agent_name, tool_text=tool_buffer
+                    for i, call in enumerate(parsed_calls):
+                        tool_name_to_exec = call.get("name", "unknown")
+                        logger.info(
+                            f"Executing tool {i + 1}/{len(parsed_calls)}: {tool_name_to_exec}"
                         )

-                        #
-
-
-
-
+                        # --- Substitute placeholders in parameters ---
+                        try:
+                            original_params = call.get("parameters", {})
+                            substituted_params = self._substitute_placeholders(
+                                original_params, tool_results_map
+                            )
+                            if substituted_params != original_params:
+                                logger.info(
+                                    f"Substituted parameters for tool '{tool_name_to_exec}': {substituted_params}"
+                                )
+                                call["parameters"] = substituted_params  # Update call dict
+                        except Exception as sub_err:
+                            logger.error(
+                                f"Error substituting placeholders for tool '{tool_name_to_exec}': {sub_err}",
+                                exc_info=True,
+                            )
+                            # Proceed with original params but log the error
+
+                        # --- Execute the tool ---
+                        try:
+                            result = await self._execute_single_tool(agent_name, call)
+                            executed_tool_results.append(result)
+
+                            # --- Store successful result string for future substitutions ---
+                            if result.get("status") == "success":
+                                tool_result_str = str(result.get("result", ""))
+                                tool_results_map[tool_name_to_exec] = tool_result_str
+                                logger.debug(
+                                    f"Stored result for '{tool_name_to_exec}' (length: {len(tool_result_str)})"
+                                )
+                            else:
+                                # Store error message as result
+                                error_message = result.get("message", "Unknown error")
+                                tool_results_map[tool_name_to_exec] = (
+                                    f"Error: {error_message}"
+                                )
+                                logger.warning(
+                                    f"Tool '{tool_name_to_exec}' failed, storing error message as result."
+                                )
+
+                        except Exception as tool_exec_err:
+                            logger.error(
+                                f"Exception during execution of tool {tool_name_to_exec}: {tool_exec_err}",
+                                exc_info=True,
+                            )
+                            error_result = {
+                                "tool_name": tool_name_to_exec,
+                                "status": "error",
+                                "message": f"Exception during execution: {str(tool_exec_err)}",
+                            }
+                            executed_tool_results.append(error_result)
+                            tool_results_map[tool_name_to_exec] = (
+                                f"Error: {str(tool_exec_err)}"  # Store error
+                            )

-
-
-
+                    logger.info("Sequential tool execution with substitution complete.")
+                    # --- End Sequential Execution ---
+
+                    # Format results for the follow-up prompt (use executed_tool_results)
+                    tool_results_text_parts = []
+                    for i, result in enumerate(
+                        executed_tool_results
+                    ):  # Use the collected results
+                        tool_name = result.get(
+                            "tool_name", "unknown"
+                        )  # Name should be in the result dict now
+                        if (
+                            isinstance(result, Exception)
+                            or result.get("status") == "error"
+                        ):
+                            error_msg = (
+                                result.get("message", str(result))
+                                if isinstance(result, dict)
+                                else str(result)
+                            )
+                            logger.error(f"Tool '{tool_name}' failed: {error_msg}")
+                            tool_results_text_parts.append(
+                                f"Tool {i + 1} ({tool_name}) Execution Failed:\n{error_msg}"
+                            )
+                        else:
+                            tool_output = str(result.get("result", ""))
+                            tool_results_text_parts.append(
+                                f"Tool {i + 1} ({tool_name}) Result:\n{tool_output}"
+                            )
+                    tool_results_context = "\n\n".join(tool_results_text_parts)

-
-
-
-
-
-
+                    # --- Generate Final Response using Tool Results (No Streaming) ---
+                    follow_up_prompt = f"Original Query: {str(query)}\n\nRESULTS FROM TOOL CALLS:\n{tool_results_context}\n\nBased on the original query and the tool results, please provide the final response to the user."
+                    # Rebuild system prompt
+                    follow_up_system_prompt_parts = [
+                        self.get_agent_system_prompt(agent_name)
+                    ]
+                    follow_up_system_prompt_parts.append(f"USER IDENTIFIER: {user_id}")
+                    if memory_context:
                         follow_up_system_prompt_parts.append(
-                            "\
+                            f"\nORIGINAL CONVERSATION HISTORY:\n{memory_context}"
                         )
+                    if prompt:
                         follow_up_system_prompt_parts.append(
-                            "\
+                            f"\nORIGINAL ADDITIONAL PROMPT:\n{prompt}"
                         )
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    follow_up_system_prompt_parts.append(
+                        f"\nCONTEXT: You previously decided to run {len(parsed_calls)} tool(s) sequentially to answer the query. The results are provided above."
+                    )
+                    final_follow_up_system_prompt = "\n\n".join(
+                        filter(None, follow_up_system_prompt_parts)
+                    )
+
+                    logger.info(
+                        "Generating final response incorporating tool results..."
+                    )
+                    # Call generate_text and await the string result
+                    synthesized_response_buffer = await self.llm_provider.generate_text(
+                        prompt=follow_up_prompt,
+                        system_prompt=final_follow_up_system_prompt,
+                        api_key=self.api_key,
+                        base_url=self.base_url,
+                        model=self.model,
+                    )

-
-
-
+                    # Check for errors returned as string by the adapter
+                    if isinstance(
+                        synthesized_response_buffer, str
+                    ) and synthesized_response_buffer.startswith(
+                        "I apologize, but I encountered an error"
+                    ):
+                        logger.error(
+                            f"LLM provider failed during final generation: {synthesized_response_buffer}"
                        )
-                        #
-
-
-
-
-
-
-                            processed_chunk
-                        ) in self.llm_provider.generate_text(
-                            prompt=user_prompt,
-                            system_prompt=final_follow_up_system_prompt,
-                            api_key=self.api_key,
-                            base_url=self.base_url,
-                            model=self.model,
+                        # Yield the error and exit
+                        if output_format == "audio":
+                            async for chunk in self.llm_provider.tts(
+                                synthesized_response_buffer,
+                                voice=audio_voice,
+                                response_format=audio_output_format,
+                                instructions=audio_instructions,
                             ):
-
-                            yield processed_chunk
+                                yield chunk
                         else:
-
-
-                        async for (
-                            processed_chunk
-                        ) in self.llm_provider.generate_text(
-                            prompt=user_prompt,
-                            system_prompt=final_follow_up_system_prompt,
-                        ):
-                            tool_response += processed_chunk
-
-                        # Clean and add to our complete text record and audio buffer
-                        tool_response = self._clean_for_audio(tool_response)
-                        complete_text_response += tool_response
-                        full_response_buffer += tool_response
+                            yield synthesized_response_buffer
+                        return

-
-
-
-
-                        break  # Exit the original generation loop after tool processing
-
-                    # Continue collecting tool call content without yielding
-                    continue
-
-                # STEP 3: Check for possible partial start markers at the end of the chunk
-                # This helps detect markers split across chunks
-                potential_marker = False
-                for i in range(1, len(start_marker)):
-                    if combined_chunk.endswith(start_marker[:i]):
-                        # Found a partial marker at the end
-                        # Save the partial marker
-                        pending_chunk = combined_chunk[-i:]
-                        # Everything except the partial marker
-                        chunk_to_yield = combined_chunk[:-i]
-                        potential_marker = True
-                        print(f"Potential partial marker detected: '{pending_chunk}'")
-                        break
-
-                if potential_marker:
-                    # Process the safe part of the chunk
-                    if chunk_to_yield and output_format == "text":
-                        yield chunk_to_yield
-                    if chunk_to_yield:
-                        complete_text_response += chunk_to_yield
-                        if output_format == "audio":
-                            full_response_buffer += chunk_to_yield
-                    continue
-
-                # STEP 4: Normal text processing for non-tool call content
-                if output_format == "text":
-                    yield combined_chunk
-
-                complete_text_response += combined_chunk
-                if output_format == "audio":
-                    full_response_buffer += combined_chunk
+                    final_response_text = synthesized_response_buffer
+                    logger.info(
+                        f"Final synthesized response length: {len(final_response_text)}"
+                    )

-
-
-
-
+                else:
+                    # Tools detected but parsing failed
+                    logger.warning(
+                        "Tool markers detected, but no valid tool calls parsed. Treating initial response as final."
+                    )
+                    final_response_text = initial_llm_response_buffer
+            else:
+                # No tools detected
+                final_response_text = initial_llm_response_buffer
+                logger.info("No tools detected. Using initial response as final.")
+
+            # --- Final Output Processing (Guardrails, TTS, Yielding) ---
+            processed_final_text = final_response_text
+            if self.output_guardrails:
+                logger.info(
+                    f"Applying output guardrails to final text response (length: {len(processed_final_text)})"
                )
-
-
+                original_len = len(processed_final_text)
+                for guardrail in self.output_guardrails:
+                    try:
+                        processed_final_text = await guardrail.process(
+                            processed_final_text
+                        )
+                    except Exception as e:
+                        logger.error(
+                            f"Error applying output guardrail {guardrail.__class__.__name__} to final text: {e}"
+                        )
+                if len(processed_final_text) != original_len:
+                    logger.info(
+                        f"Guardrails modified final text length from {original_len} to {len(processed_final_text)}"
+                    )

-
-                if output_format == "audio":
-                    full_response_buffer += tool_buffer
+            self.last_text_response = processed_final_text

-
-
-
-
-
+            if output_format == "text":
+                # Yield the single final string
+                if processed_final_text:
+                    yield processed_final_text
+                else:
+                    logger.warning("Final processed text was empty.")
+                    yield ""
+            elif output_format == "audio":
+                # TTS still needs a generator
+                text_for_tts = processed_final_text
+                cleaned_audio_buffer = self._clean_for_audio(text_for_tts)
+                logger.info(
+                    f"Processing {len(cleaned_audio_buffer)} characters for audio output"
                )
-
-
-
-
-
-
-
-
-
-
+                if cleaned_audio_buffer:
+                    async for audio_chunk in self.llm_provider.tts(
+                        text=cleaned_audio_buffer,
+                        voice=audio_voice,
+                        response_format=audio_output_format,
+                        instructions=audio_instructions,
+                    ):
+                        yield audio_chunk
+                else:
+                    logger.warning("Final text for audio was empty after cleaning.")

-
-
-
+            logger.info(
+                f"Response generation complete for agent '{agent_name}': {len(self.last_text_response)} final chars"
+            )

         except Exception as e:
-
-            print(f"Error in generate_response: {str(e)}")
+            # --- Error Handling ---
             import traceback

-
-
+            error_msg = (
+                "I apologize, but I encountered an error processing your request."
+            )
+            logger.error(
+                f"Error in generate_response for agent '{agent_name}': {e}\n{traceback.format_exc()}"
+            )
             if output_format == "audio":
                 async for chunk in self.llm_provider.tts(
                     error_msg,
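For orientation, here is a hypothetical caller's view of the reworked method (not taken from the diff): the text path now yields one final string instead of a stream of chunks, and the audio path yields bytes. Keyword names such as user_id, query and memory_context are inferred from the locals visible in the hunk above; the full signature is not shown here, so treat the call shape as an assumption.

async def ask(agent_service) -> str:
    # Collect whatever the generator yields; with output_format="text" the new
    # flow produces a single final string, so the join is mostly a formality.
    chunks = []
    async for chunk in agent_service.generate_response(
        agent_name="researcher",
        user_id="user-123",
        query="What is the latest news on Solana?",
        memory_context="",
        output_format="text",  # "audio" would yield audio bytes instead
    ):
        chunks.append(chunk)
    return "".join(chunks)

# e.g. answer = asyncio.run(ask(agent_service))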
@@ -484,35 +701,24 @@ class AgentService(AgentServiceInterface):
                 yield error_msg

     async def _bytes_to_generator(self, data: bytes) -> AsyncGenerator[bytes, None]:
-        """Convert bytes to an async generator for streaming.
-
-        Args:
-            data: Bytes of audio data
-
-        Yields:
-            Chunks of audio data
-        """
-        # Define a reasonable chunk size (adjust based on your needs)
+        """Convert bytes to an async generator for streaming."""
         chunk_size = 4096
-
         for i in range(0, len(data), chunk_size):
             yield data[i : i + chunk_size]
-            # Small delay to simulate streaming
             await asyncio.sleep(0.01)

     async def _handle_tool_call(self, agent_name: str, tool_text: str) -> str:
         """Handle marker-based tool calls."""
         try:
-            # Extract the content between markers
             start_marker = "[TOOL]"
             end_marker = "[/TOOL]"
-
             start_idx = tool_text.find(start_marker) + len(start_marker)
             end_idx = tool_text.find(end_marker)
+            if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
+                logger.error(f"Malformed tool call text received: {tool_text}")
+                return "Error: Malformed tool call format."

             tool_content = tool_text[start_idx:end_idx].strip()
-
-            # Parse the lines to extract name and parameters
             tool_name = None
             parameters = {}

@@ -520,136 +726,168 @@ class AgentService(AgentServiceInterface):
                 line = line.strip()
                 if not line:
                     continue
-
                 if line.startswith("name:"):
                     tool_name = line[5:].strip()
                 elif line.startswith("parameters:"):
                     params_text = line[11:].strip()
-
-
-
-
-
-
-
-
+                    try:
+                        # Attempt to parse as JSON first for robustness
+                        parameters = json.loads(params_text)
+                    except json.JSONDecodeError:
+                        # Fallback to comma-separated key=value pairs
+                        param_pairs = params_text.split(",")
+                        for pair in param_pairs:
+                            if "=" in pair:
+                                k, v = pair.split("=", 1)
+                                parameters[k.strip()] = v.strip()
+                        logger.warning(
+                            f"Parsed tool parameters using fallback method: {params_text}"
+                        )
+
+            if not tool_name:
+                logger.error(f"Tool name missing in tool call: {tool_content}")
+                return "Error: Tool name missing in call."
+
             result = await self.execute_tool(agent_name, tool_name, parameters)

-            # Return the result as string
             if result.get("status") == "success":
                 tool_result = str(result.get("result", ""))
                 return tool_result
             else:
                 error_msg = f"Error calling {tool_name}: {result.get('message', 'Unknown error')}"
+                logger.error(error_msg)
                 return error_msg

         except Exception as e:
             import traceback

-
+            logger.error(f"Error processing tool call: {e}\n{traceback.format_exc()}")
             return f"Error processing tool call: {str(e)}"

     def _get_tool_usage_prompt(self, agent_name: str) -> str:
         """Generate marker-based instructions for tool usage."""
-        # Get tools assigned to this agent
         tools = self.get_agent_tools(agent_name)
         if not tools:
             return ""

-
+        # Simplify tool representation for the prompt
+        simplified_tools = []
+        for tool in tools:
+            simplified_tool = {
+                "name": tool.get("name"),
+                "description": tool.get("description"),
+                "parameters": tool.get("parameters", {}).get("properties", {}),
+            }
+            simplified_tools.append(simplified_tool)
+
+        tools_json = json.dumps(simplified_tools, indent=2)
+
+        logger.info(
+            f"Generated tool usage prompt for agent '{agent_name}': {tools_json}"
+        )

         return f"""
 AVAILABLE TOOLS:
 {tools_json}

-⚠️ CRITICAL
-
-
+⚠️ CRITICAL INSTRUCTIONS FOR TOOL USAGE:
+1. EXECUTION ORDER MATTERS: If multiple steps are needed (e.g., get information THEN use it), you MUST output the [TOOL] blocks in the exact sequence they need to run. Output the information-gathering tool call FIRST, then the tool call that uses the information.
+2. ONLY TOOL CALLS: When using a tool, NEVER include explanatory text before or after the tool call block. Only output the exact tool call format shown below.
+3. USE TOOLS WHEN NEEDED: Always call the necessary tool to give the latest information, especially for time-sensitive queries.

 TOOL USAGE FORMAT:
 [TOOL]
 name: tool_name
-parameters: key1
+parameters: {{"key1": "value1", "key2": "value2"}}
 [/TOOL]

 EXAMPLES:
-
+
+✅ CORRECT - Get news THEN email (Correct Order):
 [TOOL]
 name: search_internet
-parameters: query
+parameters: {{"query": "latest news on Canada"}}
+[/TOOL]
+[TOOL]
+name: mcp
+parameters: {{"query": "Send an email to
+bob@bob.com with subject
+'Friendly Reminder to Clean Your Room'
+and body 'Hi Bob, just a friendly
+reminder to please clean your room
+when you get a chance.'"}}
+[/TOOL]
+(Note: The system will handle replacing placeholders like '{{output_of_search_internet}}' if possible, but the ORDER is crucial.)
+
+
+❌ INCORRECT - Wrong Order:
+[TOOL]
+name: mcp
+parameters: {{"query": "Send an email to
+bob@bob.com with subject
+'Friendly Reminder to Clean Your Room'
+and body 'Hi Bob, just a friendly
+reminder to please clean your room
+when you get a chance.'"}}
 [/TOOL]
-
-❌ INCORRECT - Never add explanatory text like this:
-To get the latest news on Solana, I will search the internet.
 [TOOL]
 name: search_internet
-parameters: query
+parameters: {{"query": "latest news on Canada"}}
 [/TOOL]

-REMEMBER:
-1. Output ONLY the exact tool call format with NO additional text
-2. If the query is time-sensitive (latest news, current status, etc.), ALWAYS use the tool.
-3. After seeing your tool call, I will execute it automatically
-4. You will receive the tool results and can then respond to the user
-"""

-
-
+❌ INCORRECT - Explanatory Text:
+To get the news, I'll search.
+[TOOL]
+name: search_internet
+parameters: {{"query": "latest news on Solana"}}
+[/TOOL]
+Now I will email it.
+[TOOL]
+name: mcp
+parameters: {{"query": "Send an email to
+bob@bob.com with subject
+'Friendly Reminder to Clean Your Room'
+and body 'Hi Bob, just a friendly
+reminder to please clean your room
+when you get a chance.'"}}
+[/TOOL]

-        Args:
-            text: Input text with potential Markdown formatting and special characters

-
-
+REMEMBER:
+- Output ONLY the [TOOL] blocks in the correct execution order.
+- I will execute the tools sequentially as you provide them.
+- You will receive the results of ALL tool calls before formulating the final response.
 """
+
+    def _clean_for_audio(self, text: str) -> str:
+        """Remove Markdown formatting, emojis, and non-pronounceable characters from text."""
         import re

         if not text:
             return ""
-
-        # Remove Markdown links - [text](url) -> text
         text = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", text)
-
-        # Remove inline code with backticks
         text = re.sub(r"`([^`]+)`", r"\1", text)
-
-        # Remove bold formatting - **text** or __text__ -> text
         text = re.sub(r"(\*\*|__)(.*?)\1", r"\2", text)
-
-        # Remove italic formatting - *text* or _text_ -> text
         text = re.sub(r"(\*|_)(.*?)\1", r"\2", text)
-
-        # Remove headers - ## Header -> Header
         text = re.sub(r"^\s*#+\s*(.*?)$", r"\1", text, flags=re.MULTILINE)
-
-        # Remove blockquotes - > Text -> Text
         text = re.sub(r"^\s*>\s*(.*?)$", r"\1", text, flags=re.MULTILINE)
-
-        # Remove horizontal rules (---, ***, ___)
         text = re.sub(r"^\s*[-*_]{3,}\s*$", "", text, flags=re.MULTILINE)
-
-        # Remove list markers - * Item or - Item or 1. Item -> Item
         text = re.sub(r"^\s*[-*+]\s+(.*?)$", r"\1", text, flags=re.MULTILINE)
         text = re.sub(r"^\s*\d+\.\s+(.*?)$", r"\1", text, flags=re.MULTILINE)
-
-        # Remove multiple consecutive newlines (keep just one)
         text = re.sub(r"\n{3,}", "\n\n", text)
-
-        # Remove emojis and other non-pronounceable characters
-        # Common emoji Unicode ranges
         emoji_pattern = re.compile(
             "["
             "\U0001f600-\U0001f64f"  # emoticons
             "\U0001f300-\U0001f5ff"  # symbols & pictographs
             "\U0001f680-\U0001f6ff"  # transport & map symbols
             "\U0001f700-\U0001f77f"  # alchemical symbols
-            "\U0001f780-\U0001f7ff"  # Geometric Shapes
+            "\U0001f780-\U0001f7ff"  # Geometric Shapes Extended
             "\U0001f800-\U0001f8ff"  # Supplemental Arrows-C
             "\U0001f900-\U0001f9ff"  # Supplemental Symbols and Pictographs
-            "\U0001fa00-\U0001fa6f"  # Chess Symbols
             "\U0001fa70-\U0001faff"  # Symbols and Pictographs Extended-A
             "\U00002702-\U000027b0"  # Dingbats
-            "\U000024c2-\
+            "\U000024c2-\U0001f251"
             "\U00002600-\U000026ff"  # Miscellaneous Symbols
             "\U00002700-\U000027bf"  # Dingbats
             "\U0000fe00-\U0000fe0f"  # Variation Selectors
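Stepping outside the diff for a moment: the parameter parsing added to _handle_tool_call above tries JSON first and only then falls back to comma-separated key=value pairs (whose values stay strings). A standalone sketch of that order, not the service's own code:

import json


def parse_params(params_text: str) -> dict:
    # Mirrors the new logic in _handle_tool_call: JSON first, then a
    # comma-separated key=value fallback.
    try:
        return json.loads(params_text)
    except json.JSONDecodeError:
        parameters = {}
        for pair in params_text.split(","):
            if "=" in pair:
                key, value = pair.split("=", 1)
                parameters[key.strip()] = value.strip()
        return parameters


print(parse_params('{"query": "latest news on Canada"}'))  # {'query': 'latest news on Canada'}
print(parse_params("query=latest news, limit=5"))          # {'query': 'latest news', 'limit': '5'}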
@@ -658,26 +896,57 @@ class AgentService(AgentServiceInterface):
             flags=re.UNICODE,
         )
         text = emoji_pattern.sub(r" ", text)
-
-
-
-
-        # Replace multiple spaces with a single space
+        text = re.sub(
+            r"[^\w\s\.\,\;\:\?\!\'\"\-\(\)]", " ", text
+        )  # Keep basic punctuation
         text = re.sub(r"\s+", " ", text)
-
         return text.strip()

     def _clean_tool_response(self, text: str) -> str:
         """Remove any tool markers or formatting that might have leaked into the response."""
         if not text:
             return ""
+        text = text.replace("[TOOL]", "").replace("[/TOOL]", "")
+        if text.lstrip().startswith("TOOL"):
+            text = text.lstrip()[4:].lstrip()  # Remove "TOOL" and leading space
+        return text.strip()

-
-
-
+    # --- Add methods from factory logic ---
+    def load_and_register_plugins(self):
+        """Loads plugins using the PluginManager."""
+        try:
+            self.plugin_manager.load_plugins()
+            logger.info("Plugins loaded successfully via PluginManager.")
+        except Exception as e:
+            logger.error(f"Error loading plugins: {e}", exc_info=True)

-
-
-
+    def register_agents_from_config(self):
+        """Registers agents defined in the main configuration."""
+        agents_config = self.config.get("agents", [])
+        if not agents_config:
+            logger.warning("No agents defined in the configuration.")
+            return

-
+        for agent_config in agents_config:
+            name = agent_config.get("name")
+            instructions = agent_config.get("instructions")
+            specialization = agent_config.get("specialization")
+            tools = agent_config.get("tools", [])
+
+            if not name or not instructions or not specialization:
+                logger.warning(
+                    f"Skipping agent due to missing name, instructions, or specialization: {agent_config}"
+                )
+                continue
+
+            self.register_ai_agent(name, instructions, specialization)
+            # logger.info(f"Registered agent: {name}")  # Logging done in register_ai_agent
+
+            # Assign tools to the agent
+            for tool_name in tools:
+                if self.assign_tool_for_agent(name, tool_name):
+                    logger.info(f"Assigned tool '{tool_name}' to agent '{name}'.")
+                else:
+                    logger.warning(
+                        f"Failed to assign tool '{tool_name}' to agent '{name}' (Tool might not be registered)."
+                    )
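A final aside (not part of the diff): register_agents_from_config above reads an "agents" list from the service configuration, requiring name, instructions and specialization per entry, with an optional tools list. A hypothetical configuration sketch — keys are the ones the method reads, values are made up for illustration:

config = {
    "agents": [
        {
            "name": "researcher",
            "instructions": "Answer questions using up-to-date information.",
            "specialization": "research",
            "tools": ["search_internet"],
        },
        {
            # Missing "instructions"/"specialization" would be skipped with a warning.
            "name": "incomplete_agent",
        },
    ]
}
# agent_service.register_agents_from_config() would register "researcher",
# try to assign it the "search_internet" tool, and skip "incomplete_agent".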