solana-agent 27.4.2__py3-none-any.whl → 27.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- solana_agent/__init__.py +7 -2
- solana_agent/factories/agent_factory.py +53 -1
- solana_agent/guardrails/pii.py +107 -0
- solana_agent/interfaces/guardrails/guardrails.py +26 -0
- solana_agent/services/agent.py +392 -237
- solana_agent/services/query.py +140 -58
- {solana_agent-27.4.2.dist-info → solana_agent-27.5.0.dist-info}/METADATA +84 -3
- {solana_agent-27.4.2.dist-info → solana_agent-27.5.0.dist-info}/RECORD +10 -8
- {solana_agent-27.4.2.dist-info → solana_agent-27.5.0.dist-info}/LICENSE +0 -0
- {solana_agent-27.4.2.dist-info → solana_agent-27.5.0.dist-info}/WHEEL +0 -0
solana_agent/services/agent.py
CHANGED
@@ -9,6 +9,7 @@ import asyncio
 import datetime as main_datetime
 from datetime import datetime
 import json
+import logging  # Add logging
 from typing import AsyncGenerator, Dict, List, Literal, Optional, Any, Union

 from solana_agent.interfaces.services.agent import AgentService as AgentServiceInterface
@@ -16,6 +17,11 @@ from solana_agent.interfaces.providers.llm import LLMProvider
 from solana_agent.plugins.manager import PluginManager
 from solana_agent.plugins.registry import ToolRegistry
 from solana_agent.domains.agent import AIAgent, BusinessMission
+from solana_agent.interfaces.guardrails.guardrails import (
+    OutputGuardrail,
+)
+
+logger = logging.getLogger(__name__)  # Add logger


 class AgentService(AgentServiceInterface):
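The `OutputGuardrail` interface imported above is defined in the new `solana_agent/interfaces/guardrails/guardrails.py` module listed at the top of this diff; its body is not shown here. Judging from the call sites below, which `await guardrail.process(...)` on text, it presumably looks roughly like this sketch (everything except the `process` call shape is an assumption):

# Hypothetical sketch of the guardrail interface; the real definition lives in
# solana_agent/interfaces/guardrails/guardrails.py and is not part of this diff.
from abc import ABC, abstractmethod


class OutputGuardrail(ABC):
    """Transforms (e.g., redacts) generated text before it reaches the user."""

    @abstractmethod
    async def process(self, text: str) -> str:
        """Return the processed text; called as `await guardrail.process(chunk)`."""
        ...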
@@ -29,6 +35,9 @@ class AgentService(AgentServiceInterface):
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         model: Optional[str] = None,
+        output_guardrails: List[
+            OutputGuardrail
+        ] = None,  # <-- Add output_guardrails parameter
     ):
         """Initialize the agent service.

@@ -36,6 +45,10 @@
             llm_provider: Provider for language model interactions
             business_mission: Optional business mission and values
             config: Optional service configuration
+            api_key: API key for the LLM provider
+            base_url: Base URL for the LLM provider
+            model: Model name for the LLM provider
+            output_guardrails: List of output guardrail instances
         """
         self.llm_provider = llm_provider
         self.business_mission = business_mission
@@ -46,6 +59,7 @@
         self.api_key = api_key
         self.base_url = base_url
         self.model = model
+        self.output_guardrails = output_guardrails or []  # <-- Store guardrails

         self.plugin_manager = PluginManager(
             config=self.config,
@@ -71,6 +85,7 @@
             specialization=specialization,
         )
         self.agents.append(agent)
+        logger.info(f"Registered AI agent: {name}")

     def get_agent_system_prompt(self, agent_name: str) -> str:
         """Get the system prompt for an agent.
@@ -152,28 +167,41 @@
         """Execute a tool on behalf of an agent."""

         if not self.tool_registry:
+            logger.error("Tool registry not available during tool execution.")
             return {"status": "error", "message": "Tool registry not available"}

         tool = self.tool_registry.get_tool(tool_name)
         if not tool:
+            logger.warning(f"Tool '{tool_name}' not found for execution.")
             return {"status": "error", "message": f"Tool '{tool_name}' not found"}

         # Check if agent has access to this tool
         agent_tools = self.tool_registry.get_agent_tools(agent_name)

         if not any(t.get("name") == tool_name for t in agent_tools):
+            logger.warning(
+                f"Agent '{agent_name}' attempted to use unassigned tool '{tool_name}'."
+            )
             return {
                 "status": "error",
                 "message": f"Agent '{agent_name}' doesn't have access to tool '{tool_name}'",
             }

         try:
+            logger.info(
+                f"Executing tool '{tool_name}' for agent '{agent_name}' with params: {parameters}"
+            )
             result = await tool.execute(**parameters)
+            logger.info(
+                f"Tool '{tool_name}' execution result status: {result.get('status')}"
+            )
             return result
         except Exception as e:
             import traceback

-
+            logger.error(
+                f"Error executing tool '{tool_name}': {e}\n{traceback.format_exc()}"
+            )
             return {"status": "error", "message": f"Error executing tool: {str(e)}"}

     async def generate_response(
@@ -201,10 +229,17 @@
         ] = "aac",
         prompt: Optional[str] = None,
     ) -> AsyncGenerator[Union[str, bytes], None]:  # pragma: no cover
-        """Generate a response with support for text/audio input/output.
+        """Generate a response with support for text/audio input/output and guardrails.
+
+        If output_format is 'text' and output_guardrails are present, the response
+        will be buffered entirely before applying guardrails and yielding a single result.
+        Otherwise, text responses stream chunk-by-chunk. Audio responses always buffer.
+        """
         agent = next((a for a in self.agents if a.name == agent_name), None)
         if not agent:
             error_msg = f"Agent '{agent_name}' not found."
+            logger.warning(error_msg)
+            # Handle error output (unchanged)
             if output_format == "audio":
                 async for chunk in self.llm_provider.tts(
                     error_msg,
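Taken together with the new `output_guardrails` constructor argument, the buffering behavior documented above implies a calling pattern roughly like the sketch below. This is illustrative only: the guardrail and LLM provider objects are placeholders (the new `solana_agent/guardrails/pii.py` module is listed in this diff but its class names are not shown), the "support" agent is assumed to have been registered beforehand, and only keyword arguments visible in this file are used:

# Illustrative usage sketch; `llm_provider` and `pii_guardrail` are assumed to be
# objects satisfying the provider and OutputGuardrail interfaces.
from solana_agent.services.agent import AgentService


async def answer(llm_provider, pii_guardrail) -> str:
    service = AgentService(
        llm_provider=llm_provider,
        output_guardrails=[pii_guardrail],  # new in 27.5.0
    )
    # Assumes an agent named "support" has already been registered on the service.
    parts = []
    # With guardrails and output_format="text", the response is buffered, guardrails
    # run once over the full text, and a single processed chunk is yielded.
    async for chunk in service.generate_response(
        agent_name="support",
        user_id="user-123",
        query="What is my account email?",
        output_format="text",
    ):
        parts.append(chunk)
    return "".join(parts)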
@@ -217,261 +252,372 @@
                 yield error_msg
             return

+        # --- Determine Buffering Strategy ---
+        # Buffer text ONLY if format is text AND guardrails are present
+        should_buffer_text = bool(self.output_guardrails) and output_format == "text"
+        logger.debug(
+            f"Text buffering strategy: {'Buffer full response' if should_buffer_text else 'Stream chunks'}"
+        )
+
         try:
-            # ---
+            # --- System Prompt Assembly ---
             system_prompt_parts = [self.get_agent_system_prompt(agent_name)]

-            #
-
-            if
-
-            if tool_usage_prompt_text:
-                system_prompt_parts.append(
-                    f"\n\n--- TOOL USAGE INSTRUCTIONS ---{tool_usage_prompt_text}"
-                )
-                print(
-                    f"Tools available to agent {agent_name}: {[t.get('name') for t in self.get_agent_tools(agent_name)]}"
-                )
+            # Add tool usage instructions if tools are available for the agent
+            tool_instructions = self._get_tool_usage_prompt(agent_name)
+            if tool_instructions:
+                system_prompt_parts.append(tool_instructions)

-            #
-            system_prompt_parts.append("
-            system_prompt_parts.append(f"User ID: {user_id}")
+            # Add user ID context
+            system_prompt_parts.append(f"USER IDENTIFIER: {user_id}")

-            #
+            # Add memory context if provided
             if memory_context:
-
-                system_prompt_parts.append(
-                    "\n\n--- CONVERSATION HISTORY (Memory Context) ---"
-                )
-                system_prompt_parts.append(memory_context)
+                system_prompt_parts.append(f"\nCONVERSATION HISTORY:\n{memory_context}")

-            #
+            # Add optional prompt if provided
             if prompt:
-
-                system_prompt_parts.append(
-                    "\n\n--- ADDITIONAL INSTRUCTIONS FOR THIS TURN ---"
-                )
-                system_prompt_parts.append(prompt)
+                system_prompt_parts.append(f"\nADDITIONAL PROMPT:\n{prompt}")

-
-
+            final_system_prompt = "\n\n".join(
+                filter(None, system_prompt_parts)
+            )  # Join non-empty parts
+            # --- End System Prompt Assembly ---

-            #
-            complete_text_response =
-
+            # --- Response Generation ---
+            complete_text_response = (
+                ""  # Always used for final storage and potentially for buffering
+            )
+            full_response_buffer = ""  # Used ONLY for audio buffering

-            #
+            # Tool call handling variables (unchanged)
             tool_buffer = ""
-            pending_chunk = ""
+            pending_chunk = ""
             is_tool_call = False
-
-            # Define start and end markers
             start_marker = "[TOOL]"
             end_marker = "[/TOOL]"

-
-
+            logger.info(
+                f"Generating response for agent '{agent_name}' with query length {len(str(query))}"
+            )
             async for chunk in self.llm_provider.generate_text(
-                prompt=query,
+                prompt=str(query),
                 system_prompt=final_system_prompt,
                 api_key=self.api_key,
                 base_url=self.base_url,
                 model=self.model,
             ):
-                #
+                # --- Chunk Processing & Tool Call Logic (Modified Yielding) ---
                 if pending_chunk:
                     combined_chunk = pending_chunk + chunk
-                    pending_chunk = ""
+                    pending_chunk = ""
                 else:
                     combined_chunk = chunk

                 # STEP 1: Check for tool call start marker
                 if start_marker in combined_chunk and not is_tool_call:
-                    print(
-                        f"Found tool start marker in chunk of length {len(combined_chunk)}"
-                    )
                     is_tool_call = True
-
-                    # Extract text before the marker and the marker itself with everything after
                     start_pos = combined_chunk.find(start_marker)
                     before_marker = combined_chunk[:start_pos]
                     after_marker = combined_chunk[start_pos:]

-
-
-
+                    if before_marker:
+                        processed_before_marker = before_marker
+                        # Apply guardrails ONLY if NOT buffering text
+                        if not should_buffer_text:
+                            for guardrail in self.output_guardrails:
+                                try:
+                                    processed_before_marker = await guardrail.process(
+                                        processed_before_marker
+                                    )
+                                except Exception as e:
+                                    logger.error(
+                                        f"Error applying output guardrail {guardrail.__class__.__name__} to pre-tool text: {e}"
+                                    )
+
+                        # Yield ONLY if NOT buffering text
+                        if (
+                            processed_before_marker
+                            and not should_buffer_text
+                            and output_format == "text"
+                        ):
+                            yield processed_before_marker
+
+                        # Always accumulate for final response / audio buffer
+                        if processed_before_marker:
+                            complete_text_response += processed_before_marker
+                            if output_format == "audio":
+                                full_response_buffer += processed_before_marker

-                    # Start collecting the tool call
                     tool_buffer = after_marker
-                    continue
+                    continue

                 # STEP 2: Handle ongoing tool call collection
                 if is_tool_call:
                     tool_buffer += combined_chunk
-
-                    # Check if the tool call is complete
                     if end_marker in tool_buffer:
-                        print(f"Tool call complete, buffer size: {len(tool_buffer)}")
-
-                        # Process the tool call
                         response_text = await self._handle_tool_call(
                             agent_name=agent_name, tool_text=tool_buffer
                         )
-
-                        # Clean the response to remove any markers or formatting
                         response_text = self._clean_tool_response(response_text)
-                        print(
-                            f"Tool execution complete, result size: {len(response_text)}"
-                        )
-
-                        # Create new prompt with search/tool results
-                        # Ensure query is string
                         user_prompt = f"{str(query)}\n\nTOOL RESULT: {response_text}"

-                        # ---
-                        # Start with base prompt again
+                        # --- Rebuild system prompt for follow-up ---
                         follow_up_system_prompt_parts = [
                             self.get_agent_system_prompt(agent_name)
                         ]
-                        #
+                        # Re-add tool instructions if needed for follow-up context
+                        if tool_instructions:
+                            follow_up_system_prompt_parts.append(tool_instructions)
                         follow_up_system_prompt_parts.append(
-                            "
+                            f"USER IDENTIFIER: {user_id}"
                         )
-
-                            "\n\n--- USER & SESSION INFO ---"
-                        )
-                        follow_up_system_prompt_parts.append(f"User ID: {user_id}")
+                        # Include original memory + original query + tool result context
                         if memory_context:
-                            # Make the header clearly separate it
                             follow_up_system_prompt_parts.append(
-                                "\
+                                f"\nORIGINAL CONVERSATION HISTORY:\n{memory_context}"
                             )
-
+                        # Add the original prompt if it was provided
                         if prompt:
-                            # Make the header clearly separate it
                             follow_up_system_prompt_parts.append(
-                                "\
+                                f"\nORIGINAL ADDITIONAL PROMPT:\n{prompt}"
                             )
-
+                        # Add context about the tool call that just happened
+                        follow_up_system_prompt_parts.append(
+                            f"\nPREVIOUS TOOL CALL CONTEXT:\nOriginal Query: {str(query)}\nTool Used: (Inferred from result)\nTool Result: {response_text}"
+                        )

-
-
-                            follow_up_system_prompt_parts
+                        final_follow_up_system_prompt = "\n\n".join(
+                            filter(None, follow_up_system_prompt_parts)
                         )
-                        # --- End Rebuild ---
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                        # --- End Rebuild system prompt ---
+
+                        logger.info("Generating follow-up response with tool results")
+                        async for processed_chunk in self.llm_provider.generate_text(
+                            prompt=user_prompt,  # Use the prompt that includes the tool result
+                            system_prompt=final_follow_up_system_prompt,
+                            api_key=self.api_key,
+                            base_url=self.base_url,
+                            model=self.model,
+                        ):
+                            chunk_to_yield_followup = processed_chunk
+                            # Apply guardrails ONLY if NOT buffering text
+                            if not should_buffer_text:
+                                for guardrail in self.output_guardrails:
+                                    try:
+                                        chunk_to_yield_followup = (
+                                            await guardrail.process(
+                                                chunk_to_yield_followup
+                                            )
+                                        )
+                                    except Exception as e:
+                                        logger.error(
+                                            f"Error applying output guardrail {guardrail.__class__.__name__} to follow-up chunk: {e}"
+                                        )
+
+                            # Yield ONLY if NOT buffering text
+                            if (
+                                chunk_to_yield_followup
+                                and not should_buffer_text
+                                and output_format == "text"
                             ):
-
+                                yield chunk_to_yield_followup

-                        #
-
-
-
+                            # Always accumulate
+                            if chunk_to_yield_followup:
+                                complete_text_response += chunk_to_yield_followup
+                                if output_format == "audio":
+                                    full_response_buffer += chunk_to_yield_followup

-                        # Reset tool handling state
                         is_tool_call = False
                         tool_buffer = ""
                         pending_chunk = ""
-                        break  # Exit the original generation loop
+                        break  # Exit the original generation loop

-                    # Continue collecting tool call
-                    continue
+                    continue  # Continue collecting tool call

-                # STEP 3: Check for possible partial start markers
-                # This helps detect markers split across chunks
+                # STEP 3: Check for possible partial start markers
                 potential_marker = False
+                chunk_to_yield = combined_chunk
                 for i in range(1, len(start_marker)):
                     if combined_chunk.endswith(start_marker[:i]):
-                        # Found a partial marker at the end
-                        # Save the partial marker
                         pending_chunk = combined_chunk[-i:]
-                        # Everything except the partial marker
                         chunk_to_yield = combined_chunk[:-i]
                         potential_marker = True
-                        print(f"Potential partial marker detected: '{pending_chunk}'")
                         break

                 if potential_marker:
-
-
-
-
-
+                    chunk_to_yield_safe = chunk_to_yield
+                    # Apply guardrails ONLY if NOT buffering text
+                    if not should_buffer_text:
+                        for guardrail in self.output_guardrails:
+                            try:
+                                chunk_to_yield_safe = await guardrail.process(
+                                    chunk_to_yield_safe
+                                )
+                            except Exception as e:
+                                logger.error(
+                                    f"Error applying output guardrail {guardrail.__class__.__name__} to safe chunk: {e}"
+                                )
+
+                    # Yield ONLY if NOT buffering text
+                    if (
+                        chunk_to_yield_safe
+                        and not should_buffer_text
+                        and output_format == "text"
+                    ):
+                        yield chunk_to_yield_safe
+
+                    # Always accumulate
+                    if chunk_to_yield_safe:
+                        complete_text_response += chunk_to_yield_safe
                         if output_format == "audio":
-                            full_response_buffer +=
+                            full_response_buffer += chunk_to_yield_safe
                     continue

-                # STEP 4: Normal text processing
-
-
+                # STEP 4: Normal text processing
+                chunk_to_yield_normal = combined_chunk
+                # Apply guardrails ONLY if NOT buffering text
+                if not should_buffer_text:
+                    for guardrail in self.output_guardrails:
+                        try:
+                            chunk_to_yield_normal = await guardrail.process(
+                                chunk_to_yield_normal
+                            )
+                        except Exception as e:
+                            logger.error(
+                                f"Error applying output guardrail {guardrail.__class__.__name__} to normal chunk: {e}"
+                            )
+
+                # Yield ONLY if NOT buffering text
+                if (
+                    chunk_to_yield_normal
+                    and not should_buffer_text
+                    and output_format == "text"
+                ):
+                    yield chunk_to_yield_normal
+
+                # Always accumulate
+                if chunk_to_yield_normal:
+                    complete_text_response += chunk_to_yield_normal
+                    if output_format == "audio":
+                        full_response_buffer += chunk_to_yield_normal

-
-                if output_format == "audio":
-                    full_response_buffer += combined_chunk
+            # --- Post-Loop Processing ---

-            # Process any incomplete tool call
+            # Process any incomplete tool call
             if is_tool_call and tool_buffer:
-
-                    f"Incomplete tool call detected,
+                logger.warning(
+                    f"Incomplete tool call detected, processing as regular text: {len(tool_buffer)} chars"
                 )
-
-
-
-
-
-
-
-
-
-
-
-
+                processed_tool_buffer = tool_buffer
+                # Apply guardrails ONLY if NOT buffering text
+                if not should_buffer_text:
+                    for guardrail in self.output_guardrails:
+                        try:
+                            processed_tool_buffer = await guardrail.process(
+                                processed_tool_buffer
+                            )
+                        except Exception as e:
+                            logger.error(
+                                f"Error applying output guardrail {guardrail.__class__.__name__} to incomplete tool buffer: {e}"
+                            )
+
+                # Yield ONLY if NOT buffering text
+                if (
+                    processed_tool_buffer
+                    and not should_buffer_text
+                    and output_format == "text"
+                ):
+                    yield processed_tool_buffer
+
+                # Always accumulate
+                if processed_tool_buffer:
+                    complete_text_response += processed_tool_buffer
+                    if output_format == "audio":
+                        full_response_buffer += processed_tool_buffer
+
+            # --- Final Output Generation ---
+
+            # Case 1: Text output WITH guardrails (apply to buffered response)
+            if should_buffer_text:
+                logger.info(
+                    f"Applying output guardrails to buffered text response (length: {len(complete_text_response)})"
                 )
-
+                processed_full_text = complete_text_response
+                for guardrail in self.output_guardrails:
+                    try:
+                        processed_full_text = await guardrail.process(
+                            processed_full_text
+                        )
+                    except Exception as e:
+                        logger.error(
+                            f"Error applying output guardrail {guardrail.__class__.__name__} to full text buffer: {e}"
+                        )

-
+                if processed_full_text:
+                    yield processed_full_text
+                # Update last_text_response with the final processed text
+                self.last_text_response = processed_full_text
+
+            # Case 2: Audio output (apply guardrails to buffer before TTS) - Unchanged Logic
+            elif output_format == "audio" and full_response_buffer:
+                original_buffer = full_response_buffer
+                processed_audio_buffer = full_response_buffer
+                for (
+                    guardrail
+                ) in self.output_guardrails:  # Apply even if empty, for consistency
+                    try:
+                        processed_audio_buffer = await guardrail.process(
+                            processed_audio_buffer
+                        )
+                    except Exception as e:
+                        logger.error(
+                            f"Error applying output guardrail {guardrail.__class__.__name__} to audio buffer: {e}"
+                        )
+                if processed_audio_buffer != original_buffer:
+                    logger.info(
+                        f"Output guardrails modified audio buffer. Original length: {len(original_buffer)}, New length: {len(processed_audio_buffer)}"
+                    )
+
+                cleaned_audio_buffer = self._clean_for_audio(processed_audio_buffer)
+                logger.info(
+                    f"Processing {len(cleaned_audio_buffer)} characters for audio output"
+                )
                 async for audio_chunk in self.llm_provider.tts(
-                    text=
+                    text=cleaned_audio_buffer,
                     voice=audio_voice,
                     response_format=audio_output_format,
                     instructions=audio_instructions,
                 ):
                     yield audio_chunk
+                # Update last_text_response with the text *before* TTS cleaning
+                self.last_text_response = (
+                    processed_audio_buffer  # Store the guardrail-processed text
+                )
+
+            # Case 3: Text output WITHOUT guardrails (already streamed)
+            elif output_format == "text" and not should_buffer_text:
+                # Store the complete text response (accumulated from non-processed chunks)
+                self.last_text_response = complete_text_response
+                logger.info(
+                    "Text streaming complete (no guardrails applied post-stream)."
+                )

-
-
-
+            logger.info(
+                f"Response generation complete for agent '{agent_name}': {len(self.last_text_response)} final chars"
+            )

         except Exception as e:
-
-            print(f"Error in generate_response: {str(e)}")
+            # --- Error Handling (unchanged) ---
             import traceback

-
-
+            error_msg = (
+                "I apologize, but I encountered an error processing your request."
+            )
+            logger.error(
+                f"Error in generate_response for agent '{agent_name}': {e}\n{traceback.format_exc()}"
+            )
             if output_format == "audio":
                 async for chunk in self.llm_provider.tts(
                     error_msg,
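The guardrail hooks threaded through `generate_response` above are what the new `solana_agent/guardrails/pii.py` module (107 added lines, listed at the top of this diff) plugs into. Its implementation is not shown here; purely as an illustration of the kind of output guardrail this wiring supports, a minimal regex-based redactor could look like the following. It is not the package's actual PII guardrail:

# Minimal illustrative guardrail, assuming only the async `process` interface used above.
import re


class SimpleEmailRedactor:
    """Example output guardrail: masks email addresses in generated text."""

    EMAIL_RE = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+")

    async def process(self, text: str) -> str:
        # Matches the `await guardrail.process(chunk)` call sites in the diff above.
        return self.EMAIL_RE.sub("[REDACTED EMAIL]", text)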
@@ -484,35 +630,24 @@
                 yield error_msg

     async def _bytes_to_generator(self, data: bytes) -> AsyncGenerator[bytes, None]:
-        """Convert bytes to an async generator for streaming.
-
-        Args:
-            data: Bytes of audio data
-
-        Yields:
-            Chunks of audio data
-        """
-        # Define a reasonable chunk size (adjust based on your needs)
+        """Convert bytes to an async generator for streaming."""
         chunk_size = 4096
-
         for i in range(0, len(data), chunk_size):
             yield data[i : i + chunk_size]
-            # Small delay to simulate streaming
             await asyncio.sleep(0.01)

     async def _handle_tool_call(self, agent_name: str, tool_text: str) -> str:
         """Handle marker-based tool calls."""
         try:
-            # Extract the content between markers
             start_marker = "[TOOL]"
             end_marker = "[/TOOL]"
-
             start_idx = tool_text.find(start_marker) + len(start_marker)
             end_idx = tool_text.find(end_marker)
+            if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
+                logger.error(f"Malformed tool call text received: {tool_text}")
+                return "Error: Malformed tool call format."

             tool_content = tool_text[start_idx:end_idx].strip()
-
-            # Parse the lines to extract name and parameters
             tool_name = None
             parameters = {}

@@ -520,43 +655,61 @@
                 line = line.strip()
                 if not line:
                     continue
-
                 if line.startswith("name:"):
                     tool_name = line[5:].strip()
                 elif line.startswith("parameters:"):
                     params_text = line[11:].strip()
-
-
-
-
-
-
-
-
+                    try:
+                        # Attempt to parse as JSON first for robustness
+                        parameters = json.loads(params_text)
+                    except json.JSONDecodeError:
+                        # Fallback to comma-separated key=value pairs
+                        param_pairs = params_text.split(",")
+                        for pair in param_pairs:
+                            if "=" in pair:
+                                k, v = pair.split("=", 1)
+                                parameters[k.strip()] = v.strip()
+                        logger.warning(
+                            f"Parsed tool parameters using fallback method: {params_text}"
+                        )
+
+            if not tool_name:
+                logger.error(f"Tool name missing in tool call: {tool_content}")
+                return "Error: Tool name missing in call."
+
             result = await self.execute_tool(agent_name, tool_name, parameters)

-            # Return the result as string
             if result.get("status") == "success":
                 tool_result = str(result.get("result", ""))
                 return tool_result
             else:
                 error_msg = f"Error calling {tool_name}: {result.get('message', 'Unknown error')}"
+                logger.error(error_msg)
                 return error_msg

         except Exception as e:
             import traceback

-
+            logger.error(f"Error processing tool call: {e}\n{traceback.format_exc()}")
             return f"Error processing tool call: {str(e)}"

     def _get_tool_usage_prompt(self, agent_name: str) -> str:
         """Generate marker-based instructions for tool usage."""
-        # Get tools assigned to this agent
         tools = self.get_agent_tools(agent_name)
         if not tools:
             return ""

-
+        # Simplify tool representation for the prompt
+        simplified_tools = []
+        for tool in tools:
+            simplified_tool = {
+                "name": tool.get("name"),
+                "description": tool.get("description"),
+                "parameters": tool.get("parameters", {}).get("properties", {}),
+            }
+            simplified_tools.append(simplified_tool)
+
+        tools_json = json.dumps(simplified_tools, indent=2)

         return f"""
 AVAILABLE TOOLS:
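The rewritten `_handle_tool_call` above now accepts the `parameters:` line either as a JSON object or, as a fallback, as comma-separated key=value pairs. The same two-step parse, pulled out as a standalone sketch for clarity:

import json


def parse_tool_parameters(params_text: str) -> dict:
    """Mirrors the JSON-first / key=value-fallback parsing in the diff above."""
    try:
        # e.g. '{"query": "latest news on Solana"}'
        return json.loads(params_text)
    except json.JSONDecodeError:
        # e.g. 'query=latest news, limit=5'
        parameters = {}
        for pair in params_text.split(","):
            if "=" in pair:
                key, value = pair.split("=", 1)
                parameters[key.strip()] = value.strip()
        return parameters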
@@ -569,21 +722,21 @@
 TOOL USAGE FORMAT:
 [TOOL]
 name: tool_name
-parameters: key1
+parameters: {{"key1": "value1", "key2": "value2"}}
 [/TOOL]

 EXAMPLES:
 ✅ CORRECT - ONLY the tool call with NOTHING else:
 [TOOL]
 name: search_internet
-parameters: query
+parameters: {{"query": "latest news on Solana"}}
 [/TOOL]
-
+
 ❌ INCORRECT - Never add explanatory text like this:
 To get the latest news on Solana, I will search the internet.
 [TOOL]
 name: search_internet
-parameters: query
+parameters: {{"query": "latest news on Solana"}}
 [/TOOL]

 REMEMBER:
@@ -594,62 +747,33 @@
 """

     def _clean_for_audio(self, text: str) -> str:
-        """Remove Markdown formatting, emojis, and non-pronounceable characters from text.
-
-        Args:
-            text: Input text with potential Markdown formatting and special characters
-
-        Returns:
-            Clean text without Markdown, emojis, and special characters
-        """
+        """Remove Markdown formatting, emojis, and non-pronounceable characters from text."""
         import re

         if not text:
             return ""
-
-        # Remove Markdown links - [text](url) -> text
         text = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", text)
-
-        # Remove inline code with backticks
         text = re.sub(r"`([^`]+)`", r"\1", text)
-
-        # Remove bold formatting - **text** or __text__ -> text
         text = re.sub(r"(\*\*|__)(.*?)\1", r"\2", text)
-
-        # Remove italic formatting - *text* or _text_ -> text
         text = re.sub(r"(\*|_)(.*?)\1", r"\2", text)
-
-        # Remove headers - ## Header -> Header
         text = re.sub(r"^\s*#+\s*(.*?)$", r"\1", text, flags=re.MULTILINE)
-
-        # Remove blockquotes - > Text -> Text
         text = re.sub(r"^\s*>\s*(.*?)$", r"\1", text, flags=re.MULTILINE)
-
-        # Remove horizontal rules (---, ***, ___)
         text = re.sub(r"^\s*[-*_]{3,}\s*$", "", text, flags=re.MULTILINE)
-
-        # Remove list markers - * Item or - Item or 1. Item -> Item
         text = re.sub(r"^\s*[-*+]\s+(.*?)$", r"\1", text, flags=re.MULTILINE)
         text = re.sub(r"^\s*\d+\.\s+(.*?)$", r"\1", text, flags=re.MULTILINE)
-
-        # Remove multiple consecutive newlines (keep just one)
         text = re.sub(r"\n{3,}", "\n\n", text)
-
-        # Remove emojis and other non-pronounceable characters
-        # Common emoji Unicode ranges
         emoji_pattern = re.compile(
             "["
             "\U0001f600-\U0001f64f"  # emoticons
             "\U0001f300-\U0001f5ff"  # symbols & pictographs
             "\U0001f680-\U0001f6ff"  # transport & map symbols
             "\U0001f700-\U0001f77f"  # alchemical symbols
-            "\U0001f780-\U0001f7ff"  # Geometric Shapes
+            "\U0001f780-\U0001f7ff"  # Geometric Shapes Extended
             "\U0001f800-\U0001f8ff"  # Supplemental Arrows-C
             "\U0001f900-\U0001f9ff"  # Supplemental Symbols and Pictographs
-            "\U0001fa00-\U0001fa6f"  # Chess Symbols
             "\U0001fa70-\U0001faff"  # Symbols and Pictographs Extended-A
             "\U00002702-\U000027b0"  # Dingbats
-            "\U000024c2-\
+            "\U000024c2-\U0001f251"
             "\U00002600-\U000026ff"  # Miscellaneous Symbols
             "\U00002700-\U000027bf"  # Dingbats
             "\U0000fe00-\U0000fe0f"  # Variation Selectors
@@ -658,26 +782,57 @@
             flags=re.UNICODE,
         )
         text = emoji_pattern.sub(r" ", text)
-
-
-
-
-        # Replace multiple spaces with a single space
+        text = re.sub(
+            r"[^\w\s\.\,\;\:\?\!\'\"\-\(\)]", " ", text
+        )  # Keep basic punctuation
         text = re.sub(r"\s+", " ", text)
-
         return text.strip()

     def _clean_tool_response(self, text: str) -> str:
         """Remove any tool markers or formatting that might have leaked into the response."""
         if not text:
             return ""
+        text = text.replace("[TOOL]", "").replace("[/TOOL]", "")
+        if text.lstrip().startswith("TOOL"):
+            text = text.lstrip()[4:].lstrip()  # Remove "TOOL" and leading space
+        return text.strip()

-
-
-
+    # --- Add methods from factory logic ---
+    def load_and_register_plugins(self):
+        """Loads plugins using the PluginManager."""
+        try:
+            self.plugin_manager.load_plugins()
+            logger.info("Plugins loaded successfully via PluginManager.")
+        except Exception as e:
+            logger.error(f"Error loading plugins: {e}", exc_info=True)

-
-
-
+    def register_agents_from_config(self):
+        """Registers agents defined in the main configuration."""
+        agents_config = self.config.get("agents", [])
+        if not agents_config:
+            logger.warning("No agents defined in the configuration.")
+            return

-
+        for agent_config in agents_config:
+            name = agent_config.get("name")
+            instructions = agent_config.get("instructions")
+            specialization = agent_config.get("specialization")
+            tools = agent_config.get("tools", [])
+
+            if not name or not instructions or not specialization:
+                logger.warning(
+                    f"Skipping agent due to missing name, instructions, or specialization: {agent_config}"
+                )
+                continue
+
+            self.register_ai_agent(name, instructions, specialization)
+            # logger.info(f"Registered agent: {name}") # Logging done in register_ai_agent
+
+            # Assign tools to the agent
+            for tool_name in tools:
+                if self.assign_tool_for_agent(name, tool_name):
+                    logger.info(f"Assigned tool '{tool_name}' to agent '{name}'.")
+                else:
+                    logger.warning(
+                        f"Failed to assign tool '{tool_name}' to agent '{name}' (Tool might not be registered)."
+                    )