solana-agent 29.2.0__tar.gz → 29.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {solana_agent-29.2.0 → solana_agent-29.2.1}/PKG-INFO +1 -3
- {solana_agent-29.2.0 → solana_agent-29.2.1}/README.md +0 -2
- {solana_agent-29.2.0 → solana_agent-29.2.1}/pyproject.toml +1 -1
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/adapters/openai_adapter.py +12 -10
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/providers/llm.py +5 -1
- solana_agent-29.2.1/solana_agent/services/agent.py +467 -0
- solana_agent-29.2.0/solana_agent/services/agent.py +0 -956
- {solana_agent-29.2.0 → solana_agent-29.2.1}/LICENSE +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/adapters/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/adapters/mongodb_adapter.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/adapters/pinecone_adapter.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/cli.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/client/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/client/solana_agent.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/domains/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/domains/agent.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/domains/routing.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/factories/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/factories/agent_factory.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/guardrails/pii.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/client/client.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/guardrails/guardrails.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/plugins/plugins.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/providers/data_storage.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/providers/memory.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/providers/vector_storage.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/services/agent.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/services/knowledge_base.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/services/query.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/interfaces/services/routing.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/plugins/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/plugins/manager.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/plugins/registry.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/plugins/tools/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/plugins/tools/auto_tool.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/repositories/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/repositories/memory.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/services/__init__.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/services/knowledge_base.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/services/query.py +0 -0
- {solana_agent-29.2.0 → solana_agent-29.2.1}/solana_agent/services/routing.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.3
|
2
2
|
Name: solana-agent
|
3
|
-
Version: 29.2.0
|
3
|
+
Version: 29.2.1
|
4
4
|
Summary: AI Agents for Solana
|
5
5
|
License: MIT
|
6
6
|
Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
|
@@ -41,9 +41,7 @@ Description-Content-Type: text/markdown
|
|
41
41
|
[](https://opensource.org/licenses/MIT)
|
42
42
|
[](https://codecov.io/gh/truemagic-coder/solana-agent)
|
43
43
|
[](https://github.com/truemagic-coder/solana-agent/actions/workflows/ci.yml)
|
44
|
-
[](https://github.com/truemagic-coder/solana-agent)
|
45
44
|
[](https://github.com/astral-sh/ruff)
|
46
|
-
[](https://libraries.io/pypi/solana-agent)
|
47
45
|
|
48
46
|

|
49
47
|
|
@@ -6,9 +6,7 @@
|
|
6
6
|
[](https://opensource.org/licenses/MIT)
|
7
7
|
[](https://codecov.io/gh/truemagic-coder/solana-agent)
|
8
8
|
[](https://github.com/truemagic-coder/solana-agent/actions/workflows/ci.yml)
|
9
|
-
[](https://github.com/truemagic-coder/solana-agent)
|
10
9
|
[](https://github.com/astral-sh/ruff)
|
11
|
-
[](https://libraries.io/pypi/solana-agent)
|
12
10
|
|
13
11
|

|
14
12
|
|
@@ -163,8 +163,10 @@ class OpenAIAdapter(LLMProvider):
|
|
163
163
|
api_key: Optional[str] = None,
|
164
164
|
base_url: Optional[str] = None,
|
165
165
|
model: Optional[str] = None,
|
166
|
-
|
167
|
-
|
166
|
+
functions: Optional[List[Dict[str, Any]]] = None,
|
167
|
+
function_call: Optional[Union[str, Dict[str, Any]]] = None,
|
168
|
+
) -> Any: # pragma: no cover
|
169
|
+
"""Generate text or function call from OpenAI models."""
|
168
170
|
messages = []
|
169
171
|
if system_prompt:
|
170
172
|
messages.append({"role": "system", "content": system_prompt})
|
@@ -174,6 +176,10 @@ class OpenAIAdapter(LLMProvider):
|
|
174
176
|
"messages": messages,
|
175
177
|
"model": model or self.text_model,
|
176
178
|
}
|
179
|
+
if functions:
|
180
|
+
request_params["functions"] = functions
|
181
|
+
if function_call:
|
182
|
+
request_params["function_call"] = function_call
|
177
183
|
|
178
184
|
if api_key and base_url:
|
179
185
|
client = AsyncOpenAI(api_key=api_key, base_url=base_url)
|
@@ -185,17 +191,13 @@ class OpenAIAdapter(LLMProvider):
|
|
185
191
|
|
186
192
|
try:
|
187
193
|
response = await client.chat.completions.create(**request_params)
|
188
|
-
|
189
|
-
|
190
|
-
else:
|
191
|
-
logger.warning("Received non-streaming response with no content.")
|
192
|
-
return ""
|
193
|
-
except OpenAIError as e: # Catch specific OpenAI errors
|
194
|
+
return response
|
195
|
+
except OpenAIError as e:
|
194
196
|
logger.error(f"OpenAI API error during text generation: {e}")
|
195
|
-
return
|
197
|
+
return None
|
196
198
|
except Exception as e:
|
197
199
|
logger.exception(f"Error in generate_text: {e}")
|
198
|
-
return
|
200
|
+
return None
|
199
201
|
|
200
202
|
def _calculate_gpt41_image_cost(self, width: int, height: int, model: str) -> int:
|
201
203
|
"""Calculates the token cost for an image with GPT-4.1 models."""
|
@@ -1,6 +1,8 @@
|
|
1
1
|
from abc import ABC, abstractmethod
|
2
2
|
from typing import (
|
3
|
+
Any,
|
3
4
|
AsyncGenerator,
|
5
|
+
Dict,
|
4
6
|
List,
|
5
7
|
Literal,
|
6
8
|
Optional,
|
@@ -26,7 +28,9 @@ class LLMProvider(ABC):
|
|
26
28
|
api_key: Optional[str] = None,
|
27
29
|
base_url: Optional[str] = None,
|
28
30
|
model: Optional[str] = None,
|
29
|
-
|
31
|
+
functions: Optional[List[Dict[str, Any]]] = None,
|
32
|
+
function_call: Optional[Union[str, Dict[str, Any]]] = None,
|
33
|
+
) -> Any:
|
30
34
|
"""Generate text from the language model."""
|
31
35
|
pass
|
32
36
|
|
@@ -0,0 +1,467 @@
|
|
1
|
+
"""
|
2
|
+
Agent service implementation.
|
3
|
+
|
4
|
+
This service manages AI and human agents, their registration, tool assignments,
|
5
|
+
and response generation.
|
6
|
+
"""
|
7
|
+
|
8
|
+
import datetime as main_datetime
|
9
|
+
from datetime import datetime
|
10
|
+
import json
|
11
|
+
import logging # Add logging
|
12
|
+
import re
|
13
|
+
from typing import AsyncGenerator, Dict, List, Literal, Optional, Any, Union
|
14
|
+
|
15
|
+
from solana_agent.interfaces.services.agent import AgentService as AgentServiceInterface
|
16
|
+
from solana_agent.interfaces.providers.llm import LLMProvider
|
17
|
+
from solana_agent.plugins.manager import PluginManager
|
18
|
+
from solana_agent.plugins.registry import ToolRegistry
|
19
|
+
from solana_agent.domains.agent import AIAgent, BusinessMission
|
20
|
+
from solana_agent.interfaces.guardrails.guardrails import (
|
21
|
+
OutputGuardrail,
|
22
|
+
)
|
23
|
+
|
24
|
+
logger = logging.getLogger(__name__) # Add logger
|
25
|
+
|
26
|
+
|
27
|
+
class AgentService(AgentServiceInterface):
|
28
|
+
"""Service for managing agents and generating responses."""
|
29
|
+
|
30
|
+
def __init__(
    self,
    llm_provider: LLMProvider,
    business_mission: Optional[BusinessMission] = None,
    config: Optional[Dict[str, Any]] = None,
    api_key: Optional[str] = None,
    base_url: Optional[str] = None,
    model: Optional[str] = None,
    output_guardrails: Optional[List[OutputGuardrail]] = None,
):
    """Initialize the agent service.

    Args:
        llm_provider: Provider for language model interactions.
        business_mission: Optional business mission and values.
        config: Optional service configuration.
        api_key: API key for the LLM provider.
        base_url: Base URL for the LLM provider.
        model: Model name for the LLM provider.
        output_guardrails: Optional list of output guardrail instances
            applied to the final response text.
    """
    self.llm_provider = llm_provider
    self.business_mission = business_mission
    self.config = config or {}
    # Last final (post-guardrail) text response, kept for inspection.
    self.last_text_response = ""
    self.tool_registry = ToolRegistry(config=self.config)
    self.agents: List[AIAgent] = []
    self.api_key = api_key
    self.base_url = base_url
    self.model = model
    # Normalize to a list so iteration is always safe.
    # (Annotation fixed: the default is None, so the type is Optional.)
    self.output_guardrails = output_guardrails or []

    self.plugin_manager = PluginManager(
        config=self.config,
        tool_registry=self.tool_registry,
    )
|
68
|
+
|
69
|
+
def register_ai_agent(
    self,
    name: str,
    instructions: str,
    specialization: str,
) -> None:
    """Create an ``AIAgent`` from the given fields and add it to the roster.

    Args:
        name: Agent name.
        instructions: Agent instructions.
        specialization: Agent specialization.
    """
    self.agents.append(
        AIAgent(
            name=name,
            instructions=instructions,
            specialization=specialization,
        )
    )
    logger.info(f"Registered AI agent: {name}")
|
89
|
+
|
90
|
+
def get_agent_system_prompt(self, agent_name: str) -> str:
    """Build the system prompt for a registered agent.

    Combines the agent's instructions with the current UTC time and, when
    configured, the business mission, brand voice, values, and goals.

    Args:
        agent_name: Agent name.

    Returns:
        The assembled system prompt.

    Raises:
        ValueError: If no agent with ``agent_name`` is registered.
    """

    # Get agent by name
    agent = next((a for a in self.agents if a.name == agent_name), None)
    if agent is None:
        # Previously this fell through to an AttributeError on None;
        # fail fast with an explicit, descriptive error instead.
        raise ValueError(f"Agent '{agent_name}' is not registered.")

    # Build system prompt
    system_prompt = f"You are {agent.name}, an AI assistant with the following instructions:\n\n"
    system_prompt += agent.instructions

    # Include the current UTC time so the model can reason about "now".
    system_prompt += f"\n\nThe current time is {datetime.now(tz=main_datetime.timezone.utc)}\n\n."

    # Add mission and values if available
    if self.business_mission:
        system_prompt += f"\n\nBUSINESS MISSION:\n{self.business_mission.mission}"
        system_prompt += f"\n\nVOICE OF THE BRAND:\n{self.business_mission.voice}"

        if self.business_mission.values:
            values_text = "\n".join(
                [
                    f"- {value.get('name', '')}: {value.get('description', '')}"
                    for value in self.business_mission.values
                ]
            )
            system_prompt += f"\n\nBUSINESS VALUES:\n{values_text}"

        # Add goals if available
        if self.business_mission.goals:
            goals_text = "\n".join(
                [f"- {goal}" for goal in self.business_mission.goals]
            )
            system_prompt += f"\n\nBUSINESS GOALS:\n{goals_text}"

    return system_prompt
|
132
|
+
|
133
|
+
def get_all_ai_agents(self) -> Dict[str, AIAgent]:
    """Return every registered AI agent keyed by its name.

    Returns:
        Mapping of agent name to the corresponding ``AIAgent``.
    """
    registry: Dict[str, AIAgent] = {}
    for registered in self.agents:
        registry[registered.name] = registered
    return registry
|
140
|
+
|
141
|
+
def assign_tool_for_agent(self, agent_name: str, tool_name: str) -> bool:
    """Assign a tool to an agent.

    Thin delegation to the tool registry; the tool must already be
    registered there for the assignment to succeed.

    Args:
        agent_name: Agent name.
        tool_name: Tool name.

    Returns:
        True if assignment was successful.
    """
    return self.tool_registry.assign_tool_to_agent(agent_name, tool_name)
|
152
|
+
|
153
|
+
def get_agent_tools(self, agent_name: str) -> List[Dict[str, Any]]:
    """Get tools available to an agent.

    Thin delegation to the tool registry.

    Args:
        agent_name: Agent name.

    Returns:
        List of tool configurations (dicts; presumably each carries at
        least a "name" key — see usage in ``execute_tool``).
    """
    return self.tool_registry.get_agent_tools(agent_name)
|
163
|
+
|
164
|
+
async def execute_tool(
    self, agent_name: str, tool_name: str, parameters: Dict[str, Any]
) -> Dict[str, Any]:
    """Run a registered tool on behalf of an agent and return its result dict.

    Returns an error payload (``{"status": "error", ...}``) instead of
    raising when the registry is missing, the tool is unknown, the agent
    lacks access, or execution itself fails.
    """

    registry = self.tool_registry
    if not registry:
        logger.error("Tool registry not available during tool execution.")
        return {"status": "error", "message": "Tool registry not available"}

    tool = registry.get_tool(tool_name)
    if not tool:
        logger.warning(f"Tool '{tool_name}' not found for execution.")
        return {"status": "error", "message": f"Tool '{tool_name}' not found"}

    # Reject tools that were never assigned to this agent.
    assigned = registry.get_agent_tools(agent_name)
    has_access = any(entry.get("name") == tool_name for entry in assigned)
    if not has_access:
        logger.warning(
            f"Agent '{agent_name}' attempted to use unassigned tool '{tool_name}'."
        )
        return {
            "status": "error",
            "message": f"Agent '{agent_name}' doesn't have access to tool '{tool_name}'",
        }

    try:
        logger.info(
            f"Executing tool '{tool_name}' for agent '{agent_name}' with params: {parameters}"
        )
        result = await tool.execute(**parameters)
        logger.info(
            f"Tool '{tool_name}' execution result status: {result.get('status')}"
        )
        return result
    except Exception as e:
        import traceback

        logger.error(
            f"Error executing tool '{tool_name}': {e}\n{traceback.format_exc()}"
        )
        return {"status": "error", "message": f"Error executing tool: {str(e)}"}
|
206
|
+
|
207
|
+
async def generate_response(
    self,
    agent_name: str,
    user_id: str,
    query: Union[str, bytes],
    images: Optional[List[Union[str, bytes]]] = None,
    memory_context: str = "",
    output_format: Literal["text", "audio"] = "text",
    audio_voice: Literal[
        "alloy",
        "ash",
        "ballad",
        "coral",
        "echo",
        "fable",
        "onyx",
        "nova",
        "sage",
        "shimmer",
    ] = "nova",
    audio_instructions: str = "You speak in a friendly and helpful manner.",
    audio_output_format: Literal[
        "mp3", "opus", "aac", "flac", "wav", "pcm"
    ] = "aac",
    prompt: Optional[str] = None,
) -> AsyncGenerator[Union[str, bytes], None]:  # pragma: no cover
    """Generate a response using OpenAI function calling (tools API) via generate_text.

    Yields either the final text (output_format="text") or TTS audio
    chunks (output_format="audio"). Tool calls requested by the model are
    executed and their results appended to the prompt, then the model is
    re-queried until it produces a plain message.
    """

    agent = next((a for a in self.agents if a.name == agent_name), None)
    if not agent:
        # Unknown agent: report the error in the requested modality.
        error_msg = f"Agent '{agent_name}' not found."
        logger.warning(error_msg)
        if output_format == "audio":
            async for chunk in self.llm_provider.tts(
                error_msg,
                instructions=audio_instructions,
                response_format=audio_output_format,
                voice=audio_voice,
            ):
                yield chunk
        else:
            yield error_msg
        return

    # Build system prompt and messages
    system_prompt = self.get_agent_system_prompt(agent_name)
    user_content = str(query)
    if images:
        # NOTE(review): images are only flagged textually here, not sent
        # to the model — confirm whether image payloads should be wired in.
        user_content += (
            "\n\n[Images attached]"  # Optionally, handle images as needed
        )

    # Compose the prompt for generate_text
    full_prompt = ""
    if memory_context:
        full_prompt += f"CONVERSATION HISTORY:\n{memory_context}\n\n"
    if prompt:
        full_prompt += f"ADDITIONAL PROMPT:\n{prompt}\n\n"
    full_prompt += user_content
    full_prompt += f"USER IDENTIFIER: {user_id}"

    # Get OpenAI function schemas for this agent's tools
    functions = []
    for tool in self.get_agent_tools(agent_name):
        functions.append(
            {
                "name": tool["name"],
                "description": tool.get("description", ""),
                "parameters": tool.get("parameters", {}),
            }
        )

    response_text = ""
    try:
        # Tool-call loop: keep querying until the model returns a plain
        # message. NOTE(review): there is no iteration cap, so a model
        # that keeps requesting tools loops indefinitely — consider a limit.
        while True:
            response = await self.llm_provider.generate_text(
                prompt=full_prompt,
                system_prompt=system_prompt,
                functions=functions if functions else None,
                function_call="auto" if functions else None,
                api_key=self.api_key,
                base_url=self.base_url,
                model=self.model,
            )
            if (
                not response
                or not hasattr(response, "choices")
                or not response.choices
            ):
                logger.error("No response or choices from LLM provider.")
                response_text = "I apologize, but I could not generate a response."
                break

            choice = response.choices[0]
            message = getattr(
                choice, "message", choice
            )  # Support both OpenAI and instructor

            # If the model wants to call a function/tool
            if hasattr(message, "function_call") and message.function_call:
                function_name = message.function_call.name
                # json.loads may raise on malformed arguments; that is
                # caught by the outer except and reported as a generic error.
                arguments = json.loads(message.function_call.arguments)
                logger.info(
                    f"Model requested tool '{function_name}' with args: {arguments}"
                )

                # Execute the tool (async)
                tool_result = await self.execute_tool(
                    agent_name, function_name, arguments
                )

                # Add the tool result to the prompt for the next round
                # (You may want to format this differently for your use case)
                full_prompt += (
                    f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
                    f"Result: {tool_result}\n"
                )
                continue  # Loop again, LLM will see tool result and may call another tool or finish

            # Otherwise, it's a normal message (final answer)
            response_text = message.content
            break

        # Apply output guardrails if any; a failing guardrail is logged
        # and skipped rather than aborting the response.
        processed_final_text = response_text
        if self.output_guardrails:
            for guardrail in self.output_guardrails:
                try:
                    processed_final_text = await guardrail.process(
                        processed_final_text
                    )
                except Exception as e:
                    logger.error(
                        f"Error applying output guardrail {guardrail.__class__.__name__}: {e}"
                    )

        self.last_text_response = processed_final_text

        if output_format == "text":
            # message.content can be None; coerce to "" for the caller.
            yield processed_final_text or ""
        elif output_format == "audio":
            # Strip markdown/emoji so TTS does not read markup aloud.
            cleaned_audio_buffer = self._clean_for_audio(processed_final_text)
            if cleaned_audio_buffer:
                async for audio_chunk in self.llm_provider.tts(
                    text=cleaned_audio_buffer,
                    voice=audio_voice,
                    response_format=audio_output_format,
                    instructions=audio_instructions,
                ):
                    yield audio_chunk
            else:
                yield ""
    except Exception as e:
        import traceback

        error_msg = (
            "I apologize, but I encountered an error processing your request."
        )
        logger.error(
            f"Error in generate_response for agent '{agent_name}': {e}\n{traceback.format_exc()}"
        )
        if output_format == "audio":
            async for chunk in self.llm_provider.tts(
                error_msg,
                voice=audio_voice,
                response_format=audio_output_format,
                instructions=audio_instructions,
            ):
                yield chunk
        else:
            yield error_msg
|
378
|
+
|
379
|
+
def _clean_for_audio(self, text: str) -> str:
|
380
|
+
"""Remove Markdown formatting, emojis, and non-pronounceable characters from text."""
|
381
|
+
|
382
|
+
if not text:
|
383
|
+
return ""
|
384
|
+
text = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", text)
|
385
|
+
text = re.sub(r"`([^`]+)`", r"\1", text)
|
386
|
+
text = re.sub(r"(\*\*|__)(.*?)\1", r"\2", text)
|
387
|
+
text = re.sub(r"(\*|_)(.*?)\1", r"\2", text)
|
388
|
+
text = re.sub(r"^\s*#+\s*(.*?)$", r"\1", text, flags=re.MULTILINE)
|
389
|
+
text = re.sub(r"^\s*>\s*(.*?)$", r"\1", text, flags=re.MULTILINE)
|
390
|
+
text = re.sub(r"^\s*[-*_]{3,}\s*$", "", text, flags=re.MULTILINE)
|
391
|
+
text = re.sub(r"^\s*[-*+]\s+(.*?)$", r"\1", text, flags=re.MULTILINE)
|
392
|
+
text = re.sub(r"^\s*\d+\.\s+(.*?)$", r"\1", text, flags=re.MULTILINE)
|
393
|
+
text = re.sub(r"\n{3,}", "\n\n", text)
|
394
|
+
emoji_pattern = re.compile(
|
395
|
+
"["
|
396
|
+
"\U0001f600-\U0001f64f" # emoticons
|
397
|
+
"\U0001f300-\U0001f5ff" # symbols & pictographs
|
398
|
+
"\U0001f680-\U0001f6ff" # transport & map symbols
|
399
|
+
"\U0001f700-\U0001f77f" # alchemical symbols
|
400
|
+
"\U0001f780-\U0001f7ff" # Geometric Shapes Extended
|
401
|
+
"\U0001f800-\U0001f8ff" # Supplemental Arrows-C
|
402
|
+
"\U0001f900-\U0001f9ff" # Supplemental Symbols and Pictographs
|
403
|
+
"\U0001fa70-\U0001faff" # Symbols and Pictographs Extended-A
|
404
|
+
"\U00002702-\U000027b0" # Dingbats
|
405
|
+
"\U000024c2-\U0001f251"
|
406
|
+
"\U00002600-\U000026ff" # Miscellaneous Symbols
|
407
|
+
"\U00002700-\U000027bf" # Dingbats
|
408
|
+
"\U0000fe00-\U0000fe0f" # Variation Selectors
|
409
|
+
"\U0001f1e0-\U0001f1ff" # Flags (iOS)
|
410
|
+
"]+",
|
411
|
+
flags=re.UNICODE,
|
412
|
+
)
|
413
|
+
text = emoji_pattern.sub(r" ", text)
|
414
|
+
text = re.sub(
|
415
|
+
r"[^\w\s\.\,\;\:\?\!\'\"\-\(\)]", " ", text
|
416
|
+
) # Keep basic punctuation
|
417
|
+
text = re.sub(r"\s+", " ", text)
|
418
|
+
return text.strip()
|
419
|
+
|
420
|
+
def _clean_tool_response(self, text: str) -> str:
|
421
|
+
"""Remove any tool markers or formatting that might have leaked into the response."""
|
422
|
+
if not text:
|
423
|
+
return ""
|
424
|
+
text = text.replace("[TOOL]", "").replace("[/TOOL]", "")
|
425
|
+
if text.lstrip().startswith("TOOL"):
|
426
|
+
text = text.lstrip()[4:].lstrip() # Remove "TOOL" and leading space
|
427
|
+
return text.strip()
|
428
|
+
|
429
|
+
# --- Add methods from factory logic ---
|
430
|
+
def load_and_register_plugins(self):
    """Loads plugins using the PluginManager.

    Errors are logged (with traceback via ``exc_info``) rather than
    propagated, so a bad plugin cannot abort service startup.
    """
    try:
        self.plugin_manager.load_plugins()
        logger.info("Plugins loaded successfully via PluginManager.")
    except Exception as e:
        logger.error(f"Error loading plugins: {e}", exc_info=True)
|
437
|
+
|
438
|
+
def register_agents_from_config(self):
    """Register every agent declared under the ``agents`` key of the config,
    then assign each agent its listed tools."""
    declared = self.config.get("agents", [])
    if not declared:
        logger.warning("No agents defined in the configuration.")
        return

    for entry in declared:
        name = entry.get("name")
        instructions = entry.get("instructions")
        specialization = entry.get("specialization")

        # All three identity fields are mandatory; skip malformed entries.
        if not (name and instructions and specialization):
            logger.warning(
                f"Skipping agent due to missing name, instructions, or specialization: {entry}"
            )
            continue

        self.register_ai_agent(name, instructions, specialization)

        # Wire up any tools listed for this agent.
        for tool_name in entry.get("tools", []):
            if self.assign_tool_for_agent(name, tool_name):
                logger.info(f"Assigned tool '{tool_name}' to agent '{name}'.")
            else:
                logger.warning(
                    f"Failed to assign tool '{tool_name}' to agent '{name}' (Tool might not be registered)."
                )
|