solana-agent 27.5.0__py3-none-any.whl → 28.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- solana_agent/adapters/openai_adapter.py +17 -21
- solana_agent/factories/agent_factory.py +12 -83
- solana_agent/interfaces/providers/llm.py +1 -1
- solana_agent/services/agent.py +425 -311
- {solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/METADATA +26 -50
- {solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/RECORD +8 -8
- {solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/LICENSE +0 -0
- {solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/WHEEL +0 -0
solana_agent/adapters/openai_adapter.py
CHANGED
@@ -15,7 +15,7 @@ from solana_agent.interfaces.providers.llm import LLMProvider
 
 T = TypeVar("T", bound=BaseModel)
 
-DEFAULT_CHAT_MODEL = "gpt-4.1
+DEFAULT_CHAT_MODEL = "gpt-4.1"
 DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
 DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
 DEFAULT_EMBEDDING_DIMENSIONS = 3072
@@ -129,45 +129,41 @@ class OpenAIAdapter(LLMProvider):
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         model: Optional[str] = None,
-    ) ->
-        """Generate text from OpenAI models."""
+    ) -> str:  # pragma: no cover
+        """Generate text from OpenAI models as a single string."""
         messages = []
-
         if system_prompt:
             messages.append({"role": "system", "content": system_prompt})
-
         messages.append({"role": "user", "content": prompt})
 
-        # Prepare request parameters
+        # Prepare request parameters - stream is always False now
        request_params = {
            "messages": messages,
-            "stream":
-            "model": self.text_model,
+            "stream": False,  # Hardcoded to False
+            "model": model or self.text_model,
        }
 
+        # Determine client based on provided api_key/base_url
        if api_key and base_url:
            client = AsyncOpenAI(api_key=api_key, base_url=base_url)
        else:
            client = self.client
 
-        if model:
-            request_params["model"] = model
-
        try:
+            # Make the non-streaming API call
            response = await client.chat.completions.create(**request_params)
 
-
-
-
-
-
+            # Handle non-streaming response
+            if response.choices and response.choices[0].message.content:
+                full_text = response.choices[0].message.content
+                return full_text  # Return the complete string
+            else:
+                print("Received non-streaming response with no content.")
+                return ""  # Return empty string if no content
 
        except Exception as e:
-
-
-
-            print(traceback.format_exc())
-            yield f"I apologize, but I encountered an error: {str(e)}"
+            # Log the error and return an error message string
+            print(f"Error in generate_text: {e}")
 
    async def parse_structured_output(
        self,
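Note: `generate_text` changes from an async generator to a plain coroutine in this release. A minimal before/after sketch of a caller (the `adapter` instance and prompt values are illustrative, not from this diff):

```python
# 27.5.0 - streaming: the method was an async generator, consumed chunk by chunk
# async for chunk in adapter.generate_text(prompt="hi", system_prompt=""):
#     print(chunk, end="")

# 28.0.0 - non-streaming: the method returns one complete string to await
text = await adapter.generate_text(prompt="hi", system_prompt="")
print(text)
```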
solana_agent/factories/agent_factory.py
CHANGED
@@ -130,90 +130,19 @@ class SolanaAgentFactory:
             f"Loaded {len(input_guardrails)} input guardrails and {len(output_guardrails)} output guardrails."
         )
 
-
-
-
-
-
-
-
-                llm_provider=llm_adapter,
-                business_mission=business_mission,
-                config=config,
-                api_key=config["gemini"]["api_key"],
-                base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
-                model="gemini-2.5-flash-preview-04-17",
-                output_guardrails=output_guardrails,
-            )
-
-            # Create routing service
-            routing_service = RoutingService(
-                llm_provider=llm_adapter,
-                agent_service=agent_service,
-                api_key=config["gemini"]["api_key"],
-                base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
-                model="gemini-2.5-flash-preview-04-17",
-            )
-
-        elif (
-            "gemini" in config
-            and "api_key" in config["gemini"]
-            and "grok" in config
-            and "api_key" in config["grok"]
-        ):
-            # Create primary services
-            agent_service = AgentService(
-                llm_provider=llm_adapter,
-                business_mission=business_mission,
-                config=config,
-                api_key=config["grok"]["api_key"],
-                base_url="https://api.x.ai/v1",
-                model="grok-3-mini-fast-beta",
-                output_guardrails=output_guardrails,
-            )
-            # Create routing service
-            routing_service = RoutingService(
-                llm_provider=llm_adapter,
-                agent_service=agent_service,
-                api_key=config["gemini"]["api_key"],
-                base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
-                model="gemini-2.5-flash-preview-04-17",
-            )
-
-        elif (
-            "grok" in config and "api_key" in config["grok"] and "gemini" not in config
-        ):
-            # Create primary services
-            agent_service = AgentService(
-                llm_provider=llm_adapter,
-                business_mission=business_mission,
-                config=config,
-                api_key=config["grok"]["api_key"],
-                base_url="https://api.x.ai/v1",
-                model="grok-3-mini-fast-beta",
-                output_guardrails=output_guardrails,
-            )
-
-            # Create routing service
-            routing_service = RoutingService(
-                llm_provider=llm_adapter,
-                agent_service=agent_service,
-            )
-
-        else:
-            # Create primary services
-            agent_service = AgentService(
-                llm_provider=llm_adapter,
-                business_mission=business_mission,
-                config=config,
-                output_guardrails=output_guardrails,
-            )
+        # Create primary services
+        agent_service = AgentService(
+            llm_provider=llm_adapter,
+            business_mission=business_mission,
+            config=config,
+            output_guardrails=output_guardrails,
+        )
 
-
-
-
-
-
+        # Create routing service
+        routing_service = RoutingService(
+            llm_provider=llm_adapter,
+            agent_service=agent_service,
+        )
 
         # Debug the agent service tool registry
         print(
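Note: the factory no longer branches on `gemini` or `grok` config keys; every configuration now builds `AgentService` and `RoutingService` on the default OpenAI adapter. A minimal sketch of a 28.0.0-style config (the `openai` key name and agent fields are illustrative assumptions, not taken from this diff):

```python
config = {
    "openai": {"api_key": "your-openai-api-key"},  # assumed key name
    "agents": [
        {
            "name": "researcher",  # illustrative agent definition
            "instructions": "You research and summarize topics.",
            "specialization": "Research",
        }
    ],
    # "gemini" / "grok" blocks are no longer read by this factory
}
```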
solana_agent/services/agent.py
CHANGED
@@ -10,6 +10,7 @@ import datetime as main_datetime
 from datetime import datetime
 import json
 import logging  # Add logging
+import re
 from typing import AsyncGenerator, Dict, List, Literal, Optional, Any, Union
 
 from solana_agent.interfaces.services.agent import AgentService as AgentServiceInterface
@@ -204,6 +205,137 @@ class AgentService(AgentServiceInterface):
             )
             return {"status": "error", "message": f"Error executing tool: {str(e)}"}
 
+    # --- Helper function to recursively substitute placeholders ---
+    def _substitute_placeholders(self, data: Any, results_map: Dict[str, str]) -> Any:
+        """Recursively substitutes placeholders like {{tool_name.result}} or {output_of_tool_name} in strings."""
+        if isinstance(data, str):
+            # Regex to find placeholders like {{tool_name.result}} or {output_of_tool_name}
+            placeholder_pattern = re.compile(
+                r"\{\{(?P<name1>[a-zA-Z0-9_]+)\.result\}\}|\{output_of_(?P<name2>[a-zA-Z0-9_]+)\}"
+            )
+
+            def replace_match(match):
+                tool_name = match.group("name1") or match.group("name2")
+                if tool_name and tool_name in results_map:
+                    logger.debug(f"Substituting placeholder for '{tool_name}'")
+                    return results_map[tool_name]
+                else:
+                    # If placeholder not found, leave it as is but log warning
+                    logger.warning(
+                        f"Could not find result for placeholder tool '{tool_name}'. Leaving placeholder."
+                    )
+                    return match.group(0)  # Return original placeholder
+
+            # Use re.sub with the replacement function
+            return placeholder_pattern.sub(replace_match, data)
+        elif isinstance(data, dict):
+            # Recursively process dictionary values
+            return {
+                k: self._substitute_placeholders(v, results_map)
+                for k, v in data.items()
+            }
+        elif isinstance(data, list):
+            # Recursively process list items
+            return [self._substitute_placeholders(item, results_map) for item in data]
+        else:
+            # Return non-string/dict/list types as is
+            return data
+
+    # --- Helper to parse tool calls ---
+    def _parse_tool_calls(self, text: str) -> List[Dict[str, Any]]:
+        """Parses all [TOOL]...[/TOOL] blocks in the text."""
+        tool_calls = []
+        # Regex to find all tool blocks, non-greedy match for content
+        pattern = re.compile(r"\[TOOL\](.*?)\[/TOOL\]", re.DOTALL | re.IGNORECASE)
+        matches = pattern.finditer(text)
+
+        for match in matches:
+            tool_content = match.group(1).strip()
+            tool_name = None
+            parameters = {}
+            try:
+                for line in tool_content.split("\n"):
+                    line = line.strip()
+                    if not line:
+                        continue
+                    if line.lower().startswith("name:"):
+                        tool_name = line[5:].strip()
+                    elif line.lower().startswith("parameters:"):
+                        params_text = line[11:].strip()
+                        try:
+                            # Prefer JSON parsing
+                            parameters = json.loads(params_text)
+                        except json.JSONDecodeError:
+                            logger.warning(
+                                f"Failed to parse parameters as JSON, falling back: {params_text}"
+                            )
+                            # Fallback: Treat as simple key=value (less robust)
+                            try:
+                                # Basic eval might work for {"key": "value"} but is risky
+                                # parameters = eval(params_text)  # Avoid eval if possible
+                                # Safer fallback: Assume simple string if not JSON-like
+                                if not params_text.startswith("{"):
+                                    # Try splitting key=value pairs? Very brittle.
+                                    # For now, log warning and skip complex fallback parsing
+                                    logger.error(
+                                        f"Cannot parse non-JSON parameters reliably: {params_text}"
+                                    )
+                                    parameters = {
+                                        "_raw_params": params_text
+                                    }  # Store raw string
+                                else:
+                                    # If it looks like a dict but isn't valid JSON, log error
+                                    logger.error(
+                                        f"Invalid dictionary format for parameters: {params_text}"
+                                    )
+                                    parameters = {"_raw_params": params_text}
+
+                            except Exception as parse_err:
+                                logger.error(
+                                    f"Fallback parameter parsing failed: {parse_err}"
+                                )
+                                parameters = {
+                                    "_raw_params": params_text
+                                }  # Store raw string on error
+
+                if tool_name:
+                    tool_calls.append({"name": tool_name, "parameters": parameters})
+                else:
+                    logger.warning(f"Parsed tool block missing name: {tool_content}")
+            except Exception as e:
+                logger.error(f"Error parsing tool content: {tool_content} - {e}")
+
+        logger.info(f"Parsed {len(tool_calls)} tool calls from response.")
+        return tool_calls
+
+    # --- Helper to execute a single parsed tool call ---
+    async def _execute_single_tool(
+        self, agent_name: str, tool_call: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Executes a single tool call dictionary and returns its result."""
+        tool_name = tool_call.get("name")
+        parameters = tool_call.get("parameters", {})
+        if not tool_name:
+            return {
+                "tool_name": "unknown",
+                "status": "error",
+                "message": "Tool name missing in parsed call",
+            }
+        # Ensure parameters is a dict, even if parsing failed
+        if not isinstance(parameters, dict):
+            logger.warning(
+                f"Parameters for tool '{tool_name}' is not a dict: {parameters}. Attempting execution with empty params."
+            )
+            parameters = {}
+
+        logger.debug(
+            f"Preparing to execute tool '{tool_name}' with params: {parameters}"
+        )
+        result = await self.execute_tool(agent_name, tool_name, parameters)
+        # Add tool name to result for easier aggregation
+        result["tool_name"] = tool_name
+        return result
+
     async def generate_response(
         self,
         agent_name: str,
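Note: a standalone sketch of the placeholder substitution, reusing the exact regex from `_substitute_placeholders` above (the example strings are illustrative):

```python
import re

# Same pattern as _substitute_placeholders: {{tool.result}} or {output_of_tool}
placeholder_pattern = re.compile(
    r"\{\{(?P<name1>[a-zA-Z0-9_]+)\.result\}\}|\{output_of_(?P<name2>[a-zA-Z0-9_]+)\}"
)

results_map = {"search_internet": "Solana hit a new milestone today."}
query = "Email bob@bob.com this summary: {output_of_search_internet}"

def replace_match(match: "re.Match[str]") -> str:
    tool_name = match.group("name1") or match.group("name2")
    # Fall back to the original placeholder when no result is stored.
    return results_map.get(tool_name, match.group(0))

print(placeholder_pattern.sub(replace_match, query))
# Email bob@bob.com this summary: Solana hit a new milestone today.
```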
@@ -229,18 +361,17 @@ class AgentService(AgentServiceInterface):
         ] = "aac",
         prompt: Optional[str] = None,
     ) -> AsyncGenerator[Union[str, bytes], None]:  # pragma: no cover
-        """Generate a response
+        """Generate a response, supporting multiple sequential tool calls with placeholder substitution.
 
-
-
-        Otherwise, text responses stream chunk-by-chunk. Audio responses always buffer.
+        Text responses are always generated as a single block.
+        Audio responses always buffer text before TTS.
         """
         agent = next((a for a in self.agents if a.name == agent_name), None)
         if not agent:
             error_msg = f"Agent '{agent_name}' not found."
             logger.warning(error_msg)
-            # Handle error output (unchanged)
             if output_format == "audio":
+                # Assuming tts returns an async generator
                 async for chunk in self.llm_provider.tts(
                     error_msg,
                     instructions=audio_instructions,
@@ -249,367 +380,307 @@
                 ):
                     yield chunk
             else:
-                yield error_msg
+                yield error_msg  # Yield the single error string
             return
 
-        # --- Determine Buffering Strategy ---
-        # Buffer text ONLY if format is text AND guardrails are present
-        should_buffer_text = bool(self.output_guardrails) and output_format == "text"
         logger.debug(
-            f"
+            f"Generating response for agent '{agent_name}'. Output format: {output_format}."
         )
 
         try:
             # --- System Prompt Assembly ---
             system_prompt_parts = [self.get_agent_system_prompt(agent_name)]
-
-            # Add tool usage instructions if tools are available for the agent
             tool_instructions = self._get_tool_usage_prompt(agent_name)
             if tool_instructions:
                 system_prompt_parts.append(tool_instructions)
-
-            # Add user ID context
             system_prompt_parts.append(f"USER IDENTIFIER: {user_id}")
-
-            # Add memory context if provided
             if memory_context:
                 system_prompt_parts.append(f"\nCONVERSATION HISTORY:\n{memory_context}")
-
-            # Add optional prompt if provided
             if prompt:
                 system_prompt_parts.append(f"\nADDITIONAL PROMPT:\n{prompt}")
+            final_system_prompt = "\n\n".join(filter(None, system_prompt_parts))
 
-
-
-
-            # --- End System Prompt Assembly ---
-
-            # --- Response Generation ---
-            complete_text_response = (
-                ""  # Always used for final storage and potentially for buffering
-            )
-            full_response_buffer = ""  # Used ONLY for audio buffering
-
-            # Tool call handling variables (unchanged)
-            tool_buffer = ""
-            pending_chunk = ""
-            is_tool_call = False
+            # --- Initial Response Generation (No Streaming) ---
+            initial_llm_response_buffer = ""
+            tool_calls_detected = False
             start_marker = "[TOOL]"
-            end_marker = "[/TOOL]"
 
-            logger.info(
-
-
-            async for chunk in self.llm_provider.generate_text(
+            logger.info(f"Generating initial response for agent '{agent_name}'...")
+            # Call generate_text and await the string result
+            initial_llm_response_buffer = await self.llm_provider.generate_text(
                 prompt=str(query),
                 system_prompt=final_system_prompt,
                 api_key=self.api_key,
                 base_url=self.base_url,
                 model=self.model,
+            )
+
+            # Check for errors returned as string by the adapter
+            if isinstance(
+                initial_llm_response_buffer, str
+            ) and initial_llm_response_buffer.startswith(
+                "I apologize, but I encountered an error"
             ):
-
-
-
-
+                logger.error(
+                    f"LLM provider failed during initial generation: {initial_llm_response_buffer}"
+                )
+                # Yield the error and exit
+                if output_format == "audio":
+                    async for chunk in self.llm_provider.tts(
+                        initial_llm_response_buffer,
+                        voice=audio_voice,
+                        response_format=audio_output_format,
+                        instructions=audio_instructions,
+                    ):
+                        yield chunk
                 else:
-
-
-                # STEP 1: Check for tool call start marker
-                if start_marker in combined_chunk and not is_tool_call:
-                    is_tool_call = True
-                    start_pos = combined_chunk.find(start_marker)
-                    before_marker = combined_chunk[:start_pos]
-                    after_marker = combined_chunk[start_pos:]
-
-                    if before_marker:
-                        processed_before_marker = before_marker
-                        # Apply guardrails ONLY if NOT buffering text
-                        if not should_buffer_text:
-                            for guardrail in self.output_guardrails:
-                                try:
-                                    processed_before_marker = await guardrail.process(
-                                        processed_before_marker
-                                    )
-                                except Exception as e:
-                                    logger.error(
-                                        f"Error applying output guardrail {guardrail.__class__.__name__} to pre-tool text: {e}"
-                                    )
+                    yield initial_llm_response_buffer
+                return
 
-
-
-
-
-                            and output_format == "text"
-                        ):
-                            yield processed_before_marker
+            # Check for tool markers in the complete response
+            if start_marker.lower() in initial_llm_response_buffer.lower():
+                tool_calls_detected = True
+                logger.info("Tool call marker detected in initial response.")
 
-
-
-
-
-
+            logger.debug(
+                f"Full initial LLM response buffer:\n--- START ---\n{initial_llm_response_buffer}\n--- END ---"
+            )
+            logger.info(
+                f"Initial LLM response received (length: {len(initial_llm_response_buffer)}). Tools detected: {tool_calls_detected}"
+            )
 
-
-
+            # --- Tool Execution Phase (if tools were detected) ---
+            final_response_text = ""
+            if tool_calls_detected:
+                parsed_calls = self._parse_tool_calls(initial_llm_response_buffer)
 
-
-
-
-
-
-                        agent_name=agent_name, tool_text=tool_buffer
-                    )
-                    response_text = self._clean_tool_response(response_text)
-                    user_prompt = f"{str(query)}\n\nTOOL RESULT: {response_text}"
-
-                    # --- Rebuild system prompt for follow-up ---
-                    follow_up_system_prompt_parts = [
-                        self.get_agent_system_prompt(agent_name)
-                    ]
-                    # Re-add tool instructions if needed for follow-up context
-                    if tool_instructions:
-                        follow_up_system_prompt_parts.append(tool_instructions)
-                    follow_up_system_prompt_parts.append(
-                        f"USER IDENTIFIER: {user_id}"
-                    )
-                    # Include original memory + original query + tool result context
-                    if memory_context:
-                        follow_up_system_prompt_parts.append(
-                            f"\nORIGINAL CONVERSATION HISTORY:\n{memory_context}"
-                        )
-                    # Add the original prompt if it was provided
-                    if prompt:
-                        follow_up_system_prompt_parts.append(
-                            f"\nORIGINAL ADDITIONAL PROMPT:\n{prompt}"
-                        )
-                    # Add context about the tool call that just happened
-                    follow_up_system_prompt_parts.append(
-                        f"\nPREVIOUS TOOL CALL CONTEXT:\nOriginal Query: {str(query)}\nTool Used: (Inferred from result)\nTool Result: {response_text}"
-                    )
+                if parsed_calls:
+                    # --- Execute tools SEQUENTIALLY with Placeholder Substitution ---
+                    executed_tool_results = []  # Store full result dicts
+                    # Map tool names to their string results for substitution
+                    tool_results_map: Dict[str, str] = {}
 
-
-
+                    logger.info(
+                        f"Executing {len(parsed_calls)} tools sequentially with substitution..."
+                    )
+                    for i, call in enumerate(parsed_calls):
+                        tool_name_to_exec = call.get("name", "unknown")
+                        logger.info(
+                            f"Executing tool {i + 1}/{len(parsed_calls)}: {tool_name_to_exec}"
                         )
-                    # --- End Rebuild system prompt ---
-
-                    logger.info("Generating follow-up response with tool results")
-                    async for processed_chunk in self.llm_provider.generate_text(
-                        prompt=user_prompt,  # Use the prompt that includes the tool result
-                        system_prompt=final_follow_up_system_prompt,
-                        api_key=self.api_key,
-                        base_url=self.base_url,
-                        model=self.model,
-                    ):
-                        chunk_to_yield_followup = processed_chunk
-                        # Apply guardrails ONLY if NOT buffering text
-                        if not should_buffer_text:
-                            for guardrail in self.output_guardrails:
-                                try:
-                                    chunk_to_yield_followup = (
-                                        await guardrail.process(
-                                            chunk_to_yield_followup
-                                        )
-                                    )
-                                except Exception as e:
-                                    logger.error(
-                                        f"Error applying output guardrail {guardrail.__class__.__name__} to follow-up chunk: {e}"
-                                    )
-
-                        # Yield ONLY if NOT buffering text
-                        if (
-                            chunk_to_yield_followup
-                            and not should_buffer_text
-                            and output_format == "text"
-                        ):
-                            yield chunk_to_yield_followup
-
-                        # Always accumulate
-                        if chunk_to_yield_followup:
-                            complete_text_response += chunk_to_yield_followup
-                            if output_format == "audio":
-                                full_response_buffer += chunk_to_yield_followup
-
-                    is_tool_call = False
-                    tool_buffer = ""
-                    pending_chunk = ""
-                    break  # Exit the original generation loop
-
-                    continue  # Continue collecting tool call
-
-                # STEP 3: Check for possible partial start markers
-                potential_marker = False
-                chunk_to_yield = combined_chunk
-                for i in range(1, len(start_marker)):
-                    if combined_chunk.endswith(start_marker[:i]):
-                        pending_chunk = combined_chunk[-i:]
-                        chunk_to_yield = combined_chunk[:-i]
-                        potential_marker = True
-                        break
-
-                if potential_marker:
-                    chunk_to_yield_safe = chunk_to_yield
-                    # Apply guardrails ONLY if NOT buffering text
-                    if not should_buffer_text:
-                        for guardrail in self.output_guardrails:
-                            try:
-                                chunk_to_yield_safe = await guardrail.process(
-                                    chunk_to_yield_safe
-                                )
-                            except Exception as e:
-                                logger.error(
-                                    f"Error applying output guardrail {guardrail.__class__.__name__} to safe chunk: {e}"
-                                )
 
-
-                    if (
-                        chunk_to_yield_safe
-                        and not should_buffer_text
-                        and output_format == "text"
-                    ):
-                        yield chunk_to_yield_safe
-
-                    # Always accumulate
-                    if chunk_to_yield_safe:
-                        complete_text_response += chunk_to_yield_safe
-                        if output_format == "audio":
-                            full_response_buffer += chunk_to_yield_safe
-                    continue
-
-                # STEP 4: Normal text processing
-                chunk_to_yield_normal = combined_chunk
-                # Apply guardrails ONLY if NOT buffering text
-                if not should_buffer_text:
-                    for guardrail in self.output_guardrails:
+                        # --- Substitute placeholders in parameters ---
                         try:
-
-
+                            original_params = call.get("parameters", {})
+                            substituted_params = self._substitute_placeholders(
+                                original_params, tool_results_map
                             )
-
+                            if substituted_params != original_params:
+                                logger.info(
+                                    f"Substituted parameters for tool '{tool_name_to_exec}': {substituted_params}"
+                                )
+                                call["parameters"] = substituted_params  # Update call dict
+                        except Exception as sub_err:
                             logger.error(
-                                f"Error
+                                f"Error substituting placeholders for tool '{tool_name_to_exec}': {sub_err}",
+                                exc_info=True,
                             )
+                            # Proceed with original params but log the error
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+                        # --- Execute the tool ---
+                        try:
+                            result = await self._execute_single_tool(agent_name, call)
+                            executed_tool_results.append(result)
+
+                            # --- Store successful result string for future substitutions ---
+                            if result.get("status") == "success":
+                                tool_result_str = str(result.get("result", ""))
+                                tool_results_map[tool_name_to_exec] = tool_result_str
+                                logger.debug(
+                                    f"Stored result for '{tool_name_to_exec}' (length: {len(tool_result_str)})"
+                                )
+                            else:
+                                # Store error message as result
+                                error_message = result.get("message", "Unknown error")
+                                tool_results_map[tool_name_to_exec] = (
+                                    f"Error: {error_message}"
+                                )
+                                logger.warning(
+                                    f"Tool '{tool_name_to_exec}' failed, storing error message as result."
+                                )
 
-
+                        except Exception as tool_exec_err:
+                            logger.error(
+                                f"Exception during execution of tool {tool_name_to_exec}: {tool_exec_err}",
+                                exc_info=True,
+                            )
+                            error_result = {
+                                "tool_name": tool_name_to_exec,
+                                "status": "error",
+                                "message": f"Exception during execution: {str(tool_exec_err)}",
+                            }
+                            executed_tool_results.append(error_result)
+                            tool_results_map[tool_name_to_exec] = (
+                                f"Error: {str(tool_exec_err)}"  # Store error
+                            )
 
-
-
-
-
-
-
-
-
-
-
-
-
+                    logger.info("Sequential tool execution with substitution complete.")
+                    # --- End Sequential Execution ---
+
+                    # Format results for the follow-up prompt (use executed_tool_results)
+                    tool_results_text_parts = []
+                    for i, result in enumerate(
+                        executed_tool_results
+                    ):  # Use the collected results
+                        tool_name = result.get(
+                            "tool_name", "unknown"
+                        )  # Name should be in the result dict now
+                        if (
+                            isinstance(result, Exception)
+                            or result.get("status") == "error"
+                        ):
+                            error_msg = (
+                                result.get("message", str(result))
+                                if isinstance(result, dict)
+                                else str(result)
                            )
-
-
-                            f"
+                            logger.error(f"Tool '{tool_name}' failed: {error_msg}")
+                            tool_results_text_parts.append(
+                                f"Tool {i + 1} ({tool_name}) Execution Failed:\n{error_msg}"
+                            )
+                        else:
+                            tool_output = str(result.get("result", ""))
+                            tool_results_text_parts.append(
+                                f"Tool {i + 1} ({tool_name}) Result:\n{tool_output}"
                            )
+                    tool_results_context = "\n\n".join(tool_results_text_parts)
 
-
-
-
-
-
-
+                    # --- Generate Final Response using Tool Results (No Streaming) ---
+                    follow_up_prompt = f"Original Query: {str(query)}\n\nRESULTS FROM TOOL CALLS:\n{tool_results_context}\n\nBased on the original query and the tool results, please provide the final response to the user."
+                    # Rebuild system prompt
+                    follow_up_system_prompt_parts = [
+                        self.get_agent_system_prompt(agent_name)
+                    ]
+                    follow_up_system_prompt_parts.append(f"USER IDENTIFIER: {user_id}")
+                    if memory_context:
+                        follow_up_system_prompt_parts.append(
+                            f"\nORIGINAL CONVERSATION HISTORY:\n{memory_context}"
+                        )
+                    if prompt:
+                        follow_up_system_prompt_parts.append(
+                            f"\nORIGINAL ADDITIONAL PROMPT:\n{prompt}"
+                        )
+                    follow_up_system_prompt_parts.append(
+                        f"\nCONTEXT: You previously decided to run {len(parsed_calls)} tool(s) sequentially to answer the query. The results are provided above."
+                    )
+                    final_follow_up_system_prompt = "\n\n".join(
+                        filter(None, follow_up_system_prompt_parts)
+                    )
 
-
-
-
-
-
+                    logger.info(
+                        "Generating final response incorporating tool results..."
+                    )
+                    # Call generate_text and await the string result
+                    synthesized_response_buffer = await self.llm_provider.generate_text(
+                        prompt=follow_up_prompt,
+                        system_prompt=final_follow_up_system_prompt,
+                        api_key=self.api_key,
+                        base_url=self.base_url,
+                        model=self.model,
+                    )
 
-
+                    # Check for errors returned as string by the adapter
+                    if isinstance(
+                        synthesized_response_buffer, str
+                    ) and synthesized_response_buffer.startswith(
+                        "I apologize, but I encountered an error"
+                    ):
+                        logger.error(
+                            f"LLM provider failed during final generation: {synthesized_response_buffer}"
+                        )
+                        # Yield the error and exit
+                        if output_format == "audio":
+                            async for chunk in self.llm_provider.tts(
+                                synthesized_response_buffer,
+                                voice=audio_voice,
+                                response_format=audio_output_format,
+                                instructions=audio_instructions,
+                            ):
+                                yield chunk
+                        else:
+                            yield synthesized_response_buffer
+                        return
 
-
-
+                    final_response_text = synthesized_response_buffer
+                    logger.info(
+                        f"Final synthesized response length: {len(final_response_text)}"
+                    )
+
+                else:
+                    # Tools detected but parsing failed
+                    logger.warning(
+                        "Tool markers detected, but no valid tool calls parsed. Treating initial response as final."
+                    )
+                    final_response_text = initial_llm_response_buffer
+            else:
+                # No tools detected
+                final_response_text = initial_llm_response_buffer
+                logger.info("No tools detected. Using initial response as final.")
+
+            # --- Final Output Processing (Guardrails, TTS, Yielding) ---
+            processed_final_text = final_response_text
+            if self.output_guardrails:
                 logger.info(
-                    f"Applying output guardrails to
+                    f"Applying output guardrails to final text response (length: {len(processed_final_text)})"
                 )
-
+                original_len = len(processed_final_text)
                 for guardrail in self.output_guardrails:
                     try:
-
-
-                        )
-                    except Exception as e:
-                        logger.error(
-                            f"Error applying output guardrail {guardrail.__class__.__name__} to full text buffer: {e}"
-                        )
-
-                if processed_full_text:
-                    yield processed_full_text
-                    # Update last_text_response with the final processed text
-                    self.last_text_response = processed_full_text
-
-            # Case 2: Audio output (apply guardrails to buffer before TTS) - Unchanged Logic
-            elif output_format == "audio" and full_response_buffer:
-                original_buffer = full_response_buffer
-                processed_audio_buffer = full_response_buffer
-                for (
-                    guardrail
-                ) in self.output_guardrails:  # Apply even if empty, for consistency
-                    try:
-                        processed_audio_buffer = await guardrail.process(
-                            processed_audio_buffer
+                        processed_final_text = await guardrail.process(
+                            processed_final_text
                         )
                     except Exception as e:
                         logger.error(
-                            f"Error applying output guardrail {guardrail.__class__.__name__} to
+                            f"Error applying output guardrail {guardrail.__class__.__name__} to final text: {e}"
                        )
-                if
+                if len(processed_final_text) != original_len:
                    logger.info(
-                        f"
+                        f"Guardrails modified final text length from {original_len} to {len(processed_final_text)}"
                    )
 
-
-                logger.info(
-                    f"Processing {len(cleaned_audio_buffer)} characters for audio output"
-                )
-                async for audio_chunk in self.llm_provider.tts(
-                    text=cleaned_audio_buffer,
-                    voice=audio_voice,
-                    response_format=audio_output_format,
-                    instructions=audio_instructions,
-                ):
-                    yield audio_chunk
-                # Update last_text_response with the text *before* TTS cleaning
-                self.last_text_response = (
-                    processed_audio_buffer  # Store the guardrail-processed text
-                )
+            self.last_text_response = processed_final_text
 
-
-
-
+            if output_format == "text":
+                # Yield the single final string
+                if processed_final_text:
+                    yield processed_final_text
+                else:
+                    logger.warning("Final processed text was empty.")
+                    yield ""
+            elif output_format == "audio":
+                # TTS still needs a generator
+                text_for_tts = processed_final_text
+                cleaned_audio_buffer = self._clean_for_audio(text_for_tts)
                logger.info(
-                    "
+                    f"Processing {len(cleaned_audio_buffer)} characters for audio output"
                )
+                if cleaned_audio_buffer:
+                    async for audio_chunk in self.llm_provider.tts(
+                        text=cleaned_audio_buffer,
+                        voice=audio_voice,
+                        response_format=audio_output_format,
+                        instructions=audio_instructions,
+                    ):
+                        yield audio_chunk
+                else:
+                    logger.warning("Final text for audio was empty after cleaning.")
 
            logger.info(
                f"Response generation complete for agent '{agent_name}': {len(self.last_text_response)} final chars"
            )
 
        except Exception as e:
-            # --- Error Handling
+            # --- Error Handling ---
            import traceback
 
            error_msg = (
@@ -711,13 +782,18 @@ class AgentService(AgentServiceInterface):
 
         tools_json = json.dumps(simplified_tools, indent=2)
 
+        logger.info(
+            f"Generated tool usage prompt for agent '{agent_name}': {tools_json}"
+        )
+
         return f"""
 AVAILABLE TOOLS:
 {tools_json}
 
-⚠️ CRITICAL
-
-
+⚠️ CRITICAL INSTRUCTIONS FOR TOOL USAGE:
+1. EXECUTION ORDER MATTERS: If multiple steps are needed (e.g., get information THEN use it), you MUST output the [TOOL] blocks in the exact sequence they need to run. Output the information-gathering tool call FIRST, then the tool call that uses the information.
+2. ONLY TOOL CALLS: When using a tool, NEVER include explanatory text before or after the tool call block. Only output the exact tool call format shown below.
+3. USE TOOLS WHEN NEEDED: Always call the necessary tool to give the latest information, especially for time-sensitive queries.
 
 TOOL USAGE FORMAT:
 [TOOL]
@@ -726,24 +802,62 @@ class AgentService(AgentServiceInterface):
 [/TOOL]
 
 EXAMPLES:
-
+
+✅ CORRECT - Get news THEN email (Correct Order):
 [TOOL]
 name: search_internet
-parameters: {{"query": "latest news on
+parameters: {{"query": "latest news on Canada"}}
+[/TOOL]
+[TOOL]
+name: mcp
+parameters: {{"query": "Send an email to bob@bob.com with subject 'Friendly Reminder to Clean Your Room' and body 'Hi Bob, just a friendly reminder to please clean your room when you get a chance.'"}}
 [/TOOL]
+(Note: The system will handle replacing placeholders like '{{output_of_search_internet}}' if possible, but the ORDER is crucial.)
 
-
-
+
+❌ INCORRECT - Wrong Order:
+[TOOL]
+name: mcp
+parameters: {{"query": "Send an email to bob@bob.com with subject 'Friendly Reminder to Clean Your Room' and body 'Hi Bob, just a friendly reminder to please clean your room when you get a chance.'"}}
+[/TOOL]
+[TOOL]
+name: search_internet
+parameters: {{"query": "latest news on Canada"}}
+[/TOOL]
+
+
+❌ INCORRECT - Explanatory Text:
+To get the news, I'll search.
 [TOOL]
 name: search_internet
 parameters: {{"query": "latest news on Solana"}}
 [/TOOL]
+Now I will email it.
+[TOOL]
+name: mcp
+parameters: {{"query": "Send an email to bob@bob.com with subject 'Friendly Reminder to Clean Your Room' and body 'Hi Bob, just a friendly reminder to please clean your room when you get a chance.'"}}
+[/TOOL]
+
 
 REMEMBER:
-
-
-
-4. You will receive the tool results and can then respond to the user
+- Output ONLY the [TOOL] blocks in the correct execution order.
+- I will execute the tools sequentially as you provide them.
+- You will receive the results of ALL tool calls before formulating the final response.
 """
 
     def _clean_for_audio(self, text: str) -> str:
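Note: a response shaped like the CORRECT example above parses into two calls, preserving order. A simplified standalone sketch using the same block regex as `_parse_tool_calls` (the real parser is case-insensitive on keys and has non-JSON fallbacks; this assumes well-formed two-line blocks):

```python
import json
import re

# Same block pattern as _parse_tool_calls above.
pattern = re.compile(r"\[TOOL\](.*?)\[/TOOL\]", re.DOTALL | re.IGNORECASE)

response = """[TOOL]
name: search_internet
parameters: {"query": "latest news on Canada"}
[/TOOL]
[TOOL]
name: mcp
parameters: {"query": "Send an email to bob@bob.com"}
[/TOOL]"""

for i, match in enumerate(pattern.finditer(response), start=1):
    # Split each block into its name: and parameters: lines.
    name_line, params_line = match.group(1).strip().split("\n", 1)
    name = name_line[len("name:"):].strip()
    params = json.loads(params_line.strip()[len("parameters:"):].strip())
    print(i, name, params)
# 1 search_internet {'query': 'latest news on Canada'}
# 2 mcp {'query': 'Send an email to bob@bob.com'}
```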
{solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 27.5.0
+Version: 28.0.0
 Summary: AI Agents for Solana
 License: MIT
 Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
@@ -59,6 +59,7 @@ Build your AI agents in three lines of code!
 * Intelligent Routing
 * Business Alignment
 * Extensible Tooling
+* Automatic Tool Workflows
 * Knowledge Base
 * MCP Support
 * Guardrails
@@ -84,26 +85,25 @@ Build your AI agents in three lines of code!
 * Assigned tools are utilized by agents automatically and effectively
 * Integrated Knowledge Base with semantic search and automatic PDF chunking
 * Input and output guardrails for content filtering, safety, and data sanitization
+* Automatic sequential tool workflows allowing agents to chain multiple tools
 
 ## Stack
 
 ### Tech
 
 * [Python](https://python.org) - Programming Language
-* [OpenAI](https://openai.com)
+* [OpenAI](https://openai.com) - AI Provider
 * [MongoDB](https://mongodb.com) - Conversational History (optional)
 * [Zep Cloud](https://getzep.com) - Conversational Memory (optional)
 * [Pinecone](https://pinecone.io) - Knowledge Base (optional)
 
-###
+### AI Models Used
 
-* [gpt-4.1
+* [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent)
 * [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router)
 * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) or [text-embedding-3-small](https://platform.openai.com/docs/models/text-embedding-3-small) (embedding)
 * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
 * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
-* [gemini-2.5-flash-preview](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (optional)
-* [grok-3-mini-fast-beta](https://docs.x.ai/docs/models#models-and-pricing) (optional)
 
 ## Installation
 
@@ -113,13 +113,13 @@ You can install Solana Agent using pip:
 
 ## Flows
 
-In both flows of single and multiple agents - it is one user query to one agent using one
+In both flows of single and multiple agents - it is one user query to one agent using one or many tools (if needed).
 
-An agent can have multiple tools and will choose the best
+An agent can have multiple tools and will choose the best ones to fulfill the user's query.
 
-Routing is determined by optimal domain expertise of the agent for the user query.
+Routing is determined by optimal domain expertise of the agent for the user's query.
 
-When the agent uses
+When the agent uses tools it feeds the tools output back to itself to generate the final response.
 
 This is important as tools generally output unstructured and unformatted data that the agent needs to prepare for the user.
 
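Note: schematically, the feedback loop described here is what the new `generate_response` in `solana_agent/services/agent.py` implements (simplified pseudocode, not the literal implementation):

```python
# Simplified sketch of the 28.0.0 tool-feedback flow.
initial = await llm.generate_text(prompt=query, system_prompt=system_prompt)
calls = service._parse_tool_calls(initial)  # ordered [TOOL] blocks, if any
if calls:
    results = []
    for call in calls:  # executed sequentially; results substitutable downstream
        results.append(await service._execute_single_tool(agent_name, call))
    final = await llm.generate_text(  # tool output fed back to the agent
        prompt=f"Original Query: {query}\n\nRESULTS FROM TOOL CALLS:\n{results}",
        system_prompt=system_prompt,
    )
else:
    final = initial
```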
@@ -128,13 +128,13 @@ Keep this in mind while designing your agentic systems using Solana Agent.
 ```ascii
 Single Agent
 
-┌────────┐        ┌─────────┐
-│        │        │         │        │
-│        │        │         │        │
-│  User  │◄──────►│  Agent  │◄──────►│
-│        │        │         │        │
-│        │        │         │        │
-└────────┘        └─────────┘
+┌────────┐        ┌─────────┐        ┌────────-┐
+│        │        │         │        │         │
+│        │        │         │        │         │
+│  User  │◄──────►│  Agent  │◄──────►│  Tools  │
+│        │        │         │        │         │
+│        │        │         │        │         │
+└────────┘        └─────────┘        └────────-┘
 
 
 
@@ -142,13 +142,13 @@ Keep this in mind while designing your agentic systems using Solana Agent.
 
 Multiple Agents
 
-     ┌────────┐        ┌──────────┐        ┌─────────┐
-     │        │        │          │        │         │        │
-     │        │        │          │        │         │        │
-┌───►│  User  ├───────►│  Router  ├───────►│  Agent  │◄──────►│
-│    │        │        │          │        │         │        │
-│    │        │        │          │        │         │        │
-│    └────────┘        └──────────┘        └────┬────┘
+     ┌────────┐        ┌──────────┐        ┌─────────┐        ┌────────-┐
+     │        │        │          │        │         │        │         │
+     │        │        │          │        │         │        │         │
+┌───►│  User  ├───────►│  Router  ├───────►│  Agent  │◄──────►│  Tools  │
+│    │        │        │          │        │         │        │         │
+│    │        │        │          │        │         │        │         │
+│    └────────┘        └──────────┘        └────┬────┘        └────────-┘
 │                                               │
 │                                               │
 │                                               │
@@ -319,30 +319,6 @@ config = {
 }
 ```
 
-### Gemini
-
-This allows Gemini to replace OpenAI for agent and router.
-
-```python
-config = {
-    "gemini": {
-        "api_key": "your-gemini-api-key",
-    },
-}
-```
-
-### Grok
-
-This allows Grok to replace OpenAI (or Gemini) for agent.
-
-```python
-config = {
-    "grok": {
-        "api_key": "your-grok-api-key",
-    },
-}
-```
-
 ### Knowledge Base
 
 The Knowledge Base (KB) is meant to store text values and/or small PDFs.
@@ -533,8 +509,8 @@ class MyOutputGuardrail(OutputGuardrail):
 
 Tools can be used from plugins like Solana Agent Kit (sakit) or via inline tools. Tools available via plugins integrate automatically with Solana Agent.
 
-* Agents can
-* Agents choose the best
+* Agents can use multiple tools per response and should apply the right sequential order (like send an email to bob@bob.com with the latest news on Solana)
+* Agents choose the best tools for the job
 * Solana Agent doesn't use OpenAI function calling (tools) as they don't support async functions
 * Solana Agent tools are async functions
 
{solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 solana_agent/__init__.py,sha256=g83qhMOCwcWL19V4CYbQwl0Ykpb0xn49OUh05i-pu3g,1001
 solana_agent/adapters/__init__.py,sha256=tiEEuuy0NF3ngc_tGEcRTt71zVI58v3dYY9RvMrF2Cg,204
 solana_agent/adapters/mongodb_adapter.py,sha256=0KWIa6kaFbUFvtKUzuV_0p0RFlPPGKrDVIEU2McVY3k,2734
-solana_agent/adapters/openai_adapter.py,sha256=
+solana_agent/adapters/openai_adapter.py,sha256=QwNMWT2JKChb6RoHzlHWZrhujsi1cn8V4HDPWjUclIs,11105
 solana_agent/adapters/pinecone_adapter.py,sha256=SDbf_XJMuFDKhNfF25_VXaYG3vrmYyPIo2SyhaniEwg,23048
 solana_agent/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 solana_agent/client/solana_agent.py,sha256=jUGWxYJL9ZWxGsVX9C6FrRQyX7r6Cep0ijcfm7cbkJI,10098
@@ -9,14 +9,14 @@ solana_agent/domains/__init__.py,sha256=HiC94wVPRy-QDJSSRywCRrhrFfTBeHjfi5z-QfZv
 solana_agent/domains/agent.py,sha256=3Q1wg4eIul0CPpaYBOjEthKTfcdhf1SAiWc2R-IMGO8,2561
 solana_agent/domains/routing.py,sha256=1yR4IswGcmREGgbOOI6TKCfuM7gYGOhQjLkBqnZ-rNo,582
 solana_agent/factories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-solana_agent/factories/agent_factory.py,sha256=
+solana_agent/factories/agent_factory.py,sha256=WdXvVE4301vi_5QTDesQG3dCoSnMi4rp7LWvBQQD0bk,11377
 solana_agent/guardrails/pii.py,sha256=FCz1IC3mmkr41QFFf5NaC0fwJrVkwFsxgyOCS2POO5I,4428
 solana_agent/interfaces/__init__.py,sha256=IQs1WIM1FeKP1-kY2FEfyhol_dB-I-VAe2rD6jrVF6k,355
 solana_agent/interfaces/client/client.py,sha256=hsvaQiQdz3MLMNc77oD6ocvvnyl7Ez2n087ptFDA19M,3687
 solana_agent/interfaces/guardrails/guardrails.py,sha256=gZCQ1FrirW-mX6s7FoYrbRs6golsp-x269kk4kQiZzc,572
 solana_agent/interfaces/plugins/plugins.py,sha256=Rz52cWBLdotwf4kV-2mC79tRYlN29zHSu1z9-y1HVPk,3329
 solana_agent/interfaces/providers/data_storage.py,sha256=Y92Cq8BtC55VlsYLD7bo3ofqQabNnlg7Q4H1Q6CDsLU,1713
-solana_agent/interfaces/providers/llm.py,sha256=
+solana_agent/interfaces/providers/llm.py,sha256=Wxn0qXIk7BmpI0FBrhjJVV6DmsfLUpUauZR-pE3brz8,2395
 solana_agent/interfaces/providers/memory.py,sha256=h3HEOwWCiFGIuFBX49XOv1jFaQW3NGjyKPOfmQloevk,1011
 solana_agent/interfaces/providers/vector_storage.py,sha256=XPYzvoWrlDVFCS9ItBmoqCFWXXWNYY-d9I7_pvP7YYk,1561
 solana_agent/interfaces/services/agent.py,sha256=YsxyvBPK3ygBEStLyL4BwmIl84NMrV3dK0PlwCFoyq0,2094
@@ -31,11 +31,11 @@ solana_agent/plugins/tools/auto_tool.py,sha256=uihijtlc9CCqCIaRcwPuuN7o1SHIpWL2G
 solana_agent/repositories/__init__.py,sha256=fP83w83CGzXLnSdq-C5wbw9EhWTYtqE2lQTgp46-X_4,163
 solana_agent/repositories/memory.py,sha256=YYpCyiDVi3a5ZOFYFkzBS6MDjo9g2TnwbEZ5KKfKbII,7204
 solana_agent/services/__init__.py,sha256=iko0c2MlF8b_SA_nuBGFllr2E3g_JowOrOzGcnU9tkA,162
-solana_agent/services/agent.py,sha256=
+solana_agent/services/agent.py,sha256=6CwTqIzPykx4yGI53BM1UbYhQN5Va1DJu0RzNvzuh6U,41894
 solana_agent/services/knowledge_base.py,sha256=J9V8dNoCCcko3EasiGwK2JJ_A_oG_e-Ni9pgNg0T6wA,33486
 solana_agent/services/query.py,sha256=bAoUfe_2EBVEVeh99-2E9KZ0zaHUzf7Lqel3rlHyNX8,17459
 solana_agent/services/routing.py,sha256=-0fNIKDtCn0-TLUYDFYAE4jPLMeI_jCXIpgtgWDpdf8,6986
-solana_agent-
-solana_agent-
-solana_agent-
-solana_agent-
+solana_agent-28.0.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+solana_agent-28.0.0.dist-info/METADATA,sha256=UdgCn1QADdEba_j5vwdZmqPAQiHm_fO9sMzqJtvFYL0,26646
+solana_agent-28.0.0.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+solana_agent-28.0.0.dist-info/RECORD,,
{solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/LICENSE: File without changes
{solana_agent-27.5.0.dist-info → solana_agent-28.0.0.dist-info}/WHEEL: File without changes