solana-agent 29.3.0.tar.gz → 30.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {solana_agent-29.3.0 → solana_agent-30.0.1}/PKG-INFO +6 -54
  2. {solana_agent-29.3.0 → solana_agent-30.0.1}/README.md +5 -53
  3. {solana_agent-29.3.0 → solana_agent-30.0.1}/pyproject.toml +1 -1
  4. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/adapters/openai_adapter.py +10 -16
  5. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/factories/agent_factory.py +0 -21
  6. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/providers/llm.py +3 -4
  7. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/services/agent.py +46 -34
  8. {solana_agent-29.3.0 → solana_agent-30.0.1}/LICENSE +0 -0
  9. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/__init__.py +0 -0
  10. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/adapters/__init__.py +0 -0
  11. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/adapters/mongodb_adapter.py +0 -0
  12. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/adapters/pinecone_adapter.py +0 -0
  13. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/cli.py +0 -0
  14. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/client/__init__.py +0 -0
  15. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/client/solana_agent.py +0 -0
  16. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/domains/__init__.py +0 -0
  17. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/domains/agent.py +0 -0
  18. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/domains/routing.py +0 -0
  19. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/factories/__init__.py +0 -0
  20. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/guardrails/pii.py +0 -0
  21. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/__init__.py +0 -0
  22. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/client/client.py +0 -0
  23. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/guardrails/guardrails.py +0 -0
  24. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/plugins/plugins.py +0 -0
  25. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/providers/data_storage.py +0 -0
  26. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/providers/memory.py +0 -0
  27. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/providers/vector_storage.py +0 -0
  28. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/services/agent.py +0 -0
  29. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/services/knowledge_base.py +0 -0
  30. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/services/query.py +0 -0
  31. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/interfaces/services/routing.py +0 -0
  32. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/plugins/__init__.py +0 -0
  33. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/plugins/manager.py +0 -0
  34. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/plugins/registry.py +0 -0
  35. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/plugins/tools/__init__.py +0 -0
  36. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/plugins/tools/auto_tool.py +0 -0
  37. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/repositories/__init__.py +0 -0
  38. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/repositories/memory.py +0 -0
  39. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/services/__init__.py +0 -0
  40. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/services/knowledge_base.py +0 -0
  41. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/services/query.py +0 -0
  42. {solana_agent-29.3.0 → solana_agent-30.0.1}/solana_agent/services/routing.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: solana-agent
3
- Version: 29.3.0
3
+ Version: 30.0.1
4
4
  Summary: AI Agents for Solana
5
5
  License: MIT
6
6
  Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
@@ -53,7 +53,6 @@ Build your AI agents in three lines of code!
53
53
  * Three lines of code setup
54
54
  * Simple Agent Definition
55
55
  * Fast Responses
56
- * Multi-Vendor Support
57
56
  * Solana Integration
58
57
  * Multi-Agent Swarm
59
58
  * Multi-Modal (Images & Audio & Text)
@@ -62,8 +61,8 @@ Build your AI agents in three lines of code!
62
61
  * Intelligent Routing
63
62
  * Business Alignment
64
63
  * Extensible Tooling
65
- * Automatic Tool Workflows
66
64
  * Autonomous Operation
65
+ * Automated Tool Workflows
67
66
  * Structured Outputs
68
67
  * Knowledge Base
69
68
  * MCP Support
@@ -78,13 +77,11 @@ Build your AI agents in three lines of code!
78
77
 
79
78
  * Easy three lines of code setup
80
79
  * Simple agent definition using JSON
81
- * Fast AI responses
82
- * Multi-vendor support including OpenAI, Grok, and Gemini AI services
80
+ * Designed for a multi-agent swarm
81
+ * Fast multi-modal processing of text, audio, and images
83
82
  * Solana Integration
84
83
  * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
85
84
  * Integrated observability and tracing via [Pydantic Logfire](https://pydantic.dev/logfire)
86
- * Designed for a multi-agent swarm
87
- * Seamless streaming with real-time multi-modal processing of text, audio, and images
88
85
  * Persistent memory that preserves context across all agent interactions
89
86
  * Quick Internet search to answer users' queries
90
87
  * Streamlined message history for all agent interactions
@@ -95,8 +92,8 @@ Build your AI agents in three lines of code!
95
92
  * Integrated Knowledge Base with semantic search and automatic PDF chunking
96
93
  * Input and output guardrails for content filtering, safety, and data sanitization
97
94
  * Generate custom images based on text prompts with storage on S3 compatible services
98
- * Automatic sequential tool workflows allowing agents to chain multiple tools
99
95
  * Deterministically return structured outputs
96
+ * Sequentially provide tool commands that execute in the proper order
100
97
  * Combine with event-driven systems to create autonomous agents
101
98
 
102
99
  ## Stack
@@ -114,25 +111,10 @@ Build your AI agents in three lines of code!
114
111
  ### AI Models Used
115
112
 
116
113
  **OpenAI**
117
- * [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent - can be overridden)
118
- * [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router - can be overridden)
114
+ * [gpt-4.1-mini](https://platform.openai.com/docs/models/gpt-4.1-mini) (agent & router)
119
115
  * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) (embedding)
120
116
  * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
121
117
  * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
122
- * [gpt-image-1](https://platform.openai.com/docs/models/gpt-image-1) (image generation - can be overridden)
123
- * [gpt-4o-mini-search-preview](https://platform.openai.com/docs/models/gpt-4o-mini-search-preview) (Internet search)
124
-
125
- **Grok**
126
- * [grok-3-fast](https://x.ai/api#pricing) (agent - optional)
127
- * [grok-3-mini-fast](https://x.ai/api#pricing) (router - optional)
128
- * [grok-2-image](https://x.ai/api#pricing) (image generation - optional)
129
-
130
- **Gemini**
131
- * [gemini-2.5-flash-preview-04-17](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (agent & router - optional)
132
- * [imagen-3.0-generate-002](https://ai.google.dev/gemini-api/docs/models#imagen-3) (image generation - optional)
133
-
134
- **Ollama**
135
- * [gemma:4b-it-qat](https://ollama.com/library/gemma3) - (agent & router - optional)
136
118
 
137
119
  ## Installation
138
120
 
@@ -467,36 +449,6 @@ config = {
467
449
  }
468
450
  ```
469
451
 
470
- ### Grok
471
-
472
- ```python
473
- config = {
474
- "grok": {
475
- "api_key": "your-grok-api-key",
476
- },
477
- }
478
- ```
479
-
480
- ### Gemini
481
-
482
- ```python
483
- config = {
484
- "gemini": {
485
- "api_key": "your-gemini-api-key",
486
- },
487
- }
488
- ```
489
-
490
- ### Ollama
491
-
492
- ```python
493
- config = {
494
- "ollama": {
495
- "api_key": "use-this-key-1010"
496
- },
497
- }
498
- ```
499
-
500
452
  ### Knowledge Base
501
453
 
502
454
  The Knowledge Base (KB) is meant to store text values and/or PDFs (extracts text) - can handle very large PDFs.
@@ -18,7 +18,6 @@ Build your AI agents in three lines of code!
18
18
  * Three lines of code setup
19
19
  * Simple Agent Definition
20
20
  * Fast Responses
21
- * Multi-Vendor Support
22
21
  * Solana Integration
23
22
  * Multi-Agent Swarm
24
23
  * Multi-Modal (Images & Audio & Text)
@@ -27,8 +26,8 @@ Build your AI agents in three lines of code!
27
26
  * Intelligent Routing
28
27
  * Business Alignment
29
28
  * Extensible Tooling
30
- * Automatic Tool Workflows
31
29
  * Autonomous Operation
30
+ * Automated Tool Workflows
32
31
  * Structured Outputs
33
32
  * Knowledge Base
34
33
  * MCP Support
@@ -43,13 +42,11 @@ Build your AI agents in three lines of code!
43
42
 
44
43
  * Easy three lines of code setup
45
44
  * Simple agent definition using JSON
46
- * Fast AI responses
47
- * Multi-vendor support including OpenAI, Grok, and Gemini AI services
45
+ * Designed for a multi-agent swarm
46
+ * Fast multi-modal processing of text, audio, and images
48
47
  * Solana Integration
49
48
  * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
50
49
  * Integrated observability and tracing via [Pydantic Logfire](https://pydantic.dev/logfire)
51
- * Designed for a multi-agent swarm
52
- * Seamless streaming with real-time multi-modal processing of text, audio, and images
53
50
  * Persistent memory that preserves context across all agent interactions
54
51
  * Quick Internet search to answer users' queries
55
52
  * Streamlined message history for all agent interactions
@@ -60,8 +57,8 @@ Build your AI agents in three lines of code!
60
57
  * Integrated Knowledge Base with semantic search and automatic PDF chunking
61
58
  * Input and output guardrails for content filtering, safety, and data sanitization
62
59
  * Generate custom images based on text prompts with storage on S3 compatible services
63
- * Automatic sequential tool workflows allowing agents to chain multiple tools
64
60
  * Deterministically return structured outputs
61
+ * Sequentially provide tool commands that execute in the proper order
65
62
  * Combine with event-driven systems to create autonomous agents
66
63
 
67
64
  ## Stack
@@ -79,25 +76,10 @@ Build your AI agents in three lines of code!
79
76
  ### AI Models Used
80
77
 
81
78
  **OpenAI**
82
- * [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent - can be overridden)
83
- * [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router - can be overridden)
79
+ * [gpt-4.1-mini](https://platform.openai.com/docs/models/gpt-4.1-mini) (agent & router)
84
80
  * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) (embedding)
85
81
  * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
86
82
  * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
87
- * [gpt-image-1](https://platform.openai.com/docs/models/gpt-image-1) (image generation - can be overridden)
88
- * [gpt-4o-mini-search-preview](https://platform.openai.com/docs/models/gpt-4o-mini-search-preview) (Internet search)
89
-
90
- **Grok**
91
- * [grok-3-fast](https://x.ai/api#pricing) (agent - optional)
92
- * [grok-3-mini-fast](https://x.ai/api#pricing) (router - optional)
93
- * [grok-2-image](https://x.ai/api#pricing) (image generation - optional)
94
-
95
- **Gemini**
96
- * [gemini-2.5-flash-preview-04-17](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (agent & router - optional)
97
- * [imagen-3.0-generate-002](https://ai.google.dev/gemini-api/docs/models#imagen-3) (image generation - optional)
98
-
99
- **Ollama**
100
- * [gemma:4b-it-qat](https://ollama.com/library/gemma3) - (agent & router - optional)
101
83
 
102
84
  ## Installation
103
85
 
@@ -432,36 +414,6 @@ config = {
432
414
  }
433
415
  ```
434
416
 
435
- ### Grok
436
-
437
- ```python
438
- config = {
439
- "grok": {
440
- "api_key": "your-grok-api-key",
441
- },
442
- }
443
- ```
444
-
445
- ### Gemini
446
-
447
- ```python
448
- config = {
449
- "gemini": {
450
- "api_key": "your-gemini-api-key",
451
- },
452
- }
453
- ```
454
-
455
- ### Ollama
456
-
457
- ```python
458
- config = {
459
- "ollama": {
460
- "api_key": "use-this-key-1010"
461
- },
462
- }
463
- ```
464
-
465
417
  ### Knowledge Base
466
418
 
467
419
  The Knowledge Base (KB) is meant to store text values and/or PDFs (extracts text) - can handle very large PDFs.
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "solana-agent"
3
- version = "29.3.0"
3
+ version = "30.0.1"
4
4
  description = "AI Agents for Solana"
5
5
  authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
6
6
  license = "MIT"
@@ -33,9 +33,9 @@ logger = logging.getLogger(__name__)
33
33
 
34
34
  T = TypeVar("T", bound=BaseModel)
35
35
 
36
- DEFAULT_CHAT_MODEL = "gpt-4.1"
37
- DEFAULT_VISION_MODEL = "gpt-4.1"
38
- DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
36
+ DEFAULT_CHAT_MODEL = "gpt-4.1-mini"
37
+ DEFAULT_VISION_MODEL = "gpt-4.1-mini"
38
+ DEFAULT_PARSE_MODEL = "gpt-4.1-mini"
39
39
  DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
40
40
  DEFAULT_EMBEDDING_DIMENSIONS = 3072
41
41
  DEFAULT_TRANSCRIPTION_MODEL = "gpt-4o-mini-transcribe"
@@ -163,9 +163,8 @@ class OpenAIAdapter(LLMProvider):
163
163
  api_key: Optional[str] = None,
164
164
  base_url: Optional[str] = None,
165
165
  model: Optional[str] = None,
166
- functions: Optional[List[Dict[str, Any]]] = None,
167
- function_call: Optional[Union[str, Dict[str, Any]]] = None,
168
- ) -> Any: # pragma: no cover
166
+ tools: Optional[List[Dict[str, Any]]] = None,
167
+ ) -> str: # pragma: no cover
169
168
  """Generate text or function call from OpenAI models."""
170
169
  messages = []
171
170
  if system_prompt:
@@ -176,10 +175,8 @@ class OpenAIAdapter(LLMProvider):
176
175
  "messages": messages,
177
176
  "model": model or self.text_model,
178
177
  }
179
- if functions:
180
- request_params["functions"] = functions
181
- if function_call:
182
- request_params["function_call"] = function_call
178
+ if tools:
179
+ request_params["tools"] = tools
183
180
 
184
181
  if api_key and base_url:
185
182
  client = AsyncOpenAI(api_key=api_key, base_url=base_url)
@@ -410,8 +407,7 @@ class OpenAIAdapter(LLMProvider):
410
407
  api_key: Optional[str] = None,
411
408
  base_url: Optional[str] = None,
412
409
  model: Optional[str] = None,
413
- functions: Optional[List[Dict[str, Any]]] = None,
414
- function_call: Optional[Union[str, Dict[str, Any]]] = None,
410
+ tools: Optional[List[Dict[str, Any]]] = None,
415
411
  ) -> T: # pragma: no cover
416
412
  """Generate structured output using Pydantic model parsing with Instructor."""
417
413
 
@@ -439,10 +435,8 @@ class OpenAIAdapter(LLMProvider):
439
435
  "response_model": model_class,
440
436
  "max_retries": 2, # Automatically retry on validation errors
441
437
  }
442
- if functions:
443
- create_args["tools"] = functions
444
- if function_call:
445
- create_args["function_call"] = function_call
438
+ if tools:
439
+ create_args["tools"] = tools
446
440
 
447
441
  response = await patched_client.chat.completions.create(**create_args)
448
442
  return response
@@ -195,27 +195,6 @@ class SolanaAgentFactory:
195
195
  model="gemini-2.5-flash-preview-05-20",
196
196
  ) # pragma: no cover
197
197
 
198
- elif "grok" in config and "api_key" in config["grok"]:
199
- # Create primary services
200
- agent_service = AgentService(
201
- llm_provider=llm_adapter,
202
- business_mission=business_mission,
203
- config=config,
204
- api_key=config["grok"]["api_key"],
205
- base_url="https://api.x.ai/v1",
206
- model="grok-3-fast",
207
- output_guardrails=output_guardrails,
208
- ) # pragma: no cover
209
-
210
- # Create routing service
211
- routing_service = RoutingService(
212
- llm_provider=llm_adapter,
213
- agent_service=agent_service,
214
- api_key=config["gemini"]["api_key"],
215
- base_url="https://api.x.ai/v1",
216
- model="grok-3-mini-fast",
217
- ) # pragma: no cover
218
-
219
198
  elif "ollama" in config and "api_key" in config["ollama"]:
220
199
  # Create primary services
221
200
  agent_service = AgentService(
@@ -28,8 +28,7 @@ class LLMProvider(ABC):
28
28
  api_key: Optional[str] = None,
29
29
  base_url: Optional[str] = None,
30
30
  model: Optional[str] = None,
31
- functions: Optional[List[Dict[str, Any]]] = None,
32
- function_call: Optional[Union[str, Dict[str, Any]]] = None,
31
+ tools: Optional[List[Dict[str, Any]]] = None,
33
32
  ) -> Any:
34
33
  """Generate text from the language model."""
35
34
  pass
@@ -43,8 +42,7 @@ class LLMProvider(ABC):
43
42
  api_key: Optional[str] = None,
44
43
  base_url: Optional[str] = None,
45
44
  model: Optional[str] = None,
46
- functions: Optional[List[Dict[str, Any]]] = None,
47
- function_call: Optional[Union[str, Dict[str, Any]]] = None,
45
+ tools: Optional[List[Dict[str, Any]]] = None,
48
46
  ) -> T:
49
47
  """Generate structured output using a specific model class."""
50
48
  pass
@@ -106,6 +104,7 @@ class LLMProvider(ABC):
106
104
  images: List[Union[str, bytes]],
107
105
  system_prompt: str = "",
108
106
  detail: Literal["low", "high", "auto"] = "auto",
107
+ tools: Optional[List[Dict[str, Any]]] = None,
109
108
  ) -> str:
110
109
  """Generate text from the language model using images."""
111
110
  pass
@@ -267,11 +267,15 @@ class AgentService(AgentServiceInterface):
267
267
  full_prompt += f"USER IDENTIFIER: {user_id}"
268
268
 
269
269
  # Get OpenAI function schemas for this agent's tools
270
- functions = [
270
+ tools = [
271
271
  {
272
- "name": tool["name"],
273
- "description": tool.get("description", ""),
274
- "parameters": tool.get("parameters", {}),
272
+ "type": "function",
273
+ "function": {
274
+ "name": tool["name"],
275
+ "description": tool.get("description", ""),
276
+ "parameters": tool.get("parameters", {}),
277
+ "strict": True,
278
+ },
275
279
  }
276
280
  for tool in self.get_agent_tools(agent_name)
277
281
  ]
@@ -286,8 +290,7 @@ class AgentService(AgentServiceInterface):
286
290
  api_key=self.api_key,
287
291
  base_url=self.base_url,
288
292
  model=self.model,
289
- functions=functions if functions else None,
290
- function_call="auto" if functions else None,
293
+ tools=tools if tools else None,
291
294
  )
292
295
  yield model_instance
293
296
  return
@@ -295,15 +298,25 @@ class AgentService(AgentServiceInterface):
295
298
  # --- Streaming text/audio with tool support (as before) ---
296
299
  response_text = ""
297
300
  while True:
298
- response = await self.llm_provider.generate_text(
299
- prompt=full_prompt,
300
- system_prompt=system_prompt,
301
- functions=functions if functions else None,
302
- function_call="auto" if functions else None,
303
- api_key=self.api_key,
304
- base_url=self.base_url,
305
- model=self.model,
306
- )
301
+ if not images:
302
+ response = await self.llm_provider.generate_text(
303
+ prompt=full_prompt,
304
+ system_prompt=system_prompt,
305
+ api_key=self.api_key,
306
+ base_url=self.base_url,
307
+ model=self.model,
308
+ tools=tools if tools else None,
309
+ )
310
+ else:
311
+ response = await self.llm_provider.generate_text_with_images(
312
+ prompt=full_prompt,
313
+ system_prompt=system_prompt,
314
+ api_key=self.api_key,
315
+ base_url=self.base_url,
316
+ model=self.model,
317
+ tools=tools if tools else None,
318
+ images=images,
319
+ )
307
320
  if (
308
321
  not response
309
322
  or not hasattr(response, "choices")
@@ -316,25 +329,24 @@ class AgentService(AgentServiceInterface):
316
329
  choice = response.choices[0]
317
330
  message = getattr(choice, "message", choice)
318
331
 
319
- # If the model wants to call a function/tool
320
- if hasattr(message, "function_call") and message.function_call:
321
- function_name = message.function_call.name
322
- arguments = json.loads(message.function_call.arguments)
323
- logger.info(
324
- f"Model requested tool '{function_name}' with args: {arguments}"
325
- )
326
-
327
- # Execute the tool (async)
328
- tool_result = await self.execute_tool(
329
- agent_name, function_name, arguments
330
- )
331
-
332
- # Add the tool result to the prompt for the next round
333
- full_prompt += (
334
- f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
335
- f"Result: {tool_result}\n"
336
- )
337
- continue # Loop again, LLM will see tool result and may call another tool or finish
332
+ if hasattr(message, "tool_calls") and message.tool_calls:
333
+ for tool_call in message.tool_calls:
334
+ if tool_call.type == "function":
335
+ function_name = tool_call.function.name
336
+ arguments = json.loads(tool_call.function.arguments)
337
+ logger.info(
338
+ f"Model requested tool '{function_name}' with args: {arguments}"
339
+ )
340
+ # Execute the tool (async)
341
+ tool_result = await self.execute_tool(
342
+ agent_name, function_name, arguments
343
+ )
344
+ # Add the tool result to the prompt for the next round
345
+ full_prompt += (
346
+ f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
347
+ f"Result: {tool_result}\n"
348
+ )
349
+ continue
338
350
 
339
351
  # Otherwise, it's a normal message (final answer)
340
352
  response_text = message.content
File without changes