solana-agent 29.3.0__py3-none-any.whl → 30.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -33,8 +33,8 @@ logger = logging.getLogger(__name__)
 
 T = TypeVar("T", bound=BaseModel)
 
-DEFAULT_CHAT_MODEL = "gpt-4.1"
-DEFAULT_VISION_MODEL = "gpt-4.1"
+DEFAULT_CHAT_MODEL = "gpt-4.1-nano"
+DEFAULT_VISION_MODEL = "gpt-4.1-nano"
 DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
 DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
 DEFAULT_EMBEDDING_DIMENSIONS = 3072
@@ -163,9 +163,8 @@ class OpenAIAdapter(LLMProvider):
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         model: Optional[str] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-    ) -> Any:  # pragma: no cover
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> str:  # pragma: no cover
         """Generate text or function call from OpenAI models."""
         messages = []
         if system_prompt:
@@ -176,10 +175,8 @@ class OpenAIAdapter(LLMProvider):
             "messages": messages,
             "model": model or self.text_model,
         }
-        if functions:
-            request_params["functions"] = functions
-        if function_call:
-            request_params["function_call"] = function_call
+        if tools:
+            request_params["tools"] = tools
 
         if api_key and base_url:
             client = AsyncOpenAI(api_key=api_key, base_url=base_url)
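For reference, these two hunks move the adapter from the deprecated Chat Completions `functions`/`function_call` parameters to the `tools` parameter. A minimal sketch of the new-style call, assuming `OPENAI_API_KEY` is set and using a hypothetical `get_weather` tool schema that is not part of this package:

```python
import asyncio

from openai import AsyncOpenAI

# Hypothetical tool schema for illustration; the real schemas come from the
# agent's registered tools elsewhere in this package.
WEATHER_TOOL = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}


async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    response = await client.chat.completions.create(
        model="gpt-4.1-nano",
        messages=[{"role": "user", "content": "What's the weather in Lisbon?"}],
        tools=[WEATHER_TOOL],  # replaces the removed functions=/function_call= pair
    )
    print(response.choices[0].message)


asyncio.run(main())
```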
@@ -410,8 +407,7 @@ class OpenAIAdapter(LLMProvider):
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         model: Optional[str] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> T:  # pragma: no cover
         """Generate structured output using Pydantic model parsing with Instructor."""
 
@@ -439,10 +435,8 @@ class OpenAIAdapter(LLMProvider):
             "response_model": model_class,
             "max_retries": 2,  # Automatically retry on validation errors
         }
-        if functions:
-            create_args["tools"] = functions
-        if function_call:
-            create_args["function_call"] = function_call
+        if tools:
+            create_args["tools"] = tools
 
         response = await patched_client.chat.completions.create(**create_args)
         return response
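The structured-output path keeps the Instructor-patched client and simply forwards `tools` instead of the old `functions`/`function_call` pair. A minimal sketch of that pattern, assuming the `instructor` package and its current `from_openai` entry point (the adapter's own patching call is not shown in this hunk), with a hypothetical `CityWeather` response model:

```python
import asyncio

import instructor
from openai import AsyncOpenAI
from pydantic import BaseModel


class CityWeather(BaseModel):  # hypothetical response model for illustration
    city: str
    summary: str


async def main() -> None:
    patched_client = instructor.from_openai(AsyncOpenAI())
    result = await patched_client.chat.completions.create(
        model="gpt-4.1-nano",
        messages=[{"role": "user", "content": "Describe the weather in Lisbon."}],
        response_model=CityWeather,  # Instructor parses and validates into this model
        max_retries=2,  # retry automatically on validation errors
    )
    print(result.model_dump())


asyncio.run(main())
```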
@@ -195,27 +195,6 @@ class SolanaAgentFactory:
             model="gemini-2.5-flash-preview-05-20",
         )  # pragma: no cover
 
-        elif "grok" in config and "api_key" in config["grok"]:
-            # Create primary services
-            agent_service = AgentService(
-                llm_provider=llm_adapter,
-                business_mission=business_mission,
-                config=config,
-                api_key=config["grok"]["api_key"],
-                base_url="https://api.x.ai/v1",
-                model="grok-3-fast",
-                output_guardrails=output_guardrails,
-            )  # pragma: no cover
-
-            # Create routing service
-            routing_service = RoutingService(
-                llm_provider=llm_adapter,
-                agent_service=agent_service,
-                api_key=config["gemini"]["api_key"],
-                base_url="https://api.x.ai/v1",
-                model="grok-3-mini-fast",
-            )  # pragma: no cover
-
         elif "ollama" in config and "api_key" in config["ollama"]:
             # Create primary services
             agent_service = AgentService(
@@ -28,8 +28,7 @@ class LLMProvider(ABC):
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         model: Optional[str] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Any:
         """Generate text from the language model."""
         pass
@@ -43,8 +42,7 @@ class LLMProvider(ABC):
         api_key: Optional[str] = None,
         base_url: Optional[str] = None,
         model: Optional[str] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> T:
         """Generate structured output using a specific model class."""
         pass
@@ -106,6 +104,7 @@ class LLMProvider(ABC):
         images: List[Union[str, bytes]],
         system_prompt: str = "",
         detail: Literal["low", "high", "auto"] = "auto",
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> str:
         """Generate text from the language model using images."""
         pass
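Taken together, these interface hunks add an optional `tools` list to `generate_text`, `generate_structured_output`, and `generate_text_with_images`. A shape-only sketch of what a custom provider now needs to accept; the real abstract base class lives in `solana_agent/interfaces/providers/llm.py` and has more methods and parameters than shown here:

```python
from typing import Any, Dict, List, Literal, Optional, Union


class EchoProvider:
    """Toy stand-in that only illustrates the tools-aware signatures."""

    async def generate_text(
        self,
        prompt: str,
        system_prompt: str = "",
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        model: Optional[str] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Any:
        # A real provider would call its backing LLM here, passing tools through.
        return f"echo: {prompt}"

    async def generate_text_with_images(
        self,
        prompt: str,
        images: List[Union[str, bytes]],
        system_prompt: str = "",
        detail: Literal["low", "high", "auto"] = "auto",
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> str:
        return f"echo ({len(images)} images): {prompt}"
```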
@@ -267,11 +267,15 @@ class AgentService(AgentServiceInterface):
             full_prompt += f"USER IDENTIFIER: {user_id}"
 
         # Get OpenAI function schemas for this agent's tools
-        functions = [
+        tools = [
             {
-                "name": tool["name"],
-                "description": tool.get("description", ""),
-                "parameters": tool.get("parameters", {}),
+                "type": "function",
+                "function": {
+                    "name": tool["name"],
+                    "description": tool.get("description", ""),
+                    "parameters": tool.get("parameters", {}),
+                    "strict": True,
+                },
             }
             for tool in self.get_agent_tools(agent_name)
         ]
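Each agent tool is now wrapped in the Chat Completions tools shape rather than the flat functions shape. Side by side, for a hypothetical `search_internet` tool (name and schema invented for illustration):

```python
# Pre-30.0.0 `functions` item (flat shape)
legacy_function_entry = {
    "name": "search_internet",
    "description": "Search the web.",
    "parameters": {"type": "object", "properties": {"query": {"type": "string"}}},
}

# 30.0.0 `tools` item (wrapped shape built by the hunk above)
tools_entry = {
    "type": "function",
    "function": {
        "name": "search_internet",
        "description": "Search the web.",
        "parameters": {"type": "object", "properties": {"query": {"type": "string"}}},
        "strict": True,
    },
}
```

Note that OpenAI's strict function calling expects every property to be listed in `required` and `additionalProperties` to be `false`, so existing tool parameter schemas may need that shape for `"strict": True` to be accepted.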
@@ -286,8 +290,7 @@ class AgentService(AgentServiceInterface):
                 api_key=self.api_key,
                 base_url=self.base_url,
                 model=self.model,
-                functions=functions if functions else None,
-                function_call="auto" if functions else None,
+                tools=tools if tools else None,
             )
             yield model_instance
             return
@@ -295,15 +298,25 @@ class AgentService(AgentServiceInterface):
         # --- Streaming text/audio with tool support (as before) ---
         response_text = ""
         while True:
-            response = await self.llm_provider.generate_text(
-                prompt=full_prompt,
-                system_prompt=system_prompt,
-                functions=functions if functions else None,
-                function_call="auto" if functions else None,
-                api_key=self.api_key,
-                base_url=self.base_url,
-                model=self.model,
-            )
+            if not images:
+                response = await self.llm_provider.generate_text(
+                    prompt=full_prompt,
+                    system_prompt=system_prompt,
+                    api_key=self.api_key,
+                    base_url=self.base_url,
+                    model=self.model,
+                    tools=tools if tools else None,
+                )
+            else:
+                response = await self.llm_provider.generate_text_with_images(
+                    prompt=full_prompt,
+                    system_prompt=system_prompt,
+                    api_key=self.api_key,
+                    base_url=self.base_url,
+                    model=self.model,
+                    tools=tools if tools else None,
+                    images=images,
+                )
             if (
                 not response
                 or not hasattr(response, "choices")
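The new `else` branch sends image-bearing turns through `generate_text_with_images` with the same `tools` list. The OpenAI adapter presumably translates this into a multimodal Chat Completions message; a minimal sketch of that message shape, assuming an image URL input (raw bytes would instead be encoded as a base64 data URL):

```python
from openai import AsyncOpenAI


async def ask_about_image(prompt: str, image_url: str, tools: list | None = None) -> str:
    """Send one text part and one image_url part in a single user message."""
    client = AsyncOpenAI()
    request_params = {
        "model": "gpt-4.1-nano",
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url", "image_url": {"url": image_url, "detail": "auto"}},
                ],
            }
        ],
    }
    if tools:  # mirror the adapter: only send tools when there are any
        request_params["tools"] = tools
    response = await client.chat.completions.create(**request_params)
    return response.choices[0].message.content or ""
```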
@@ -316,25 +329,24 @@ class AgentService(AgentServiceInterface):
             choice = response.choices[0]
             message = getattr(choice, "message", choice)
 
-            # If the model wants to call a function/tool
-            if hasattr(message, "function_call") and message.function_call:
-                function_name = message.function_call.name
-                arguments = json.loads(message.function_call.arguments)
-                logger.info(
-                    f"Model requested tool '{function_name}' with args: {arguments}"
-                )
-
-                # Execute the tool (async)
-                tool_result = await self.execute_tool(
-                    agent_name, function_name, arguments
-                )
-
-                # Add the tool result to the prompt for the next round
-                full_prompt += (
-                    f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
-                    f"Result: {tool_result}\n"
-                )
-                continue  # Loop again, LLM will see tool result and may call another tool or finish
+            if hasattr(message, "tool_calls") and message.tool_calls:
+                for tool_call in message.tool_calls:
+                    if tool_call.type == "function":
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
+                        logger.info(
+                            f"Model requested tool '{function_name}' with args: {arguments}"
+                        )
+                        # Execute the tool (async)
+                        tool_result = await self.execute_tool(
+                            agent_name, function_name, arguments
+                        )
+                        # Add the tool result to the prompt for the next round
+                        full_prompt += (
+                            f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
+                            f"Result: {tool_result}\n"
+                        )
+                continue
 
             # Otherwise, it's a normal message (final answer)
             response_text = message.content
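With the tools API, a single assistant turn can request several tool calls, so the handler now iterates `message.tool_calls` instead of reading the single deprecated `message.function_call`. A minimal standalone sketch of that extraction step (the `message` object here is whatever the Chat Completions response returns):

```python
import json


def extract_tool_requests(message) -> list[tuple[str, dict]]:
    """Collect (name, arguments) pairs from a Chat Completions message.

    Returns an empty list for plain text answers, mirroring the loop above.
    """
    requests = []
    for tool_call in getattr(message, "tool_calls", None) or []:
        if tool_call.type == "function":
            name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments or "{}")
            requests.append((name, arguments))
    return requests
```

As before, tool results are fed back to the model by appending them to the prompt text rather than as `role: "tool"` messages, which keeps the loop independent of any one provider's conversation format.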
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 29.3.0
+Version: 30.0.0
 Summary: AI Agents for Solana
 License: MIT
 Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
@@ -53,7 +53,6 @@ Build your AI agents in three lines of code!
 * Three lines of code setup
 * Simple Agent Definition
 * Fast Responses
-* Multi-Vendor Support
 * Solana Integration
 * Multi-Agent Swarm
 * Multi-Modal (Images & Audio & Text)
@@ -79,7 +78,6 @@ Build your AI agents in three lines of code!
 * Easy three lines of code setup
 * Simple agent definition using JSON
 * Fast AI responses
-* Multi-vendor support including OpenAI, Grok, and Gemini AI services
 * Solana Integration
 * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
 * Integrated observability and tracing via [Pydantic Logfire](https://pydantic.dev/logfire)
@@ -114,25 +112,10 @@ Build your AI agents in three lines of code!
 ### AI Models Used
 
 **OpenAI**
-* [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent - can be overridden)
-* [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router - can be overridden)
+* [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (agent & router)
 * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) (embedding)
 * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
 * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
-* [gpt-image-1](https://platform.openai.com/docs/models/gpt-image-1) (image generation - can be overridden)
-* [gpt-4o-mini-search-preview](https://platform.openai.com/docs/models/gpt-4o-mini-search-preview) (Internet search)
-
-**Grok**
-* [grok-3-fast](https://x.ai/api#pricing) (agent - optional)
-* [grok-3-mini-fast](https://x.ai/api#pricing) (router - optional)
-* [grok-2-image](https://x.ai/api#pricing) (image generation - optional)
-
-**Gemini**
-* [gemini-2.5-flash-preview-04-17](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (agent & router - optional)
-* [imagen-3.0-generate-002](https://ai.google.dev/gemini-api/docs/models#imagen-3) (image generation - optional)
-
-**Ollama**
-* [gemma:4b-it-qat](https://ollama.com/library/gemma3) - (agent & router - optional)
 
 ## Installation
 
@@ -467,36 +450,6 @@ config = {
 }
 ```
 
-### Grok
-
-```python
-config = {
-    "grok": {
-        "api_key": "your-grok-api-key",
-    },
-}
-```
-
-### Gemini
-
-```python
-config = {
-    "gemini": {
-        "api_key": "your-gemini-api-key",
-    },
-}
-```
-
-### Ollama
-
-```python
-config = {
-    "ollama": {
-        "api_key": "use-this-key-1010"
-    },
-}
-```
-
 ### Knowledge Base
 
 The Knowledge Base (KB) is meant to store text values and/or PDFs (extracts text) - can handle very large PDFs.
@@ -1,7 +1,7 @@
 solana_agent/__init__.py,sha256=g83qhMOCwcWL19V4CYbQwl0Ykpb0xn49OUh05i-pu3g,1001
 solana_agent/adapters/__init__.py,sha256=tiEEuuy0NF3ngc_tGEcRTt71zVI58v3dYY9RvMrF2Cg,204
 solana_agent/adapters/mongodb_adapter.py,sha256=Hq3S8VzfLmnPjV40z8yJXGqUamOJcX5GbOMd-1nNWO4,3175
-solana_agent/adapters/openai_adapter.py,sha256=2-YvMbrBdrHCkMt1qJPD6B926vgfEdkHqnbu3exUe9c,23584
+solana_agent/adapters/openai_adapter.py,sha256=l1KQch01LhtbKFHz76oXI1Lvz_AM2zZzCMh2qOcbzmM,23253
 solana_agent/adapters/pinecone_adapter.py,sha256=XlfOpoKHwzpaU4KZnovO2TnEYbsw-3B53ZKQDtBeDgU,23847
 solana_agent/cli.py,sha256=FGvTIQmKLp6XsQdyKtuhIIfbBtMmcCCXfigNrj4bzMc,4704
 solana_agent/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -10,14 +10,14 @@ solana_agent/domains/__init__.py,sha256=HiC94wVPRy-QDJSSRywCRrhrFfTBeHjfi5z-QfZv
 solana_agent/domains/agent.py,sha256=3Q1wg4eIul0CPpaYBOjEthKTfcdhf1SAiWc2R-IMGO8,2561
 solana_agent/domains/routing.py,sha256=1yR4IswGcmREGgbOOI6TKCfuM7gYGOhQjLkBqnZ-rNo,582
 solana_agent/factories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-solana_agent/factories/agent_factory.py,sha256=P8d-wGdWhksu79jngwTtL4jOeMjeo3OfiUJhwPWVh7U,15897
+solana_agent/factories/agent_factory.py,sha256=IekAzqLSQe1pvtFr124JvyESoWHZeP7FKK7pVuqD85E,15058
 solana_agent/guardrails/pii.py,sha256=FCz1IC3mmkr41QFFf5NaC0fwJrVkwFsxgyOCS2POO5I,4428
 solana_agent/interfaces/__init__.py,sha256=IQs1WIM1FeKP1-kY2FEfyhol_dB-I-VAe2rD6jrVF6k,355
 solana_agent/interfaces/client/client.py,sha256=9hg35-hp_CI-WVGOXehBE1ZCKYahLmbeAvtQOYmML4o,3245
 solana_agent/interfaces/guardrails/guardrails.py,sha256=gZCQ1FrirW-mX6s7FoYrbRs6golsp-x269kk4kQiZzc,572
 solana_agent/interfaces/plugins/plugins.py,sha256=Rz52cWBLdotwf4kV-2mC79tRYlN29zHSu1z9-y1HVPk,3329
 solana_agent/interfaces/providers/data_storage.py,sha256=Y92Cq8BtC55VlsYLD7bo3ofqQabNnlg7Q4H1Q6CDsLU,1713
-solana_agent/interfaces/providers/llm.py,sha256=Si1xvybHKC6bhjTYjGQi5K9PO9WA7Te29oqs24jxRlY,2998
+solana_agent/interfaces/providers/llm.py,sha256=Naj8gTGi3GpIMFHKwQjw7EuAF_uSWwwz2-41iUYtov4,2908
 solana_agent/interfaces/providers/memory.py,sha256=h3HEOwWCiFGIuFBX49XOv1jFaQW3NGjyKPOfmQloevk,1011
 solana_agent/interfaces/providers/vector_storage.py,sha256=XPYzvoWrlDVFCS9ItBmoqCFWXXWNYY-d9I7_pvP7YYk,1561
 solana_agent/interfaces/services/agent.py,sha256=A-Hmgelr3g_qaNB0PEPMFHxB5nSCBK0WJ5hauJtIcmI,2257
@@ -32,12 +32,12 @@ solana_agent/plugins/tools/auto_tool.py,sha256=uihijtlc9CCqCIaRcwPuuN7o1SHIpWL2G
 solana_agent/repositories/__init__.py,sha256=fP83w83CGzXLnSdq-C5wbw9EhWTYtqE2lQTgp46-X_4,163
 solana_agent/repositories/memory.py,sha256=SKQJJisrERccqd4cm4ERlp5BmKHVQAp1fzp8ce4i2bw,8377
 solana_agent/services/__init__.py,sha256=iko0c2MlF8b_SA_nuBGFllr2E3g_JowOrOzGcnU9tkA,162
-solana_agent/services/agent.py,sha256=kznv2Jhpc4DiFbB-kV3eG5FuPPgl2Hu26h5yCsxyCuw,18624
+solana_agent/services/agent.py,sha256=acfauSIdDQdwuvqyDpx6VryQpL3nNfss3NlVtnI_kUg,19191
 solana_agent/services/knowledge_base.py,sha256=ZvOPrSmcNDgUzz4bJIQ4LeRl9vMZiK9hOfs71IpB7Bk,32735
 solana_agent/services/query.py,sha256=3v5Ym8UqL0rfOC-0MWHALAsS2jVWdpUR3A-YI9n0xyo,18771
 solana_agent/services/routing.py,sha256=C5Ku4t9TqvY7S8wlUPMTC04HCrT4Ib3E8Q8yX0lVU_s,7137
-solana_agent-29.3.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
-solana_agent-29.3.0.dist-info/METADATA,sha256=YYGG6n8-S23KwMjDY3Rmec7DcCHaVTv8e4lecOB9J7I,29849
-solana_agent-29.3.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-solana_agent-29.3.0.dist-info/entry_points.txt,sha256=-AuT_mfqk8dlZ0pHuAjx1ouAWpTRjpqvEUa6YV3lmc0,53
-solana_agent-29.3.0.dist-info/RECORD,,
+solana_agent-30.0.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+solana_agent-30.0.0.dist-info/METADATA,sha256=MWvby4A_W0bi7MZS4xwQ5MBKOl3U99kcwctE1_m_sjQ,28557
+solana_agent-30.0.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+solana_agent-30.0.0.dist-info/entry_points.txt,sha256=-AuT_mfqk8dlZ0pHuAjx1ouAWpTRjpqvEUa6YV3lmc0,53
+solana_agent-30.0.0.dist-info/RECORD,,