solana-agent 29.2.3__py3-none-any.whl → 30.0.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
@@ -33,8 +33,8 @@ logger = logging.getLogger(__name__)
 
  T = TypeVar("T", bound=BaseModel)
 
- DEFAULT_CHAT_MODEL = "gpt-4.1"
- DEFAULT_VISION_MODEL = "gpt-4.1"
+ DEFAULT_CHAT_MODEL = "gpt-4.1-nano"
+ DEFAULT_VISION_MODEL = "gpt-4.1-nano"
  DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
  DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
  DEFAULT_EMBEDDING_DIMENSIONS = 3072
@@ -163,9 +163,8 @@ class OpenAIAdapter(LLMProvider):
  api_key: Optional[str] = None,
  base_url: Optional[str] = None,
  model: Optional[str] = None,
- functions: Optional[List[Dict[str, Any]]] = None,
- function_call: Optional[Union[str, Dict[str, Any]]] = None,
- ) -> Any: # pragma: no cover
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> str: # pragma: no cover
  """Generate text or function call from OpenAI models."""
  messages = []
  if system_prompt:
@@ -176,10 +175,8 @@ class OpenAIAdapter(LLMProvider):
  "messages": messages,
  "model": model or self.text_model,
  }
- if functions:
- request_params["functions"] = functions
- if function_call:
- request_params["function_call"] = function_call
+ if tools:
+ request_params["tools"] = tools
 
  if api_key and base_url:
  client = AsyncOpenAI(api_key=api_key, base_url=base_url)
@@ -163,9 +163,8 @@ class OpenAIAdapter(LLMProvider):
163
163
  api_key: Optional[str] = None,
164
164
  base_url: Optional[str] = None,
165
165
  model: Optional[str] = None,
166
- functions: Optional[List[Dict[str, Any]]] = None,
167
- function_call: Optional[Union[str, Dict[str, Any]]] = None,
168
- ) -> Any: # pragma: no cover
166
+ tools: Optional[List[Dict[str, Any]]] = None,
167
+ ) -> str: # pragma: no cover
169
168
  """Generate text or function call from OpenAI models."""
170
169
  messages = []
171
170
  if system_prompt:
@@ -176,10 +175,8 @@ class OpenAIAdapter(LLMProvider):
176
175
  "messages": messages,
177
176
  "model": model or self.text_model,
178
177
  }
179
- if functions:
180
- request_params["functions"] = functions
181
- if function_call:
182
- request_params["function_call"] = function_call
178
+ if tools:
179
+ request_params["tools"] = tools
183
180
 
184
181
  if api_key and base_url:
185
182
  client = AsyncOpenAI(api_key=api_key, base_url=base_url)
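In 30.0.0 the adapter drops the legacy `functions` / `function_call` parameters in favor of a single `tools` argument carrying OpenAI tool-schema entries. A minimal sketch of one such entry follows; the tool name and JSON schema are illustrative, not part of the package.

```python
# Illustrative only: one entry of the `tools` list accepted by the new
# generate_text signature, in the OpenAI tools-API shape.
search_tool = {
    "type": "function",
    "function": {
        "name": "search_internet",  # hypothetical tool name
        "description": "Search the internet for a query.",
        "parameters": {
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
        },
    },
}

# The adapter now sets request_params["tools"] = [search_tool]
# instead of the old "functions" / "function_call" keys.
```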
@@ -410,6 +407,7 @@ class OpenAIAdapter(LLMProvider):
  api_key: Optional[str] = None,
  base_url: Optional[str] = None,
  model: Optional[str] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> T: # pragma: no cover
  """Generate structured output using Pydantic model parsing with Instructor."""
 
@@ -431,13 +429,16 @@ class OpenAIAdapter(LLMProvider):
 
  patched_client = instructor.from_openai(client, mode=Mode.TOOLS_STRICT)
 
- # Use instructor's structured generation with function calling
- response = await patched_client.chat.completions.create(
- model=current_parse_model, # Use the determined model
- messages=messages,
- response_model=model_class,
- max_retries=2, # Automatically retry on validation errors
- )
+ create_args = {
+ "model": current_parse_model,
+ "messages": messages,
+ "response_model": model_class,
+ "max_retries": 2, # Automatically retry on validation errors
+ }
+ if tools:
+ create_args["tools"] = tools
+
+ response = await patched_client.chat.completions.create(**create_args)
  return response
  except Exception as e:
  logger.warning(
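The structured-output path now assembles its call arguments in a `create_args` dict so an optional `tools` list can be forwarded to the Instructor-patched client. A standalone sketch of the same pattern, assuming an `OPENAI_API_KEY` in the environment and an illustrative `Answer` model (the tools pass-through is omitted for brevity):

```python
import asyncio

import instructor
from instructor import Mode
from openai import AsyncOpenAI
from pydantic import BaseModel


class Answer(BaseModel):
    summary: str
    confidence: float


async def main() -> None:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set
    patched_client = instructor.from_openai(client, mode=Mode.TOOLS_STRICT)

    # Build the call arguments up front, mirroring the adapter's create_args dict.
    create_args = {
        "model": "gpt-4.1-nano",
        "messages": [{"role": "user", "content": "Summarize the Solana whitepaper."}],
        "response_model": Answer,
        "max_retries": 2,  # retry automatically on validation errors
    }
    answer = await patched_client.chat.completions.create(**create_args)
    print(answer.model_dump())


asyncio.run(main())
```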
@@ -7,7 +7,9 @@ the agent system without dealing with internal implementation details.
 
  import json
  import importlib.util
- from typing import AsyncGenerator, Dict, Any, List, Literal, Optional, Union
+ from typing import AsyncGenerator, Dict, Any, List, Literal, Optional, Type, Union
+
+ from pydantic import BaseModel
 
  from solana_agent.factories.agent_factory import SolanaAgentFactory
  from solana_agent.interfaces.client.client import SolanaAgent as SolanaAgentInterface
@@ -69,7 +71,8 @@ class SolanaAgent(SolanaAgentInterface):
  ] = "mp4",
  router: Optional[RoutingInterface] = None,
  images: Optional[List[Union[str, bytes]]] = None,
- ) -> AsyncGenerator[Union[str, bytes], None]: # pragma: no cover
+ output_model: Optional[Type[BaseModel]] = None,
+ ) -> AsyncGenerator[Union[str, bytes, BaseModel], None]: # pragma: no cover
  """Process a user message (text or audio) and optional images, returning the response stream.
 
  Args:
@@ -83,6 +86,7 @@ class SolanaAgent(SolanaAgentInterface):
  audio_input_format: Audio input format
  router: Optional routing service for processing
  images: Optional list of image URLs (str) or image bytes.
+ output_model: Optional Pydantic model for structured output
 
  Returns:
  Async generator yielding response chunks (text strings or audio bytes)
@@ -98,6 +102,7 @@ class SolanaAgent(SolanaAgentInterface):
  audio_input_format=audio_input_format,
  prompt=prompt,
  router=router,
+ output_model=output_model,
  ):
  yield chunk
 
@@ -195,27 +195,6 @@ class SolanaAgentFactory:
  model="gemini-2.5-flash-preview-05-20",
  ) # pragma: no cover
 
- elif "grok" in config and "api_key" in config["grok"]:
- # Create primary services
- agent_service = AgentService(
- llm_provider=llm_adapter,
- business_mission=business_mission,
- config=config,
- api_key=config["grok"]["api_key"],
- base_url="https://api.x.ai/v1",
- model="grok-3-fast",
- output_guardrails=output_guardrails,
- ) # pragma: no cover
-
- # Create routing service
- routing_service = RoutingService(
- llm_provider=llm_adapter,
- agent_service=agent_service,
- api_key=config["gemini"]["api_key"],
- base_url="https://api.x.ai/v1",
- model="grok-3-mini-fast",
- ) # pragma: no cover
-
  elif "ollama" in config and "api_key" in config["ollama"]:
  # Create primary services
  agent_service = AgentService(
@@ -1,5 +1,7 @@
  from abc import ABC, abstractmethod
- from typing import AsyncGenerator, Dict, Any, List, Literal, Optional, Union
+ from typing import AsyncGenerator, Dict, Any, List, Literal, Optional, Type, Union
+
+ from pydantic import BaseModel
  from solana_agent.interfaces.plugins.plugins import Tool
  from solana_agent.interfaces.services.routing import RoutingService as RoutingInterface
 
@@ -35,7 +37,8 @@ class SolanaAgent(ABC):
  ] = "mp4",
  router: Optional[RoutingInterface] = None,
  images: Optional[List[Union[str, bytes]]] = None,
- ) -> AsyncGenerator[Union[str, bytes], None]:
+ output_model: Optional[Type[BaseModel]] = None,
+ ) -> AsyncGenerator[Union[str, bytes, BaseModel], None]:
  """Process a user message and return the response stream."""
  pass
 
@@ -28,8 +28,7 @@ class LLMProvider(ABC):
  api_key: Optional[str] = None,
  base_url: Optional[str] = None,
  model: Optional[str] = None,
- functions: Optional[List[Dict[str, Any]]] = None,
- function_call: Optional[Union[str, Dict[str, Any]]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Any:
  """Generate text from the language model."""
  pass
@@ -43,6 +42,7 @@ class LLMProvider(ABC):
  api_key: Optional[str] = None,
  base_url: Optional[str] = None,
  model: Optional[str] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> T:
  """Generate structured output using a specific model class."""
  pass
@@ -104,6 +104,7 @@ class LLMProvider(ABC):
  images: List[Union[str, bytes]],
  system_prompt: str = "",
  detail: Literal["low", "high", "auto"] = "auto",
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> str:
  """Generate text from the language model using images."""
  pass
@@ -1,5 +1,7 @@
  from abc import ABC, abstractmethod
- from typing import Any, AsyncGenerator, Dict, List, Literal, Optional, Union
+ from typing import Any, AsyncGenerator, Dict, List, Literal, Optional, Type, Union
+
+ from pydantic import BaseModel
 
  from solana_agent.domains.agent import AIAgent
 
@@ -45,7 +47,8 @@ class AgentService(ABC):
  ] = "aac",
  prompt: Optional[str] = None,
  images: Optional[List[Union[str, bytes]]] = None,
- ) -> AsyncGenerator[Union[str, bytes], None]:
+ output_model: Optional[Type[BaseModel]] = None,
+ ) -> AsyncGenerator[Union[str, bytes, BaseModel], None]:
  """Generate a response from an agent."""
  pass
 
@@ -1,5 +1,7 @@
  from abc import ABC, abstractmethod
- from typing import Any, AsyncGenerator, Dict, List, Literal, Optional, Union
+ from typing import Any, AsyncGenerator, Dict, List, Literal, Optional, Type, Union
+
+ from pydantic import BaseModel
 
  from solana_agent.interfaces.services.routing import RoutingService as RoutingInterface
 
@@ -35,7 +37,8 @@ class QueryService(ABC):
  prompt: Optional[str] = None,
  router: Optional[RoutingInterface] = None,
  images: Optional[List[Union[str, bytes]]] = None,
- ) -> AsyncGenerator[Union[str, bytes], None]:
+ output_model: Optional[Type[BaseModel]] = None,
+ ) -> AsyncGenerator[Union[str, bytes, BaseModel], None]:
  """Process the user request and generate a response."""
  pass
 
@@ -10,7 +10,9 @@ from datetime import datetime
  import json
  import logging # Add logging
  import re
- from typing import AsyncGenerator, Dict, List, Literal, Optional, Any, Union
+ from typing import AsyncGenerator, Dict, List, Literal, Optional, Any, Type, Union
+
+ from pydantic import BaseModel
 
  from solana_agent.interfaces.services.agent import AgentService as AgentServiceInterface
  from solana_agent.interfaces.providers.llm import LLMProvider
@@ -229,8 +231,9 @@ class AgentService(AgentServiceInterface):
  "mp3", "opus", "aac", "flac", "wav", "pcm"
  ] = "aac",
  prompt: Optional[str] = None,
- ) -> AsyncGenerator[Union[str, bytes], None]: # pragma: no cover
- """Generate a response using OpenAI function calling (tools API) via generate_text."""
+ output_model: Optional[Type[BaseModel]] = None,
+ ) -> AsyncGenerator[Union[str, bytes, BaseModel], None]: # pragma: no cover
+ """Generate a response using OpenAI function calling (tools API) or structured output."""
 
  agent = next((a for a in self.agents if a.name == agent_name), None)
  if not agent:
@@ -252,9 +255,7 @@ class AgentService(AgentServiceInterface):
  system_prompt = self.get_agent_system_prompt(agent_name)
  user_content = str(query)
  if images:
- user_content += (
- "\n\n[Images attached]" # Optionally, handle images as needed
- )
+ user_content += "\n\n[Images attached]"
 
  # Compose the prompt for generate_text
  full_prompt = ""
@@ -266,28 +267,56 @@ class AgentService(AgentServiceInterface):
  full_prompt += f"USER IDENTIFIER: {user_id}"
 
  # Get OpenAI function schemas for this agent's tools
- functions = []
- for tool in self.get_agent_tools(agent_name):
- functions.append(
- {
+ tools = [
+ {
+ "type": "function",
+ "function": {
  "name": tool["name"],
  "description": tool.get("description", ""),
  "parameters": tool.get("parameters", {}),
- }
- )
+ "strict": True,
+ },
+ }
+ for tool in self.get_agent_tools(agent_name)
+ ]
 
- response_text = ""
  try:
- while True:
- response = await self.llm_provider.generate_text(
+ if output_model is not None:
+ # --- Structured output with tool support ---
+ model_instance = await self.llm_provider.parse_structured_output(
  prompt=full_prompt,
  system_prompt=system_prompt,
- functions=functions if functions else None,
- function_call="auto" if functions else None,
+ model_class=output_model,
  api_key=self.api_key,
  base_url=self.base_url,
  model=self.model,
+ tools=tools if tools else None,
  )
+ yield model_instance
+ return
+
+ # --- Streaming text/audio with tool support (as before) ---
+ response_text = ""
+ while True:
+ if not images:
+ response = await self.llm_provider.generate_text(
+ prompt=full_prompt,
+ system_prompt=system_prompt,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ model=self.model,
+ tools=tools if tools else None,
+ )
+ else:
+ response = await self.llm_provider.generate_text_with_images(
+ prompt=full_prompt,
+ system_prompt=system_prompt,
+ api_key=self.api_key,
+ base_url=self.base_url,
+ model=self.model,
+ tools=tools if tools else None,
+ images=images,
+ )
  if (
  not response
  or not hasattr(response, "choices")
@@ -298,30 +327,26 @@ class AgentService(AgentServiceInterface):
  break
 
  choice = response.choices[0]
- message = getattr(
- choice, "message", choice
- ) # Support both OpenAI and instructor
-
- # If the model wants to call a function/tool
- if hasattr(message, "function_call") and message.function_call:
- function_name = message.function_call.name
- arguments = json.loads(message.function_call.arguments)
- logger.info(
- f"Model requested tool '{function_name}' with args: {arguments}"
- )
-
- # Execute the tool (async)
- tool_result = await self.execute_tool(
- agent_name, function_name, arguments
- )
-
- # Add the tool result to the prompt for the next round
- # (You may want to format this differently for your use case)
- full_prompt += (
- f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
- f"Result: {tool_result}\n"
- )
- continue # Loop again, LLM will see tool result and may call another tool or finish
+ message = getattr(choice, "message", choice)
+
+ if hasattr(message, "tool_calls") and message.tool_calls:
+ for tool_call in message.tool_calls:
+ if tool_call.type == "function":
+ function_name = tool_call.function.name
+ arguments = json.loads(tool_call.function.arguments)
+ logger.info(
+ f"Model requested tool '{function_name}' with args: {arguments}"
+ )
+ # Execute the tool (async)
+ tool_result = await self.execute_tool(
+ agent_name, function_name, arguments
+ )
+ # Add the tool result to the prompt for the next round
+ full_prompt += (
+ f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
+ f"Result: {tool_result}\n"
+ )
+ continue
 
  # Otherwise, it's a normal message (final answer)
  response_text = message.content
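The loop now reads `message.tool_calls` (the OpenAI tools-API shape) instead of the deprecated `message.function_call`. A minimal sketch of unpacking one such entry, with a hypothetical tool call stubbed in for illustration:

```python
import json
from types import SimpleNamespace

# Hypothetical stand-in for one element of message.tool_calls as returned
# by the OpenAI chat completions API when the model requests a tool.
tool_call = SimpleNamespace(
    type="function",
    function=SimpleNamespace(
        name="search_internet",
        arguments='{"query": "SOL price"}',
    ),
)

if tool_call.type == "function":
    function_name = tool_call.function.name
    # Tool arguments arrive as a JSON string and must be decoded.
    arguments = json.loads(tool_call.function.arguments)
    print(function_name, arguments)
```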
@@ -381,6 +406,7 @@ class AgentService(AgentServiceInterface):
 
  if not text:
  return ""
+ text = text.replace("’", "'").replace("‘", "'")
  text = re.sub(r"\[([^\]]+)\]\([^\)]+\)", r"\1", text)
  text = re.sub(r"`([^`]+)`", r"\1", text)
  text = re.sub(r"(\*\*|__)(.*?)\1", r"\2", text)
@@ -411,9 +437,7 @@ class AgentService(AgentServiceInterface):
  flags=re.UNICODE,
  )
  text = emoji_pattern.sub(r" ", text)
- text = re.sub(
- r"[^\w\s\.\,\;\:\?\!\'\"\-\(\)]", " ", text
- ) # Keep basic punctuation
+ text = re.sub(r"[^\w\s\.\,\;\:\?\!\'\"\-\(\)]", " ", text)
  text = re.sub(r"\s+", " ", text)
  return text.strip()
 
@@ -7,7 +7,9 @@ clean separation of concerns.
  """
 
  import logging
- from typing import Any, AsyncGenerator, Dict, List, Literal, Optional, Union
+ from typing import Any, AsyncGenerator, Dict, List, Literal, Optional, Type, Union
+
+ from pydantic import BaseModel
 
  # Interface imports
  from solana_agent.interfaces.services.query import QueryService as QueryServiceInterface
@@ -86,7 +88,8 @@ class QueryService(QueryServiceInterface):
  ] = "mp4",
  prompt: Optional[str] = None,
  router: Optional[RoutingServiceInterface] = None,
- ) -> AsyncGenerator[Union[str, bytes], None]: # pragma: no cover
+ output_model: Optional[Type[BaseModel]] = None,
+ ) -> AsyncGenerator[Union[str, bytes, BaseModel], None]: # pragma: no cover
  """Process the user request with appropriate agent and apply input guardrails.
 
  Args:
@@ -100,6 +103,7 @@ class QueryService(QueryServiceInterface):
  audio_input_format: Audio input format
  prompt: Optional prompt for the agent
  router: Optional routing service for processing
+ output_model: Optional Pydantic model for structured output
 
  Yields:
  Response chunks (text strings or audio bytes)
@@ -267,9 +271,11 @@ class QueryService(QueryServiceInterface):
  memory_context=combined_context,
  output_format="text",
  prompt=prompt,
+ output_model=output_model,
  ):
  yield chunk
- full_text_response += chunk
+ if output_model is None:
+ full_text_response += chunk
 
  # Store conversation using processed user_text
  # Note: Storing images in history is not directly supported by current memory provider interface
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: solana-agent
- Version: 29.2.3
+ Version: 30.0.0
  Summary: AI Agents for Solana
  License: MIT
  Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
@@ -15,12 +15,12 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Dist: instructor (==1.8.3)
- Requires-Dist: llama-index-core (==0.12.37)
+ Requires-Dist: llama-index-core (==0.12.39)
  Requires-Dist: llama-index-embeddings-openai (==0.3.1)
  Requires-Dist: logfire (==3.16.1)
- Requires-Dist: openai (==1.82.0)
+ Requires-Dist: openai (==1.82.1)
  Requires-Dist: pillow (==11.2.1)
- Requires-Dist: pinecone (==7.0.1)
+ Requires-Dist: pinecone (==7.0.2)
  Requires-Dist: pydantic (>=2)
  Requires-Dist: pymongo (==4.13.0)
  Requires-Dist: pypdf (==5.5.0)
@@ -53,7 +53,6 @@ Build your AI agents in three lines of code!
  * Three lines of code setup
  * Simple Agent Definition
  * Fast Responses
- * Multi-Vendor Support
  * Solana Integration
  * Multi-Agent Swarm
  * Multi-Modal (Images & Audio & Text)
@@ -64,6 +63,7 @@ Build your AI agents in three lines of code!
  * Extensible Tooling
  * Automatic Tool Workflows
  * Autonomous Operation
+ * Structured Outputs
  * Knowledge Base
  * MCP Support
  * Guardrails
@@ -78,7 +78,6 @@ Build your AI agents in three lines of code!
  * Easy three lines of code setup
  * Simple agent definition using JSON
  * Fast AI responses
- * Multi-vendor support including OpenAI, Grok, and Gemini AI services
  * Solana Integration
  * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
  * Integrated observability and tracing via [Pydantic Logfire](https://pydantic.dev/logfire)
@@ -95,6 +94,7 @@ Build your AI agents in three lines of code!
  * Input and output guardrails for content filtering, safety, and data sanitization
  * Generate custom images based on text prompts with storage on S3 compatible services
  * Automatic sequential tool workflows allowing agents to chain multiple tools
+ * Deterministically return structured outputs
  * Combine with event-driven systems to create autonomous agents
 
  ## Stack
@@ -112,25 +112,10 @@ Build your AI agents in three lines of code!
  ### AI Models Used
 
  **OpenAI**
- * [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent - can be overridden)
- * [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router - can be overridden)
+ * [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (agent & router)
  * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) (embedding)
  * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
  * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
- * [gpt-image-1](https://platform.openai.com/docs/models/gpt-image-1) (image generation - can be overridden)
- * [gpt-4o-mini-search-preview](https://platform.openai.com/docs/models/gpt-4o-mini-search-preview) (Internet search)
-
- **Grok**
- * [grok-3-fast](https://x.ai/api#pricing) (agent - optional)
- * [grok-3-mini-fast](https://x.ai/api#pricing) (router - optional)
- * [grok-2-image](https://x.ai/api#pricing) (image generation - optional)
-
- **Gemini**
- * [gemini-2.5-flash-preview-04-17](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (agent & router - optional)
- * [imagen-3.0-generate-002](https://ai.google.dev/gemini-api/docs/models#imagen-3) (image generation - optional)
-
- **Ollama**
- * [gemma:4b-it-qat](https://ollama.com/library/gemma3) - (agent & router - optional)
 
  ## Installation
 
@@ -341,6 +326,38 @@ async for response in solana_agent.process("user123", "What is in this image? De
  print(response, end="")
  ```
 
+ ### Structured Outputs
+
+ ```python
+ from solana_agent import SolanaAgent
+
+ config = {
+ "openai": {
+ "api_key": "your-openai-api-key",
+ },
+ "agents": [
+ {
+ "name": "researcher",
+ "instructions": "You are a research expert.",
+ "specialization": "Researcher",
+ }
+ ],
+ }
+
+ solana_agent = SolanaAgent(config=config)
+
+ class ResearchProposal(BaseModel):
+ title: str
+ abstract: str
+ key_points: list[str]
+
+ full_response = None
+ async for response in solana_agent.process("user123", "Research the life of Ben Franklin - the founding Father.", output_model=ResearchProposal):
+ full_response = response
+
+ print(full_response.model_dump())
+ ```
+
  ### Command Line Interface (CLI)
 
  Solana Agent includes a command-line interface (CLI) for text-based chat using a configuration file.
@@ -433,36 +450,6 @@ config = {
  }
  ```
 
- ### Grok
-
- ```python
- config = {
- "grok": {
- "api_key": "your-grok-api-key",
- },
- }
- ```
-
- ### Gemini
-
- ```python
- config = {
- "gemini": {
- "api_key": "your-gemini-api-key",
- },
- }
- ```
-
- ### Ollama
-
- ```python
- config = {
- "ollama": {
- "api_key": "use-this-key-1010"
- },
- }
- ```
-
  ### Knowledge Base
 
  The Knowledge Base (KB) is meant to store text values and/or PDFs (extracts text) - can handle very large PDFs.
@@ -575,6 +562,8 @@ async for response in solana_agent.process("user123", "Summarize the annual repo
 
  Guardrails allow you to process and potentially modify user input before it reaches the agent (Input Guardrails) and agent output before it's sent back to the user (Output Guardrails). This is useful for implementing safety checks, content moderation, data sanitization, or custom transformations.
 
+ Guardrails don't work with structured outputs.
+
  Solana Agent provides a built-in PII scrubber based on [scrubadub](https://github.com/LeapBeyond/scrubadub).
 
  ```python
@@ -615,6 +604,8 @@ config = {
 
  #### Example Custom Guardrails
 
+ Guardrails don't work with structured outputs.
+
  ```python
  from solana_agent import InputGuardrail, OutputGuardrail
  import logging
@@ -1,28 +1,28 @@
  solana_agent/__init__.py,sha256=g83qhMOCwcWL19V4CYbQwl0Ykpb0xn49OUh05i-pu3g,1001
  solana_agent/adapters/__init__.py,sha256=tiEEuuy0NF3ngc_tGEcRTt71zVI58v3dYY9RvMrF2Cg,204
  solana_agent/adapters/mongodb_adapter.py,sha256=Hq3S8VzfLmnPjV40z8yJXGqUamOJcX5GbOMd-1nNWO4,3175
- solana_agent/adapters/openai_adapter.py,sha256=nKfJzgtxNfQWLp6vcu-2Z8zBbvpYgTOEil7nPH7QIlU,23340
+ solana_agent/adapters/openai_adapter.py,sha256=l1KQch01LhtbKFHz76oXI1Lvz_AM2zZzCMh2qOcbzmM,23253
  solana_agent/adapters/pinecone_adapter.py,sha256=XlfOpoKHwzpaU4KZnovO2TnEYbsw-3B53ZKQDtBeDgU,23847
  solana_agent/cli.py,sha256=FGvTIQmKLp6XsQdyKtuhIIfbBtMmcCCXfigNrj4bzMc,4704
  solana_agent/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- solana_agent/client/solana_agent.py,sha256=z_8i_dDJOV5JcZvvRNsRulCd40RvIUuE_f231fRXBDo,8975
+ solana_agent/client/solana_agent.py,sha256=ZNBRpougMi8xjkFQffSyrkJCnbRe7C2rKOjJfhQN5ug,9191
  solana_agent/domains/__init__.py,sha256=HiC94wVPRy-QDJSSRywCRrhrFfTBeHjfi5z-QfZv46U,168
  solana_agent/domains/agent.py,sha256=3Q1wg4eIul0CPpaYBOjEthKTfcdhf1SAiWc2R-IMGO8,2561
  solana_agent/domains/routing.py,sha256=1yR4IswGcmREGgbOOI6TKCfuM7gYGOhQjLkBqnZ-rNo,582
  solana_agent/factories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- solana_agent/factories/agent_factory.py,sha256=P8d-wGdWhksu79jngwTtL4jOeMjeo3OfiUJhwPWVh7U,15897
+ solana_agent/factories/agent_factory.py,sha256=IekAzqLSQe1pvtFr124JvyESoWHZeP7FKK7pVuqD85E,15058
  solana_agent/guardrails/pii.py,sha256=FCz1IC3mmkr41QFFf5NaC0fwJrVkwFsxgyOCS2POO5I,4428
  solana_agent/interfaces/__init__.py,sha256=IQs1WIM1FeKP1-kY2FEfyhol_dB-I-VAe2rD6jrVF6k,355
- solana_agent/interfaces/client/client.py,sha256=hFYe04lFGbp4BDlUMOnYQrp_SQXFPcktGOwsi0F87vU,3140
+ solana_agent/interfaces/client/client.py,sha256=9hg35-hp_CI-WVGOXehBE1ZCKYahLmbeAvtQOYmML4o,3245
  solana_agent/interfaces/guardrails/guardrails.py,sha256=gZCQ1FrirW-mX6s7FoYrbRs6golsp-x269kk4kQiZzc,572
  solana_agent/interfaces/plugins/plugins.py,sha256=Rz52cWBLdotwf4kV-2mC79tRYlN29zHSu1z9-y1HVPk,3329
  solana_agent/interfaces/providers/data_storage.py,sha256=Y92Cq8BtC55VlsYLD7bo3ofqQabNnlg7Q4H1Q6CDsLU,1713
- solana_agent/interfaces/providers/llm.py,sha256=IBtvl_p2Dd9pzuA6ub6DZRn50Td8f-XMtcc1M4V3gvA,2872
+ solana_agent/interfaces/providers/llm.py,sha256=Naj8gTGi3GpIMFHKwQjw7EuAF_uSWwwz2-41iUYtov4,2908
  solana_agent/interfaces/providers/memory.py,sha256=h3HEOwWCiFGIuFBX49XOv1jFaQW3NGjyKPOfmQloevk,1011
  solana_agent/interfaces/providers/vector_storage.py,sha256=XPYzvoWrlDVFCS9ItBmoqCFWXXWNYY-d9I7_pvP7YYk,1561
- solana_agent/interfaces/services/agent.py,sha256=MgLudTwzCzzzSR6PsVTB-w5rhGDHB5B81TGjo2z3G-A,2152
+ solana_agent/interfaces/services/agent.py,sha256=A-Hmgelr3g_qaNB0PEPMFHxB5nSCBK0WJ5hauJtIcmI,2257
  solana_agent/interfaces/services/knowledge_base.py,sha256=Mu8lCGFXPmI_IW5LRGti7octLoWZIg4k5PmGwPfe7LQ,1479
- solana_agent/interfaces/services/query.py,sha256=eLMMwc8hwHHjxFxlvVvkZfoQi8cSgQycWJbYAVphl9E,1632
+ solana_agent/interfaces/services/query.py,sha256=Co-pThoT4Zz8jhwKt5fV3LH9MaE_lSyEwNFxsMnTU9Y,1737
  solana_agent/interfaces/services/routing.py,sha256=Qbn3-DQGVSQKaegHDekSFmn_XCklA0H2f0XUx9-o3wA,367
  solana_agent/plugins/__init__.py,sha256=coZdgJKq1ExOaj6qB810i3rEhbjdVlrkN76ozt_Ojgo,193
  solana_agent/plugins/manager.py,sha256=mO_dKSVJ8GToD3wZflMcpKDEBXRoaaMRtY267HENCI0,5542
@@ -32,12 +32,12 @@ solana_agent/plugins/tools/auto_tool.py,sha256=uihijtlc9CCqCIaRcwPuuN7o1SHIpWL2G
  solana_agent/repositories/__init__.py,sha256=fP83w83CGzXLnSdq-C5wbw9EhWTYtqE2lQTgp46-X_4,163
  solana_agent/repositories/memory.py,sha256=SKQJJisrERccqd4cm4ERlp5BmKHVQAp1fzp8ce4i2bw,8377
  solana_agent/services/__init__.py,sha256=iko0c2MlF8b_SA_nuBGFllr2E3g_JowOrOzGcnU9tkA,162
- solana_agent/services/agent.py,sha256=q5cYzY6Jp5HFtYgJtrqTBWp5Z2n3dXdpQ5YScSKfvPo,18056
+ solana_agent/services/agent.py,sha256=acfauSIdDQdwuvqyDpx6VryQpL3nNfss3NlVtnI_kUg,19191
  solana_agent/services/knowledge_base.py,sha256=ZvOPrSmcNDgUzz4bJIQ4LeRl9vMZiK9hOfs71IpB7Bk,32735
- solana_agent/services/query.py,sha256=ENUfs4WSTpODMRXppDVW-Y3li9jYn8pOfQIHIPerUdQ,18498
+ solana_agent/services/query.py,sha256=3v5Ym8UqL0rfOC-0MWHALAsS2jVWdpUR3A-YI9n0xyo,18771
  solana_agent/services/routing.py,sha256=C5Ku4t9TqvY7S8wlUPMTC04HCrT4Ib3E8Q8yX0lVU_s,7137
- solana_agent-29.2.3.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
- solana_agent-29.2.3.dist-info/METADATA,sha256=tQgYee62kteB-uJScA1UsE6GItQpJQe63AY7AR2nWJ0,28982
- solana_agent-29.2.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- solana_agent-29.2.3.dist-info/entry_points.txt,sha256=-AuT_mfqk8dlZ0pHuAjx1ouAWpTRjpqvEUa6YV3lmc0,53
- solana_agent-29.2.3.dist-info/RECORD,,
+ solana_agent-30.0.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+ solana_agent-30.0.0.dist-info/METADATA,sha256=MWvby4A_W0bi7MZS4xwQ5MBKOl3U99kcwctE1_m_sjQ,28557
+ solana_agent-30.0.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ solana_agent-30.0.0.dist-info/entry_points.txt,sha256=-AuT_mfqk8dlZ0pHuAjx1ouAWpTRjpqvEUa6YV3lmc0,53
+ solana_agent-30.0.0.dist-info/RECORD,,