solana-agent 27.5.0__tar.gz → 28.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {solana_agent-27.5.0 → solana_agent-28.1.0}/PKG-INFO +35 -43
  2. {solana_agent-27.5.0 → solana_agent-28.1.0}/README.md +33 -42
  3. {solana_agent-27.5.0 → solana_agent-28.1.0}/pyproject.toml +2 -1
  4. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/adapters/openai_adapter.py +45 -21
  5. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/factories/agent_factory.py +31 -86
  6. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/providers/llm.py +1 -1
  7. solana_agent-28.1.0/solana_agent/services/agent.py +948 -0
  8. solana_agent-27.5.0/solana_agent/services/agent.py +0 -838
  9. {solana_agent-27.5.0 → solana_agent-28.1.0}/LICENSE +0 -0
  10. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/__init__.py +0 -0
  11. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/adapters/__init__.py +0 -0
  12. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/adapters/mongodb_adapter.py +0 -0
  13. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/adapters/pinecone_adapter.py +0 -0
  14. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/client/__init__.py +0 -0
  15. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/client/solana_agent.py +0 -0
  16. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/domains/__init__.py +0 -0
  17. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/domains/agent.py +0 -0
  18. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/domains/routing.py +0 -0
  19. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/factories/__init__.py +0 -0
  20. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/guardrails/pii.py +0 -0
  21. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/__init__.py +0 -0
  22. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/client/client.py +0 -0
  23. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/guardrails/guardrails.py +0 -0
  24. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/plugins/plugins.py +0 -0
  25. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/providers/data_storage.py +0 -0
  26. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/providers/memory.py +0 -0
  27. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/providers/vector_storage.py +0 -0
  28. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/services/agent.py +0 -0
  29. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/services/knowledge_base.py +0 -0
  30. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/services/query.py +0 -0
  31. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/services/routing.py +0 -0
  32. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/plugins/__init__.py +0 -0
  33. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/plugins/manager.py +0 -0
  34. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/plugins/registry.py +0 -0
  35. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/plugins/tools/__init__.py +0 -0
  36. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/plugins/tools/auto_tool.py +0 -0
  37. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/repositories/__init__.py +0 -0
  38. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/repositories/memory.py +0 -0
  39. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/services/__init__.py +0 -0
  40. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/services/knowledge_base.py +0 -0
  41. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/services/query.py +0 -0
  42. {solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/services/routing.py +0 -0

{solana_agent-27.5.0 → solana_agent-28.1.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: solana-agent
- Version: 27.5.0
+ Version: 28.1.0
  Summary: AI Agents for Solana
  License: MIT
  Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
@@ -17,6 +17,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Dist: instructor (>=1.7.9,<2.0.0)
  Requires-Dist: llama-index-core (>=0.12.30,<0.13.0)
  Requires-Dist: llama-index-embeddings-openai (>=0.3.1,<0.4.0)
+ Requires-Dist: logfire (>=3.14.0,<4.0.0)
  Requires-Dist: openai (>=1.75.0,<2.0.0)
  Requires-Dist: pinecone (>=6.0.2,<7.0.0)
  Requires-Dist: pydantic (>=2)
@@ -59,9 +60,11 @@ Build your AI agents in three lines of code!
  * Intelligent Routing
  * Business Alignment
  * Extensible Tooling
+ * Automatic Tool Workflows
  * Knowledge Base
  * MCP Support
  * Guardrails
+ * Pydantic Logfire
  * Tested & Secure
  * Built in Python
  * Powers [CometHeart](https://cometheart.com)
@@ -73,6 +76,7 @@ Build your AI agents in three lines of code!
  * Fast AI responses
  * Solana Ecosystem Integration via [AgentiPy](https://github.com/niceberginc/agentipy)
  * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
+ * Integrated observability and tracing via [Pydantic Logfire](https://logfire.pydantic.dev/)
  * Designed for a multi-agent swarm
  * Seamless text and audio streaming with real-time multi-modal processing
  * Persistent memory that preserves context across all agent interactions
@@ -84,26 +88,28 @@ Build your AI agents in three lines of code!
  * Assigned tools are utilized by agents automatically and effectively
  * Integrated Knowledge Base with semantic search and automatic PDF chunking
  * Input and output guardrails for content filtering, safety, and data sanitization
+ * Automatic sequential tool workflows allowing agents to chain multiple tools
 
  ## Stack
 
  ### Tech
 
  * [Python](https://python.org) - Programming Language
- * [OpenAI](https://openai.com), [Google](https://ai.google.dev), [xAI](https://x.ai) - LLM Providers
+ * [OpenAI](https://openai.com) - AI Model Provider
  * [MongoDB](https://mongodb.com) - Conversational History (optional)
  * [Zep Cloud](https://getzep.com) - Conversational Memory (optional)
  * [Pinecone](https://pinecone.io) - Knowledge Base (optional)
+ * [AgentiPy](https://agentipy.fun) - Solana Ecosystem (optional)
+ * [Zapier](https://zapier.com) - App Integrations (optional)
+ * [Pydantic Logfire](https://logfire.pydantic.dev) - Observability and Tracing (optional)
 
- ### LLMs
+ ### AI Models Used
 
- * [gpt-4.1-mini](https://platform.openai.com/docs/models/gpt-4.1-mini) (agent)
+ * [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent)
  * [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router)
  * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) or [text-embedding-3-small](https://platform.openai.com/docs/models/text-embedding-3-small) (embedding)
  * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
  * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
- * [gemini-2.5-flash-preview](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (optional)
- * [grok-3-mini-fast-beta](https://docs.x.ai/docs/models#models-and-pricing) (optional)
 
  ## Installation
 
@@ -113,13 +119,13 @@ You can install Solana Agent using pip:
 
  ## Flows
 
- In both flows of single and multiple agents - it is one user query to one agent using one tool (if needed).
+ In both flows of single and multiple agents - it is one user query to one agent using one or many tools (if needed).
 
- An agent can have multiple tools and will choose the best one to answer the user query.
+ An agent can have multiple tools and will choose the best ones to fulfill the user's query.
 
- Routing is determined by optimal domain expertise of the agent for the user query.
+ Routing is determined by optimal domain expertise of the agent for the user's query.
 
- When the agent uses a tool it feeds the tool output back to itself to generate the final response.
+ When the agent uses tools it feeds the tools output back to itself to generate the final response.
 
  This is important as tools generally output unstructured and unformatted data that the agent needs to prepare for the user.
 
@@ -128,13 +134,13 @@ Keep this in mind while designing your agentic systems using Solana Agent.
  ```ascii
  Single Agent
 
- ┌────────┐ ┌─────────┐ ┌────────┐
- │ │ │ │ │
- │ │ │ │ │
- │ User │◄──────►│ Agent │◄──────►│ Tool
- │ │ │ │ │
- │ │ │ │ │
- └────────┘ └─────────┘ └────────┘
+ ┌────────┐ ┌─────────┐ ┌────────-┐
+ │ │ │ │ │
+ │ │ │ │ │
+ │ User │◄──────►│ Agent │◄──────►│ Tools
+ │ │ │ │ │
+ │ │ │ │ │
+ └────────┘ └─────────┘ └────────-┘
 
 
 
@@ -142,13 +148,13 @@ Keep this in mind while designing your agentic systems using Solana Agent.
 
  Multiple Agents
 
- ┌────────┐ ┌──────────┐ ┌─────────┐ ┌────────┐
- │ │ │ │ │ │ │
- │ │ │ │ │ │ │
- ┌───►│ User ├───────►│ Router ├───────►│ Agent │◄──────►│ Tool
- │ │ │ │ │ │ │ │
- │ │ │ │ │ │ │ │
- │ └────────┘ └──────────┘ └────┬────┘ └────────┘
+ ┌────────┐ ┌──────────┐ ┌─────────┐ ┌────────-┐
+ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │
+ ┌───►│ User ├───────►│ Router ├───────►│ Agent │◄──────►│ Tools
+ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │
+ │ └────────┘ └──────────┘ └────┬────┘ └────────-┘
  │ │
  │ │
  │ │
@@ -319,26 +325,12 @@ config = {
  }
  ```
 
- ### Gemini
-
- This allows Gemini to replace OpenAI for agent and router.
-
- ```python
- config = {
-     "gemini": {
-         "api_key": "your-gemini-api-key",
-     },
- }
- ```
-
- ### Grok
-
- This allows Grok to replace OpenAI (or Gemini) for agent.
+ ### Observability and Tracing
 
  ```python
  config = {
-     "grok": {
-         "api_key": "your-grok-api-key",
+     "logfire": {
+         "api_key": "your-logfire-write-token",
      },
  }
  ```
@@ -533,8 +525,8 @@ class MyOutputGuardrail(OutputGuardrail):
 
  Tools can be used from plugins like Solana Agent Kit (sakit) or via inline tools. Tools available via plugins integrate automatically with Solana Agent.
 
- * Agents can only call one tool per response
- * Agents choose the best tool for the job
+ * Agents can use multiple tools per response and should apply the right sequential order (like send an email to bob@bob.com with the latest news on Solana)
+ * Agents choose the best tools for the job
  * Solana Agent doesn't use OpenAI function calling (tools) as they don't support async functions
  * Solana Agent tools are async functions
 

{solana_agent-27.5.0 → solana_agent-28.1.0}/README.md

@@ -28,9 +28,11 @@ Build your AI agents in three lines of code!
  * Intelligent Routing
  * Business Alignment
  * Extensible Tooling
+ * Automatic Tool Workflows
  * Knowledge Base
  * MCP Support
  * Guardrails
+ * Pydantic Logfire
  * Tested & Secure
  * Built in Python
  * Powers [CometHeart](https://cometheart.com)
@@ -42,6 +44,7 @@ Build your AI agents in three lines of code!
  * Fast AI responses
  * Solana Ecosystem Integration via [AgentiPy](https://github.com/niceberginc/agentipy)
  * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
+ * Integrated observability and tracing via [Pydantic Logfire](https://logfire.pydantic.dev/)
  * Designed for a multi-agent swarm
  * Seamless text and audio streaming with real-time multi-modal processing
  * Persistent memory that preserves context across all agent interactions
@@ -53,26 +56,28 @@ Build your AI agents in three lines of code!
  * Assigned tools are utilized by agents automatically and effectively
  * Integrated Knowledge Base with semantic search and automatic PDF chunking
  * Input and output guardrails for content filtering, safety, and data sanitization
+ * Automatic sequential tool workflows allowing agents to chain multiple tools
 
  ## Stack
 
  ### Tech
 
  * [Python](https://python.org) - Programming Language
- * [OpenAI](https://openai.com), [Google](https://ai.google.dev), [xAI](https://x.ai) - LLM Providers
+ * [OpenAI](https://openai.com) - AI Model Provider
  * [MongoDB](https://mongodb.com) - Conversational History (optional)
  * [Zep Cloud](https://getzep.com) - Conversational Memory (optional)
  * [Pinecone](https://pinecone.io) - Knowledge Base (optional)
+ * [AgentiPy](https://agentipy.fun) - Solana Ecosystem (optional)
+ * [Zapier](https://zapier.com) - App Integrations (optional)
+ * [Pydantic Logfire](https://logfire.pydantic.dev) - Observability and Tracing (optional)
 
- ### LLMs
+ ### AI Models Used
 
- * [gpt-4.1-mini](https://platform.openai.com/docs/models/gpt-4.1-mini) (agent)
+ * [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent)
  * [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router)
  * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) or [text-embedding-3-small](https://platform.openai.com/docs/models/text-embedding-3-small) (embedding)
  * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
  * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
- * [gemini-2.5-flash-preview](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (optional)
- * [grok-3-mini-fast-beta](https://docs.x.ai/docs/models#models-and-pricing) (optional)
 
  ## Installation
 
@@ -82,13 +87,13 @@ You can install Solana Agent using pip:
 
  ## Flows
 
- In both flows of single and multiple agents - it is one user query to one agent using one tool (if needed).
+ In both flows of single and multiple agents - it is one user query to one agent using one or many tools (if needed).
 
- An agent can have multiple tools and will choose the best one to answer the user query.
+ An agent can have multiple tools and will choose the best ones to fulfill the user's query.
 
- Routing is determined by optimal domain expertise of the agent for the user query.
+ Routing is determined by optimal domain expertise of the agent for the user's query.
 
- When the agent uses a tool it feeds the tool output back to itself to generate the final response.
+ When the agent uses tools it feeds the tools output back to itself to generate the final response.
 
  This is important as tools generally output unstructured and unformatted data that the agent needs to prepare for the user.
 
@@ -97,13 +102,13 @@ Keep this in mind while designing your agentic systems using Solana Agent.
  ```ascii
  Single Agent
 
- ┌────────┐ ┌─────────┐ ┌────────┐
- │ │ │ │ │
- │ │ │ │ │
- │ User │◄──────►│ Agent │◄──────►│ Tool
- │ │ │ │ │
- │ │ │ │ │
- └────────┘ └─────────┘ └────────┘
+ ┌────────┐ ┌─────────┐ ┌────────-┐
+ │ │ │ │ │
+ │ │ │ │ │
+ │ User │◄──────►│ Agent │◄──────►│ Tools
+ │ │ │ │ │
+ │ │ │ │ │
+ └────────┘ └─────────┘ └────────-┘
 
 
 
@@ -111,13 +116,13 @@ Keep this in mind while designing your agentic systems using Solana Agent.
 
  Multiple Agents
 
- ┌────────┐ ┌──────────┐ ┌─────────┐ ┌────────┐
- │ │ │ │ │ │ │
- │ │ │ │ │ │ │
- ┌───►│ User ├───────►│ Router ├───────►│ Agent │◄──────►│ Tool
- │ │ │ │ │ │ │ │
- │ │ │ │ │ │ │ │
- │ └────────┘ └──────────┘ └────┬────┘ └────────┘
+ ┌────────┐ ┌──────────┐ ┌─────────┐ ┌────────-┐
+ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │
+ ┌───►│ User ├───────►│ Router ├───────►│ Agent │◄──────►│ Tools
+ │ │ │ │ │ │ │ │
+ │ │ │ │ │ │ │ │
+ │ └────────┘ └──────────┘ └────┬────┘ └────────-┘
  │ │
  │ │
  │ │
@@ -288,26 +293,12 @@ config = {
  }
  ```
 
- ### Gemini
-
- This allows Gemini to replace OpenAI for agent and router.
-
- ```python
- config = {
-     "gemini": {
-         "api_key": "your-gemini-api-key",
-     },
- }
- ```
-
- ### Grok
-
- This allows Grok to replace OpenAI (or Gemini) for agent.
+ ### Observability and Tracing
 
  ```python
  config = {
-     "grok": {
-         "api_key": "your-grok-api-key",
+     "logfire": {
+         "api_key": "your-logfire-write-token",
      },
  }
  ```
@@ -502,8 +493,8 @@ class MyOutputGuardrail(OutputGuardrail):
 
  Tools can be used from plugins like Solana Agent Kit (sakit) or via inline tools. Tools available via plugins integrate automatically with Solana Agent.
 
- * Agents can only call one tool per response
- * Agents choose the best tool for the job
+ * Agents can use multiple tools per response and should apply the right sequential order (like send an email to bob@bob.com with the latest news on Solana)
+ * Agents choose the best tools for the job
  * Solana Agent doesn't use OpenAI function calling (tools) as they don't support async functions
  * Solana Agent tools are async functions
 

{solana_agent-27.5.0 → solana_agent-28.1.0}/pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "solana-agent"
- version = "27.5.0"
+ version = "28.1.0"
  description = "AI Agents for Solana"
  authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
  license = "MIT"
@@ -34,6 +34,7 @@ llama-index-core = "^0.12.30"
  llama-index-embeddings-openai = "^0.3.1"
  pypdf = "^5.4.0"
  scrubadub = "^2.0.1"
+ logfire = "^3.14.0"
 
  [tool.poetry.group.dev.dependencies]
  pytest = "^8.3.5"

{solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/adapters/openai_adapter.py

@@ -10,12 +10,13 @@ from openai import AsyncOpenAI
  from pydantic import BaseModel
  import instructor
  from instructor import Mode
+ import logfire
 
  from solana_agent.interfaces.providers.llm import LLMProvider
 
  T = TypeVar("T", bound=BaseModel)
 
- DEFAULT_CHAT_MODEL = "gpt-4.1-mini"
+ DEFAULT_CHAT_MODEL = "gpt-4.1"
  DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
  DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
  DEFAULT_EMBEDDING_DIMENSIONS = 3072
@@ -26,8 +27,21 @@ DEFAULT_TTS_MODEL = "tts-1"
  class OpenAIAdapter(LLMProvider):
      """OpenAI implementation of LLMProvider with web search capabilities."""
 
-     def __init__(self, api_key: str):
+     def __init__(self, api_key: str, logfire_api_key: Optional[str] = None):
          self.client = AsyncOpenAI(api_key=api_key)
+
+         self.logfire = False
+         if logfire_api_key:
+             try:
+                 logfire.configure(token=logfire_api_key)
+                 self.logfire = True
+                 print("Logfire configured successfully.")  # Optional: confirmation log
+             except Exception as e:
+                 print(
+                     f"Failed to configure Logfire: {e}"
+                 )  # Log error if configuration fails
+                 self.logfire = False  # Ensure logfire is False if config fails
+
          self.parse_model = DEFAULT_PARSE_MODEL
          self.text_model = DEFAULT_CHAT_MODEL
          self.transcription_model = DEFAULT_TRANSCRIPTION_MODEL
@@ -65,6 +79,7 @@ class OpenAIAdapter(LLMProvider):
              Audio bytes as they become available
          """
          try:
+             logfire.instrument_openai(self.client)
              async with self.client.audio.speech.with_streaming_response.create(
                  model=self.tts_model,
                  voice=voice,
@@ -106,6 +121,7 @@ class OpenAIAdapter(LLMProvider):
              Transcript text chunks as they become available
          """
          try:
+             logfire.instrument_openai(self.client)
              async with self.client.audio.transcriptions.with_streaming_response.create(
                  model=self.transcription_model,
                  file=(f"file.{input_format}", audio_bytes),
@@ -129,45 +145,44 @@ class OpenAIAdapter(LLMProvider):
          api_key: Optional[str] = None,
          base_url: Optional[str] = None,
          model: Optional[str] = None,
-     ) -> AsyncGenerator[str, None]:  # pragma: no cover
-         """Generate text from OpenAI models."""
+     ) -> str:  # pragma: no cover
+         """Generate text from OpenAI models as a single string."""
          messages = []
-
          if system_prompt:
              messages.append({"role": "system", "content": system_prompt})
-
          messages.append({"role": "user", "content": prompt})
 
-         # Prepare request parameters
+         # Prepare request parameters - stream is always False now
          request_params = {
              "messages": messages,
-             "stream": True,
-             "model": self.text_model,
+             "stream": False,  # Hardcoded to False
+             "model": model or self.text_model,
          }
 
+         # Determine client based on provided api_key/base_url
          if api_key and base_url:
              client = AsyncOpenAI(api_key=api_key, base_url=base_url)
          else:
              client = self.client
 
-         if model:
-             request_params["model"] = model
+         if self.logfire:
+             logfire.instrument_openai(client)
 
          try:
+             # Make the non-streaming API call
              response = await client.chat.completions.create(**request_params)
 
-             async for chunk in response:
-                 if chunk.choices:
-                     if chunk.choices[0].delta.content:
-                         text = chunk.choices[0].delta.content
-                         yield text
+             # Handle non-streaming response
+             if response.choices and response.choices[0].message.content:
+                 full_text = response.choices[0].message.content
+                 return full_text  # Return the complete string
+             else:
+                 print("Received non-streaming response with no content.")
+                 return ""  # Return empty string if no content
 
          except Exception as e:
-             print(f"Error in generate_text: {str(e)}")
-             import traceback
-
-             print(traceback.format_exc())
-             yield f"I apologize, but I encountered an error: {str(e)}"
+             # Log the error and return an error message string
+             print(f"Error in generate_text: {e}")
 
      async def parse_structured_output(
          self,
@@ -190,6 +205,9 @@ class OpenAIAdapter(LLMProvider):
          else:
              client = self.client
 
+         if self.logfire:
+             logfire.instrument_openai(client)
+
          if model:
              self.parse_model = model
 
@@ -233,6 +251,9 @@ class OpenAIAdapter(LLMProvider):
          else:
              client = self.client
 
+         if self.logfire:
+             logfire.instrument_openai(client)
+
          if model:
              self.parse_model = model
 
@@ -292,6 +313,9 @@ class OpenAIAdapter(LLMProvider):
          # Replace newlines with spaces as recommended by OpenAI
          text = text.replace("\n", " ")
 
+         if self.logfire:
+             logfire.instrument_openai(self.client)
+
          response = await self.client.embeddings.create(
              input=[text], model=embedding_model, dimensions=embedding_dimensions
          )
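
Because `generate_text` now returns a complete string instead of streaming chunks, callers written against 27.5.0 need to drop their `async for` loops. A rough before/after sketch, assuming `adapter` is an `OpenAIAdapter` instance; the `prompt` and `system_prompt` keyword names come from the hunk above, everything else is illustrative.

```python
# Hypothetical caller showing the 27.5.0 -> 28.1.0 change in generate_text.
async def ask(adapter, question: str) -> str:
    # 27.5.0 (old): the method was an async generator consumed chunk by chunk:
    #     chunks = [c async for c in adapter.generate_text(prompt=question)]
    #     return "".join(chunks)

    # 28.1.0 (new): one awaited call returns the full response text.
    return await adapter.generate_text(
        prompt=question,
        system_prompt="Answer concisely.",
    )
```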

{solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/factories/agent_factory.py

@@ -78,6 +78,10 @@ class SolanaAgentFactory:
          # Create adapters
 
          if "mongo" in config:
+             if "connection_string" not in config["mongo"]:
+                 raise ValueError("MongoDB connection string is required.")
+             if "database" not in config["mongo"]:
+                 raise ValueError("MongoDB database name is required.")
              db_adapter = MongoDBAdapter(
                  connection_string=config["mongo"]["connection_string"],
                  database_name=config["mongo"]["database"],
@@ -85,9 +89,21 @@ class SolanaAgentFactory:
          else:
              db_adapter = None
 
-         llm_adapter = OpenAIAdapter(
-             api_key=config["openai"]["api_key"],
-         )
+         if "logfire" in config:
+             if "api_key" not in config["logfire"]:
+                 raise ValueError("Pydantic Logfire API key is required.")
+             if "openai" not in config or "api_key" not in config["openai"]:
+                 raise ValueError("OpenAI API key is required.")
+             llm_adapter = OpenAIAdapter(
+                 api_key=config["openai"]["api_key"],
+                 logfire_api_key=config["logfire"].get("api_key"),
+             )
+         else:
+             if "openai" not in config or "api_key" not in config["openai"]:
+                 raise ValueError("OpenAI API key is required.")
+             llm_adapter = OpenAIAdapter(
+                 api_key=config["openai"].get("api_key"),
+             )
 
          # Create business mission if specified in config
          business_mission = None
@@ -130,90 +146,19 @@ class SolanaAgentFactory:
              f"Loaded {len(input_guardrails)} input guardrails and {len(output_guardrails)} output guardrails."
          )
 
-         if (
-             "gemini" in config
-             and "api_key" in config["gemini"]
-             and "grok" not in config
-         ):
-             # Create primary services
-             agent_service = AgentService(
-                 llm_provider=llm_adapter,
-                 business_mission=business_mission,
-                 config=config,
-                 api_key=config["gemini"]["api_key"],
-                 base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
-                 model="gemini-2.5-flash-preview-04-17",
-                 output_guardrails=output_guardrails,
-             )
-
-             # Create routing service
-             routing_service = RoutingService(
-                 llm_provider=llm_adapter,
-                 agent_service=agent_service,
-                 api_key=config["gemini"]["api_key"],
-                 base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
-                 model="gemini-2.5-flash-preview-04-17",
-             )
-
-         elif (
-             "gemini" in config
-             and "api_key" in config["gemini"]
-             and "grok" in config
-             and "api_key" in config["grok"]
-         ):
-             # Create primary services
-             agent_service = AgentService(
-                 llm_provider=llm_adapter,
-                 business_mission=business_mission,
-                 config=config,
-                 api_key=config["grok"]["api_key"],
-                 base_url="https://api.x.ai/v1",
-                 model="grok-3-mini-fast-beta",
-                 output_guardrails=output_guardrails,
-             )
-             # Create routing service
-             routing_service = RoutingService(
-                 llm_provider=llm_adapter,
-                 agent_service=agent_service,
-                 api_key=config["gemini"]["api_key"],
-                 base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
-                 model="gemini-2.5-flash-preview-04-17",
-             )
-
-         elif (
-             "grok" in config and "api_key" in config["grok"] and "gemini" not in config
-         ):
-             # Create primary services
-             agent_service = AgentService(
-                 llm_provider=llm_adapter,
-                 business_mission=business_mission,
-                 config=config,
-                 api_key=config["grok"]["api_key"],
-                 base_url="https://api.x.ai/v1",
-                 model="grok-3-mini-fast-beta",
-                 output_guardrails=output_guardrails,
-             )
-
-             # Create routing service
-             routing_service = RoutingService(
-                 llm_provider=llm_adapter,
-                 agent_service=agent_service,
-             )
-
-         else:
-             # Create primary services
-             agent_service = AgentService(
-                 llm_provider=llm_adapter,
-                 business_mission=business_mission,
-                 config=config,
-                 output_guardrails=output_guardrails,
-             )
+         # Create primary services
+         agent_service = AgentService(
+             llm_provider=llm_adapter,
+             business_mission=business_mission,
+             config=config,
+             output_guardrails=output_guardrails,
+         )
 
-             # Create routing service
-             routing_service = RoutingService(
-                 llm_provider=llm_adapter,
-                 agent_service=agent_service,
-             )
+         # Create routing service
+         routing_service = RoutingService(
+             llm_provider=llm_adapter,
+             agent_service=agent_service,
+         )
 
          # Debug the agent service tool registry
          print(
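
The factory now validates its config up front and builds a single OpenAI-only code path. A small sketch of what passes and what raises, based only on the checks added above; the `create_from_config` entry point is assumed and its name is not visible in this hunk.

```python
# Sketch of the validation behaviour added in agent_factory.py (assumptions noted).
from solana_agent.factories.agent_factory import SolanaAgentFactory

valid_config = {
    "openai": {"api_key": "sk-..."},          # always required now
    "logfire": {"api_key": "pylf-..."},       # optional, enables tracing
    "mongo": {
        "connection_string": "mongodb://localhost:27017",
        "database": "solana_agent",           # both mongo keys must be present
    },
}

broken_config = {
    "openai": {"api_key": "sk-..."},
    "mongo": {"connection_string": "mongodb://localhost:27017"},  # "database" missing
}

SolanaAgentFactory.create_from_config(valid_config)   # assumed factory method name
try:
    SolanaAgentFactory.create_from_config(broken_config)
except ValueError as err:
    print(err)  # -> "MongoDB database name is required."
```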

{solana_agent-27.5.0 → solana_agent-28.1.0}/solana_agent/interfaces/providers/llm.py

@@ -25,7 +25,7 @@ class LLMProvider(ABC):
          api_key: Optional[str] = None,
          base_url: Optional[str] = None,
          model: Optional[str] = None,
-     ) -> AsyncGenerator[str, None]:
+     ) -> str:
          """Generate text from the language model."""
          pass
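
For anyone maintaining a custom provider, the interface change above means `generate_text` is now an ordinary coroutine returning `str`. Below is a minimal conforming stub, assuming the `prompt` and `system_prompt` parameters seen in the adapter hunks; the full abstract signature and the other abstract methods of `LLMProvider` are not shown in this diff.

```python
# Minimal sketch of a provider honouring the 28.1.0 generate_text contract.
from typing import Optional

from solana_agent.interfaces.providers.llm import LLMProvider


class EchoProvider(LLMProvider):
    """Toy provider; the remaining abstract methods of LLMProvider (not shown
    in this diff) would still need real implementations."""

    async def generate_text(
        self,
        prompt: str,
        system_prompt: str = "",
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        model: Optional[str] = None,
    ) -> str:
        # 27.5.0 implementations yielded chunks; 28.1.0 returns one string.
        return f"echo: {prompt}"
```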