solana-agent 28.1.0__tar.gz → 28.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {solana_agent-28.1.0 → solana_agent-28.2.0}/PKG-INFO +46 -3
  2. {solana_agent-28.1.0 → solana_agent-28.2.0}/README.md +43 -2
  3. {solana_agent-28.1.0 → solana_agent-28.2.0}/pyproject.toml +6 -1
  4. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/adapters/openai_adapter.py +59 -44
  5. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/adapters/pinecone_adapter.py +76 -57
  6. solana_agent-28.2.0/solana_agent/cli.py +128 -0
  7. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/factories/agent_factory.py +35 -16
  8. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/plugins/manager.py +24 -8
  9. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/plugins/registry.py +23 -10
  10. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/repositories/memory.py +18 -10
  11. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/services/knowledge_base.py +91 -41
  12. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/services/routing.py +14 -9
  13. {solana_agent-28.1.0 → solana_agent-28.2.0}/LICENSE +0 -0
  14. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/__init__.py +0 -0
  15. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/adapters/__init__.py +0 -0
  16. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/adapters/mongodb_adapter.py +0 -0
  17. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/client/__init__.py +0 -0
  18. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/client/solana_agent.py +0 -0
  19. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/domains/__init__.py +0 -0
  20. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/domains/agent.py +0 -0
  21. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/domains/routing.py +0 -0
  22. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/factories/__init__.py +0 -0
  23. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/guardrails/pii.py +0 -0
  24. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/__init__.py +0 -0
  25. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/client/client.py +0 -0
  26. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/guardrails/guardrails.py +0 -0
  27. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/plugins/plugins.py +0 -0
  28. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/providers/data_storage.py +0 -0
  29. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/providers/llm.py +0 -0
  30. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/providers/memory.py +0 -0
  31. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/providers/vector_storage.py +0 -0
  32. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/services/agent.py +0 -0
  33. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/services/knowledge_base.py +0 -0
  34. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/services/query.py +0 -0
  35. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/interfaces/services/routing.py +0 -0
  36. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/plugins/__init__.py +0 -0
  37. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/plugins/tools/__init__.py +0 -0
  38. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/plugins/tools/auto_tool.py +0 -0
  39. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/repositories/__init__.py +0 -0
  40. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/services/__init__.py +0 -0
  41. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/services/agent.py +0 -0
  42. {solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/services/query.py +0 -0
{solana_agent-28.1.0 → solana_agent-28.2.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: solana-agent
- Version: 28.1.0
+ Version: 28.2.0
  Summary: AI Agents for Solana
  License: MIT
  Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
@@ -23,7 +23,9 @@ Requires-Dist: pinecone (>=6.0.2,<7.0.0)
  Requires-Dist: pydantic (>=2)
  Requires-Dist: pymongo (>=4.12.0,<5.0.0)
  Requires-Dist: pypdf (>=5.4.0,<6.0.0)
+ Requires-Dist: rich (>=13)
  Requires-Dist: scrubadub (>=2.0.1,<3.0.0)
+ Requires-Dist: typer (>=0.15.2,<0.16.0)
  Requires-Dist: zep-cloud (>=2.10.1,<3.0.0)
  Project-URL: Documentation, https://docs.solana-agent.com
  Project-URL: Homepage, https://solana-agent.com
@@ -76,7 +78,7 @@ Build your AI agents in three lines of code!
  * Fast AI responses
  * Solana Ecosystem Integration via [AgentiPy](https://github.com/niceberginc/agentipy)
  * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
- * Integrated observability and tracing via [Pydantic Logfire](https://logfire.pydantic.dev/)
+ * Integrated observability and tracing via [Pydantic Logfire](https://pydantic.dev/logfire)
  * Designed for a multi-agent swarm
  * Seamless text and audio streaming with real-time multi-modal processing
  * Persistent memory that preserves context across all agent interactions
@@ -101,7 +103,7 @@ Build your AI agents in three lines of code!
  * [Pinecone](https://pinecone.io) - Knowledge Base (optional)
  * [AgentiPy](https://agentipy.fun) - Solana Ecosystem (optional)
  * [Zapier](https://zapier.com) - App Integrations (optional)
- * [Pydantic Logfire](https://logfire.pydantic.dev) - Observability and Tracing (optional)
+ * [Pydantic Logfire](https://pydantic.dev/logfire) - Observability and Tracing (optional)

  ### AI Models Used

@@ -284,6 +286,47 @@ async for response in solana_agent.process("user123", audio_content, audio_input
  print(response, end="")
  ```

+ ### Command Line Interface (CLI)
+
+ Solana Agent includes a command-line interface (CLI) for text-based chat using a configuration file.
+
+ Ensure you have a valid configuration file (e.g., `config.json`) containing at least your OpenAI API key and agent definitions.
+
+ **./config.json**
+ ```json
+ {
+   "openai": {
+     "api_key": "your-openai-api-key"
+   },
+   "agents": [
+     {
+       "name": "default_agent",
+       "instructions": "You are a helpful AI assistant.",
+       "specialization": "general"
+     }
+   ]
+ }
+ ```
+
+ Also ensure that you have `pip install uv` to call `uvx`.
+
+ ```bash
+ uvx solana-agent [OPTIONS]
+
+ Options:
+
+ --user-id TEXT: The user ID for the conversation (default: cli_user).
+ --config TEXT: Path to the configuration JSON file (default: config.json).
+ --prompt TEXT: Optional system prompt override for the agent.
+ --help: Show help message and exit.
+
+ # Using default config.json and user_id
+ uvx solana-agent
+
+ # Specifying user ID and config path
+ uvx solana-agent --user-id my_cli_session --config ./my_agent_config.json
+ ```
+
  ## Optional Feature Configs

  ### Business Alignment
{solana_agent-28.1.0 → solana_agent-28.2.0}/README.md

@@ -44,7 +44,7 @@ Build your AI agents in three lines of code!
  * Fast AI responses
  * Solana Ecosystem Integration via [AgentiPy](https://github.com/niceberginc/agentipy)
  * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
- * Integrated observability and tracing via [Pydantic Logfire](https://logfire.pydantic.dev/)
+ * Integrated observability and tracing via [Pydantic Logfire](https://pydantic.dev/logfire)
  * Designed for a multi-agent swarm
  * Seamless text and audio streaming with real-time multi-modal processing
  * Persistent memory that preserves context across all agent interactions
@@ -69,7 +69,7 @@ Build your AI agents in three lines of code!
  * [Pinecone](https://pinecone.io) - Knowledge Base (optional)
  * [AgentiPy](https://agentipy.fun) - Solana Ecosystem (optional)
  * [Zapier](https://zapier.com) - App Integrations (optional)
- * [Pydantic Logfire](https://logfire.pydantic.dev) - Observability and Tracing (optional)
+ * [Pydantic Logfire](https://pydantic.dev/logfire) - Observability and Tracing (optional)

  ### AI Models Used

@@ -252,6 +252,47 @@ async for response in solana_agent.process("user123", audio_content, audio_input
  print(response, end="")
  ```

+ ### Command Line Interface (CLI)
+
+ Solana Agent includes a command-line interface (CLI) for text-based chat using a configuration file.
+
+ Ensure you have a valid configuration file (e.g., `config.json`) containing at least your OpenAI API key and agent definitions.
+
+ **./config.json**
+ ```json
+ {
+   "openai": {
+     "api_key": "your-openai-api-key"
+   },
+   "agents": [
+     {
+       "name": "default_agent",
+       "instructions": "You are a helpful AI assistant.",
+       "specialization": "general"
+     }
+   ]
+ }
+ ```
+
+ Also ensure that you have `pip install uv` to call `uvx`.
+
+ ```bash
+ uvx solana-agent [OPTIONS]
+
+ Options:
+
+ --user-id TEXT: The user ID for the conversation (default: cli_user).
+ --config TEXT: Path to the configuration JSON file (default: config.json).
+ --prompt TEXT: Optional system prompt override for the agent.
+ --help: Show help message and exit.
+
+ # Using default config.json and user_id
+ uvx solana-agent
+
+ # Specifying user ID and config path
+ uvx solana-agent --user-id my_cli_session --config ./my_agent_config.json
+ ```
+
  ## Optional Feature Configs

  ### Business Alignment
{solana_agent-28.1.0 → solana_agent-28.2.0}/pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "solana-agent"
- version = "28.1.0"
+ version = "28.2.0"
  description = "AI Agents for Solana"
  authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
  license = "MIT"
@@ -35,6 +35,8 @@ llama-index-embeddings-openai = "^0.3.1"
  pypdf = "^5.4.0"
  scrubadub = "^2.0.1"
  logfire = "^3.14.0"
+ typer = "^0.15.2"
+ rich = ">=13"

  [tool.poetry.group.dev.dependencies]
  pytest = "^8.3.5"
@@ -49,6 +51,9 @@ sphinx-autobuild = "^2024.10.3"
  mongomock = "^4.3.0"
  ruff = "^0.11.6"

+ [tool.poetry.scripts]
+ solana-agent = "solana_agent.cli:app"
+
  [build-system]
  requires = ["poetry-core>=1.0.0"]
  build-backend = "poetry.core.masonry.api"
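The new `[tool.poetry.scripts]` entry is what makes `uvx solana-agent` (and a plain `solana-agent` after `pip install solana-agent`) resolve: Poetry generates a console script pointing at the Typer application object `app` in the new `solana_agent/cli.py`, which also explains why `typer` and `rich` join the runtime dependencies. The 128-line file itself is not shown in this diff, so the following is only a minimal sketch of such an entry point, assuming the options documented in the README and the public `SolanaAgent` client; all internal details here are hypothetical.

```python
# Hypothetical sketch of a solana_agent/cli.py entry point -- the real file
# is not shown in this diff; only the "solana_agent.cli:app" entry point
# and the documented CLI options are known.
import asyncio
import json
from typing import Optional

import typer

from solana_agent import SolanaAgent  # public client from the README examples

app = typer.Typer()


@app.command()
def chat(
    user_id: str = typer.Option("cli_user", "--user-id"),
    config: str = typer.Option("config.json", "--config"),
    prompt: Optional[str] = typer.Option(None, "--prompt"),
) -> None:
    """Text-based chat loop driven by a JSON configuration file."""
    with open(config) as f:
        cfg = json.load(f)
    agent = SolanaAgent(config=cfg)
    # The real CLI presumably forwards --prompt as a system prompt override;
    # it is left unused here rather than guessing the process() signature.

    async def run() -> None:
        while True:
            message = input("> ")
            async for chunk in agent.process(user_id, message):
                print(chunk, end="", flush=True)
            print()

    asyncio.run(run())


if __name__ == "__main__":
    app()
```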
{solana_agent-28.1.0 → solana_agent-28.2.0}/solana_agent/adapters/openai_adapter.py

@@ -4,6 +4,7 @@ LLM provider adapters for the Solana Agent system.
  These adapters implement the LLMProvider interface for different LLM services.
  """

+ import logging
  from typing import AsyncGenerator, List, Literal, Optional, Type, TypeVar

  from openai import AsyncOpenAI
@@ -14,6 +15,9 @@ import logfire

  from solana_agent.interfaces.providers.llm import LLMProvider

+ # Setup logger for this module
+ logger = logging.getLogger(__name__)
+
  T = TypeVar("T", bound=BaseModel)

  DEFAULT_CHAT_MODEL = "gpt-4.1"
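The pattern running through the rest of this file (and the other modules in this release) is a migration from ad-hoc `print(...)` and `print(traceback.format_exc())` calls to this module-level logger, with `logger.exception(...)` capturing the traceback automatically inside `except` blocks. Since the stdlib drops `INFO`-level records until the host application configures a handler, an embedder who wants to keep seeing these messages needs something like this minimal sketch:

```python
import logging

# Route the library's log records (e.g. "Logfire configured successfully.")
# to stderr; without a handler, DEBUG/INFO records are silently dropped.
logging.basicConfig(level=logging.INFO)

# Or scope the verbosity to just this package:
logging.getLogger("solana_agent").setLevel(logging.WARNING)
```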
@@ -35,12 +39,10 @@ class OpenAIAdapter(LLMProvider):
          try:
              logfire.configure(token=logfire_api_key)
              self.logfire = True
-             print("Logfire configured successfully.")  # Optional: confirmation log
+             logger.info("Logfire configured successfully.")  # Use logger.info
          except Exception as e:
-             print(
-                 f"Failed to configure Logfire: {e}"
-             )  # Log error if configuration fails
-             self.logfire = False  # Ensure logfire is False if config fails
+             logger.error(f"Failed to configure Logfire: {e}")  # Use logger.error
+             self.logfire = False

          self.parse_model = DEFAULT_PARSE_MODEL
          self.text_model = DEFAULT_CHAT_MODEL
@@ -79,7 +81,8 @@ class OpenAIAdapter(LLMProvider):
              Audio bytes as they become available
          """
          try:
-             logfire.instrument_openai(self.client)
+             if self.logfire:  # Instrument only if logfire is enabled
+                 logfire.instrument_openai(self.client)
              async with self.client.audio.speech.with_streaming_response.create(
                  model=self.tts_model,
                  voice=voice,
@@ -91,17 +94,8 @@
                  yield chunk

          except Exception as e:
-             print(f"Error in text_to_speech: {str(e)}")
-             import traceback
-
-             print(traceback.format_exc())
-             yield b""  # Return empty bytes on error
-
-         except Exception as e:
-             print(f"Error in text_to_speech: {str(e)}")
-             import traceback
-
-             print(traceback.format_exc())
+             # Log the exception with traceback
+             logger.exception(f"Error in text_to_speech: {e}")
              yield b""  # Return empty bytes on error

      async def transcribe_audio(
@@ -121,7 +115,8 @@ class OpenAIAdapter(LLMProvider):
              Transcript text chunks as they become available
          """
          try:
-             logfire.instrument_openai(self.client)
+             if self.logfire:  # Instrument only if logfire is enabled
+                 logfire.instrument_openai(self.client)
              async with self.client.audio.transcriptions.with_streaming_response.create(
                  model=self.transcription_model,
                  file=(f"file.{input_format}", audio_bytes),
@@ -132,10 +127,8 @@
                  yield chunk

          except Exception as e:
-             print(f"Error in transcribe_audio: {str(e)}")
-             import traceback
-
-             print(traceback.format_exc())
+             # Log the exception with traceback
+             logger.exception(f"Error in transcribe_audio: {e}")
              yield f"I apologize, but I encountered an error transcribing the audio: {str(e)}"

      async def generate_text(
@@ -177,12 +170,16 @@
                  full_text = response.choices[0].message.content
                  return full_text  # Return the complete string
              else:
-                 print("Received non-streaming response with no content.")
+                 logger.warning(
+                     "Received non-streaming response with no content."
+                 )  # Use logger.warning
                  return ""  # Return empty string if no content

          except Exception as e:
-             # Log the error and return an error message string
-             print(f"Error in generate_text: {e}")
+             # Log the exception and return an error message string
+             logger.exception(f"Error in generate_text: {e}")
+             # Consider returning a more informative error string or raising
+             return f"Error generating text: {e}"

      async def parse_structured_output(
          self,
@@ -208,56 +205,67 @@
              if self.logfire:
                  logfire.instrument_openai(client)

-             if model:
-                 self.parse_model = model
+             # Use the provided model or the default parse model
+             current_parse_model = model or self.parse_model

              patched_client = instructor.from_openai(client, mode=Mode.TOOLS_STRICT)

              # Use instructor's structured generation with function calling
              response = await patched_client.chat.completions.create(
-                 model=self.parse_model,
+                 model=current_parse_model,  # Use the determined model
                  messages=messages,
                  response_model=model_class,
                  max_retries=2,  # Automatically retry on validation errors
              )
              return response
          except Exception as e:
-             print(f"Error with instructor parsing (TOOLS_STRICT mode): {e}")
+             logger.warning(
+                 f"Instructor parsing (TOOLS_STRICT mode) failed: {e}"
+             )  # Log warning

              try:
+                 # Determine client again for fallback
                  if api_key and base_url:
                      client = AsyncOpenAI(api_key=api_key, base_url=base_url)
                  else:
                      client = self.client

-                 if model:
-                     self.parse_model = model
+                 if self.logfire:  # Instrument again if needed
+                     logfire.instrument_openai(client)
+
+                 # Use the provided model or the default parse model
+                 current_parse_model = model or self.parse_model

                  # First fallback: Try regular JSON mode
+                 logger.info("Falling back to instructor JSON mode.")  # Log info
                  patched_client = instructor.from_openai(client, mode=Mode.JSON)
                  response = await patched_client.chat.completions.create(
-                     model=self.parse_model,
+                     model=current_parse_model,  # Use the determined model
                      messages=messages,
                      response_model=model_class,
                      max_retries=1,
                  )
                  return response
              except Exception as json_error:
-                 print(f"JSON mode fallback also failed: {json_error}")
+                 logger.warning(
+                     f"Instructor JSON mode fallback also failed: {json_error}"
+                 )  # Log warning

                  try:
+                     # Determine client again for final fallback
                      if api_key and base_url:
                          client = AsyncOpenAI(api_key=api_key, base_url=base_url)
                      else:
                          client = self.client

-                     if self.logfire:
+                     if self.logfire:  # Instrument again if needed
                          logfire.instrument_openai(client)

-                     if model:
-                         self.parse_model = model
+                     # Use the provided model or the default parse model
+                     current_parse_model = model or self.parse_model

                      # Final fallback: Manual extraction with a detailed prompt
+                     logger.info("Falling back to manual JSON extraction.")  # Log info
                      fallback_system_prompt = f"""
                      {system_prompt}

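Worth calling out: `current_parse_model = model or self.parse_model` fixes a real state bug, not just style. The old `self.parse_model = model` promoted a per-call override into the adapter's default, so a single request could silently change the model for every later caller sharing the adapter instance; the new local variable keeps the override scoped to the call across all three fallback tiers. A self-contained toy demonstrating the difference (class and model names here are illustrative, not from the package):

```python
from typing import Optional


class Adapter:
    """Toy stand-in for a shared adapter instance."""

    def __init__(self) -> None:
        self.parse_model = "gpt-4.1"  # process-wide default

    def parse_old(self, model: Optional[str] = None) -> str:
        if model:
            self.parse_model = model  # mutates shared state!
        return self.parse_model

    def parse_new(self, model: Optional[str] = None) -> str:
        # per-call override; the default is left untouched
        return model or self.parse_model


a = Adapter()
a.parse_old(model="some-other-model")
assert a.parse_model == "some-other-model"  # default silently rewritten

b = Adapter()
b.parse_new(model="some-other-model")
assert b.parse_model == "gpt-4.1"  # default preserved
```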
@@ -269,7 +277,7 @@

                      # Regular completion without instructor
                      completion = await client.chat.completions.create(
-                         model=self.parse_model,
+                         model=current_parse_model,  # Use the determined model
                          messages=[
                              {"role": "system", "content": fallback_system_prompt},
                              {"role": "user", "content": prompt},
@@ -284,7 +292,10 @@
                      return model_class.model_validate_json(json_str)

                  except Exception as fallback_error:
-                     print(f"All fallback methods failed: {fallback_error}")
+                     # Log the final exception with traceback
+                     logger.exception(
+                         f"All structured output fallback methods failed: {fallback_error}"
+                     )
                      raise ValueError(
                          f"Failed to generate structured output: {e}. All fallbacks failed."
                      ) from e
@@ -303,6 +314,8 @@
              A list of floats representing the embedding vector.
          """
          if not text:
+             # Log error instead of raising immediately, let caller handle empty input if needed
+             logger.error("Attempted to embed empty text.")
              raise ValueError("Text cannot be empty")

          try:
@@ -313,7 +326,7 @@
              # Replace newlines with spaces as recommended by OpenAI
              text = text.replace("\n", " ")

-             if self.logfire:
+             if self.logfire:  # Instrument only if logfire is enabled
                  logfire.instrument_openai(self.client)

              response = await self.client.embeddings.create(
@@ -323,11 +336,13 @@
              if response.data and response.data[0].embedding:
                  return response.data[0].embedding
              else:
+                 # Log warning about unexpected response structure
+                 logger.warning(
+                     "Failed to retrieve embedding from OpenAI response structure."
+                 )
                  raise ValueError("Failed to retrieve embedding from OpenAI response")

          except Exception as e:
-             print(f"Error generating embedding: {e}")
-             import traceback
-
-             print(traceback.format_exc())
-             raise
+             # Log the exception with traceback before raising
+             logger.exception(f"Error generating embedding: {e}")
+             raise  # Re-raise the original exception
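With printing replaced by `logger.exception(...)` plus a bare `raise`, callers of the embedding method now receive the original exception with its traceback intact instead of a stack dump swallowed onto stdout. A usage sketch; the constructor arguments and the `embed_text` name are assumptions, since the diff shows only the method body:

```python
import asyncio

from solana_agent.adapters.openai_adapter import OpenAIAdapter


async def main() -> None:
    # Constructor arguments and the embed method's name are assumed
    # for illustration; the diff does not show the signatures.
    adapter = OpenAIAdapter(api_key="your-openai-api-key")
    try:
        vector = await adapter.embed_text("some document chunk")
        print(f"embedding dimension: {len(vector)}")
    except ValueError:
        # Raised for empty input or a malformed embeddings response;
        # the traceback has already been logged via logger.exception().
        raise


asyncio.run(main())
```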