solana-agent 0.0.14__tar.gz → 0.0.16__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 0.0.14
+Version: 0.0.16
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "0.0.14"
+version = "0.0.16"
 description = "Build self-learning AI Agents"
 authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
 license = "MIT"
@@ -84,8 +84,9 @@ class AI:
         code_interpreter: bool = True,
         openai_assistant_model: Literal["gpt-4o-mini",
                                         "gpt-4o"] = "gpt-4o-mini",
-        openai_embedding_model: Literal["text-embedding-3-small",
-                                        "text-embedding-3-large"] = "text-embedding-3-small"
+        openai_embedding_model: Literal[
+            "text-embedding-3-small", "text-embedding-3-large"
+        ] = "text-embedding-3-small",
     ):
         """Initialize a new AI assistant with memory and tool integration capabilities.
 
@@ -131,22 +132,19 @@ class AI:
         self._assistant_id = None
         self._database: MongoDatabase = database
         self._accumulated_value_queue = asyncio.Queue()
-        self._zep = (
-            AsyncZep(api_key=zep_api_key)
-            if zep_api_key
-            else None
-        )
-        self._sync_zep = (
-            Zep(api_key=zep_api_key) if zep_api_key else None
-        )
+        self._zep = AsyncZep(api_key=zep_api_key) if zep_api_key else None
+        self._sync_zep = Zep(api_key=zep_api_key) if zep_api_key else None
         self._perplexity_api_key = perplexity_api_key
         self._grok_api_key = grok_api_key
         self._gemini_api_key = gemini_api_key
-        self._pinecone = Pinecone(
-            api_key=pinecone_api_key) if pinecone_api_key else None
+        self._pinecone = (
+            Pinecone(api_key=pinecone_api_key) if pinecone_api_key else None
+        )
         self._pinecone_index_name = pinecone_index_name if pinecone_index_name else None
-        self._pinecone_index = self._pinecone.Index(
-            self._pinecone_index_name) if self._pinecone else None
+        self._pinecone_index = (
+            self._pinecone.Index(
+                self._pinecone_index_name) if self._pinecone else None
+        )
 
     async def __aenter__(self):
         assistants = self._client.beta.assistants.list()
@@ -180,7 +178,12 @@ class AI:
         if self._zep:
             try:
                 await self._zep.user.add(user_id=user_id)
-                await self._zep.memory.add_session(user_id=user_id, session_id=user_id)
+            except Exception:
+                pass
+            try:
+                await self._zep.memory.add_session(
+                    user_id=user_id, session_id=user_id
+                )
             except Exception:
                 pass
 
@@ -203,7 +206,8 @@ class AI:
 
     async def _get_run_status(self, thread_id: str, run_id: str) -> str:
         run = self._client.beta.threads.runs.retrieve(
-            thread_id=thread_id, run_id=run_id)
+            thread_id=thread_id, run_id=run_id
+        )
         return run.status
 
     # converter tool - has to be sync
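_get_run_status wraps the OpenAI Assistants runs.retrieve call; callers typically poll it until the run leaves the queued/in-progress states. A hypothetical polling loop (the ai instance and the set of terminal states are assumptions, not taken from this diff):

    import asyncio

    TERMINAL = {"completed", "failed", "cancelled", "expired"}

    async def wait_for_run(ai, thread_id: str, run_id: str) -> str:
        # Poll until the Assistants API reports a terminal run state.
        status = await ai._get_run_status(thread_id, run_id)
        while status not in TERMINAL:
            await asyncio.sleep(0.5)
            status = await ai._get_run_status(thread_id, run_id)
        return status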
@@ -258,7 +262,11 @@ class AI:
             model=self._openai_embedding_model,
         )
         search_results = self._pinecone_index.query(
-            vector=response.data[0].embedding, top_k=limit, include_metadata=True, include_values=False)
+            vector=response.data[0].embedding,
+            top_k=limit,
+            include_metadata=True,
+            include_values=False,
+        )
         matches = search_results.matches
         metadata = [match.metadata for match in matches]
         return json.dumps(metadata)
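The reflowed query above is the usual embed-then-search flow: embed the query text with OpenAI, then hand the vector to Pinecone. A standalone sketch under the same assumptions (the index name and API keys are placeholders):

    from openai import OpenAI
    from pinecone import Pinecone

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    index = Pinecone(api_key="...").Index("my-index")  # placeholder index

    response = client.embeddings.create(
        input="what did the user ask about?",
        model="text-embedding-3-small",
    )
    results = index.query(
        vector=response.data[0].embedding,
        top_k=10,
        include_metadata=True,   # return stored metadata for each match
        include_values=False,    # skip raw vectors to keep the payload small
    )
    metadata = [match.metadata for match in results.matches]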
@@ -306,13 +314,16 @@ class AI:
 
     # summarize tool - has to be sync
     def summarize(
-        self, text: str, model: Literal["gemini-2.0-flash", "gemini-1.5-pro"] = "gemini-1.5-pro"
+        self,
+        text: str,
+        model: Literal["gemini-2.0-flash",
+                       "gemini-1.5-pro"] = "gemini-1.5-pro",
     ) -> str:
         """Summarize text using Google's Gemini language model.
 
         Args:
             text (str): The text content to be summarized
-            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
+            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
                 Gemini model to use. Defaults to "gemini-1.5-pro"
                 - gemini-2.0-flash: Faster, shorter summaries
                 - gemini-1.5-pro: More detailed summaries
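One way to call these Gemini models is Google's google-generativeai SDK; this is an assumption, since the diff does not show which client the package actually uses. A minimal sketch with a placeholder API key:

    import google.generativeai as genai

    genai.configure(api_key="...")  # placeholder Gemini API key (assumption)
    model = genai.GenerativeModel("gemini-1.5-pro")
    text = "Long article body to condense..."
    reply = model.generate_content(f"Summarize the following text:\n\n{text}")
    print(reply.text)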
@@ -408,7 +419,7 @@ class AI:
 
         Args:
             query (str): Search query string
-            model (Literal["sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"], optional):
+            model (Literal["sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"], optional):
                 Perplexity model to use. Defaults to "sonar"
                 - sonar: Fast, general-purpose search
                 - sonar-pro: Enhanced search capabilities
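Perplexity serves the sonar models over an OpenAI-compatible API, so a search call can reuse the OpenAI client with a different base URL. A sketch, with a placeholder key (the exact request the package builds is not shown in this diff):

    from openai import OpenAI

    perplexity = OpenAI(
        api_key="pplx-...",                    # placeholder Perplexity key
        base_url="https://api.perplexity.ai",  # OpenAI-compatible endpoint
    )
    completion = perplexity.chat.completions.create(
        model="sonar",
        messages=[{"role": "user", "content": "Latest Solana validator release?"}],
    )
    print(completion.choices[0].message.content)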
@@ -708,9 +719,7 @@ class AI:
                 content=full_response,
             ),
         ]
-        await self._zep.memory.add(
-            user_id=user_id, session_id=user_id, messages=messages
-        )
+        await self._zep.memory.add(session_id=user_id, messages=messages)
 
     async def conversation(
         self,
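Both memory writes in this release drop the user_id argument: in the zep_cloud SDK the session is already bound to its user by add_session, so memory.add only needs the session and the messages. A sketch of the resulting call, assuming the Message type from zep_cloud; the helper name is hypothetical:

    from zep_cloud.client import AsyncZep
    from zep_cloud.types import Message

    async def store_turn(
        zep: AsyncZep, user_id: str, user_text: str, reply: str
    ) -> None:
        # This package keys sessions by user, so session_id == user_id.
        messages = [
            Message(role="user", role_type="user", content=user_text),
            Message(role="assistant", role_type="assistant", content=reply),
        ]
        await zep.memory.add(session_id=user_id, messages=messages)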
@@ -822,9 +831,7 @@ class AI:
                 content=full_response,
             ),
         ]
-        await self._zep.memory.add(
-            user_id=user_id, session_id=user_id, messages=messages
-        )
+        await self._zep.memory.add(session_id=user_id, messages=messages)
 
         # Generate and stream the audio response
         with self._client.audio.speech.with_streaming_response.create(
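The audio path uses the OpenAI speech streaming helper; outside of a web response it can stream straight to a file. A minimal sketch (the model, voice, and input are assumptions, since the diff cuts off before the call's arguments):

    from openai import OpenAI

    client = OpenAI()
    with client.audio.speech.with_streaming_response.create(
        model="tts-1",        # assumed model, not taken from this diff
        voice="alloy",        # assumed voice
        input="Hello from solana-agent.",
    ) as response:
        response.stream_to_file("reply.mp3")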