solana-agent 0.0.15__tar.gz → 0.0.17__tar.gz

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 0.0.15
+Version: 0.0.17
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "0.0.15"
+version = "0.0.17"
 description = "Build self-learning AI Agents"
 authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
 license = "MIT"
@@ -84,8 +84,9 @@ class AI:
         code_interpreter: bool = True,
         openai_assistant_model: Literal["gpt-4o-mini",
                                         "gpt-4o"] = "gpt-4o-mini",
-        openai_embedding_model: Literal["text-embedding-3-small",
-                                        "text-embedding-3-large"] = "text-embedding-3-small"
+        openai_embedding_model: Literal[
+            "text-embedding-3-small", "text-embedding-3-large"
+        ] = "text-embedding-3-small",
     ):
         """Initialize a new AI assistant with memory and tool integration capabilities.
 
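Note on this hunk: only the wrapping and a trailing comma change; the accepted embedding models are unchanged. For reference, a minimal sketch of the OpenAI embeddings call these Literal values feed into, assuming a standard openai client (the input string here is made up):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Either Literal value maps directly onto the embeddings endpoint.
    response = client.embeddings.create(
        model="text-embedding-3-small",  # or "text-embedding-3-large"
        input="text to embed",
    )
    vector = response.data[0].embedding  # list[float]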
@@ -131,22 +132,19 @@ class AI:
         self._assistant_id = None
         self._database: MongoDatabase = database
         self._accumulated_value_queue = asyncio.Queue()
-        self._zep = (
-            AsyncZep(api_key=zep_api_key)
-            if zep_api_key
-            else None
-        )
-        self._sync_zep = (
-            Zep(api_key=zep_api_key) if zep_api_key else None
-        )
+        self._zep = AsyncZep(api_key=zep_api_key) if zep_api_key else None
+        self._sync_zep = Zep(api_key=zep_api_key) if zep_api_key else None
         self._perplexity_api_key = perplexity_api_key
         self._grok_api_key = grok_api_key
         self._gemini_api_key = gemini_api_key
-        self._pinecone = Pinecone(
-            api_key=pinecone_api_key) if pinecone_api_key else None
+        self._pinecone = (
+            Pinecone(api_key=pinecone_api_key) if pinecone_api_key else None
+        )
         self._pinecone_index_name = pinecone_index_name if pinecone_index_name else None
-        self._pinecone_index = self._pinecone.Index(
-            self._pinecone_index_name) if self._pinecone else None
+        self._pinecone_index = (
+            self._pinecone.Index(
+                self._pinecone_index_name) if self._pinecone else None
+        )
 
     async def __aenter__(self):
         assistants = self._client.beta.assistants.list()
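This hunk is a pure reflow of the constructor's optional-client pattern: each integration is instantiated only when its key is supplied, and the attribute stays None otherwise. Condensed, with hypothetical config values:

    from pinecone import Pinecone

    pinecone_api_key = None    # hypothetical: key not configured
    pinecone_index_name = None

    # Client and index remain None unless both values are provided.
    pc = Pinecone(api_key=pinecone_api_key) if pinecone_api_key else None
    index = pc.Index(pinecone_index_name) if pc else None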
@@ -157,7 +155,7 @@ class AI:
             self._assistant_id = existing_assistant.id
         else:
             self._assistant_id = self._client.beta.assistants.create(
-                name=self.name,
+                name=self._name,
                 instructions=self._instructions,
                 tools=self._tools,
                 model=self._openai_assistant_model,
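Unlike the surrounding reflows, name=self._name is a bug fix: the constructor stores the assistant name on self._name, so the old self.name raised AttributeError whenever no existing assistant matched and a new one had to be created. The failure mode, reduced to a toy example:

    class Example:
        def __init__(self, name: str):
            self._name = name  # stored under a leading-underscore attribute

    ex = Example("agent")
    ex._name  # "agent"
    ex.name   # AttributeError: 'Example' object has no attribute 'name'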
@@ -183,7 +181,9 @@ class AI:
         except Exception:
             pass
         try:
-            await self._zep.memory.add_session(user_id=user_id, session_id=user_id)
+            await self._zep.memory.add_session(
+                user_id=user_id, session_id=user_id
+            )
         except Exception:
             pass
 
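Behavior is unchanged here; the call is only rewrapped. The try/except around it makes session registration best-effort, so re-adding an existing Zep session is silently ignored. Roughly, assuming the zep-cloud async client:

    from zep_cloud.client import AsyncZep

    async def ensure_session(zep: AsyncZep, user_id: str) -> None:
        try:
            # The package keys sessions by user id, so both ids coincide.
            await zep.memory.add_session(user_id=user_id, session_id=user_id)
        except Exception:
            pass  # session may already exist; treat as a no-op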
@@ -206,7 +206,8 @@ class AI:
 
     async def _get_run_status(self, thread_id: str, run_id: str) -> str:
         run = self._client.beta.threads.runs.retrieve(
-            thread_id=thread_id, run_id=run_id)
+            thread_id=thread_id, run_id=run_id
+        )
         return run.status
 
     # converter tool - has to be sync
@@ -261,7 +262,11 @@ class AI:
             model=self._openai_embedding_model,
         )
         search_results = self._pinecone_index.query(
-            vector=response.data[0].embedding, top_k=limit, include_metadata=True, include_values=False)
+            vector=response.data[0].embedding,
+            top_k=limit,
+            include_metadata=True,
+            include_values=False,
+        )
         matches = search_results.matches
         metadata = [match.metadata for match in matches]
         return json.dumps(metadata)
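Formatting-only again: one keyword argument per line. The underlying flow embeds the query with OpenAI and asks Pinecone for metadata-only matches. A sketch, assuming client (OpenAI) and index (Pinecone index) are constructed as in the hunks above and the query text is hypothetical:

    response = client.embeddings.create(
        model="text-embedding-3-small",
        input="user query",
    )
    results = index.query(
        vector=response.data[0].embedding,
        top_k=5,                 # `limit` in the method above
        include_metadata=True,   # the caller consumes only metadata
        include_values=False,    # raw vectors are not returned
    )
    metadata = [m.metadata for m in results.matches]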
@@ -309,13 +314,16 @@ class AI:
 
     # summarize tool - has to be sync
     def summarize(
-        self, text: str, model: Literal["gemini-2.0-flash", "gemini-1.5-pro"] = "gemini-1.5-pro"
+        self,
+        text: str,
+        model: Literal["gemini-2.0-flash",
+                       "gemini-1.5-pro"] = "gemini-1.5-pro",
     ) -> str:
         """Summarize text using Google's Gemini language model.
 
         Args:
             text (str): The text content to be summarized
-            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional): 
+            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
                 Gemini model to use. Defaults to "gemini-1.5-pro"
                 - gemini-2.0-flash: Faster, shorter summaries
                 - gemini-1.5-pro: More detailed summaries
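The summarize signature is rewrapped with a trailing comma added; both Gemini literals and the gemini-1.5-pro default are unchanged. The docstring's - and + lines differ only in trailing whitespace, and the same whitespace cleanup appears in the sonar docstring hunk below. Hypothetical call shape, assuming ai is an instance of this AI class:

    summary = ai.summarize("some long text ...", model="gemini-2.0-flash")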
@@ -411,7 +419,7 @@ class AI:
 
         Args:
             query (str): Search query string
-            model (Literal["sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"], optional): 
+            model (Literal["sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"], optional):
                 Perplexity model to use. Defaults to "sonar"
                 - sonar: Fast, general-purpose search
                 - sonar-pro: Enhanced search capabilities
@@ -711,9 +719,7 @@ class AI:
                 content=full_response,
             ),
         ]
-        await self._zep.memory.add(
-            user_id=user_id, session_id=user_id, messages=messages
-        )
+        await self._zep.memory.add(session_id=user_id, messages=messages)
 
     async def conversation(
         self,
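This hunk carries the release's one real API-usage change: zep's memory.add is now called without user_id, so the session id alone identifies the conversation. Because sessions are registered with session_id == user_id (see the add_session hunk above), messages still land under the same user. Call shape, assuming the zep-cloud Message type:

    from zep_cloud.types import Message

    messages = [
        Message(role_type="user", content="hello"),
        Message(role_type="assistant", content="hi there"),
    ]
    # 0.0.17: session_id alone; 0.0.15 also passed user_id=user_id.
    # (Called from the async send path, hence the bare await.)
    await zep.memory.add(session_id=user_id, messages=messages)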
@@ -825,9 +831,7 @@ class AI:
                 content=full_response,
             ),
         ]
-        await self._zep.memory.add(
-            user_id=user_id, session_id=user_id, messages=messages
-        )
+        await self._zep.memory.add(session_id=user_id, messages=messages)
 
         # Generate and stream the audio response
         with self._client.audio.speech.with_streaming_response.create(