solana-agent 1.2.0__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
solana_agent/ai.py CHANGED
@@ -49,6 +49,8 @@ class MongoDatabase:
49
49
  self._threads = self.db["threads"]
50
50
  self.messages = self.db["messages"]
51
51
  self.kb = self.db["kb"]
52
+ self.vector_stores = self.db["vector_stores"]
53
+ self.files = self.db["files"]
52
54
 
53
55
  def save_thread_id(self, user_id: str, thread_id: str):
54
56
  self._threads.insert_one({"thread_id": thread_id, "user_id": user_id})
@@ -76,6 +78,22 @@ class MongoDatabase:
76
78
  storage["timestamp"] = datetime.datetime.now(datetime.timezone.utc)
77
79
  self.kb.insert_one(storage)
78
80
 
81
+ def get_vector_store_id(self) -> str | None:
82
+ document = self.vector_stores.find_one()
83
+ return document["vector_store_id"] if document else None
84
+
85
+ def save_vector_store_id(self, vector_store_id: str):
86
+ self.vector_stores.insert_one({"vector_store_id": vector_store_id})
87
+
88
+ def delete_vector_store_id(self, vector_store_id: str):
89
+ self.vector_stores.delete_one({"vector_store_id": vector_store_id})
90
+
91
+ def add_file(self, file_id: str):
92
+ self.files.insert_one({"file_id": file_id})
93
+
94
+ def delete_file(self, file_id: str):
95
+ self.files.delete_one({"file_id": file_id})
96
+
79
97
 
80
98
  class AI:
81
99
  def __init__(
@@ -92,6 +110,7 @@ class AI:
92
110
  cohere_api_key: str = None,
93
111
  cohere_model: Literal["rerank-v3.5"] = "rerank-v3.5",
94
112
  code_interpreter: bool = True,
113
+ file_search: bool = True,
95
114
  openai_assistant_model: Literal["gpt-4o-mini",
96
115
  "gpt-4o"] = "gpt-4o-mini",
97
116
  openai_embedding_model: Literal[
@@ -113,6 +132,7 @@ class AI:
113
132
  cohere_api_key (str, optional): API key for Cohere search. Defaults to None
114
133
  cohere_model (Literal["rerank-v3.5"], optional): Cohere model for reranking. Defaults to "rerank-v3.5"
115
134
  code_interpreter (bool, optional): Enable code interpretation. Defaults to True
135
+ file_search (bool, optional): Enable file search tool. Defaults to True
116
136
  openai_assistant_model (Literal["gpt-4o-mini", "gpt-4o"], optional): OpenAI model for assistant. Defaults to "gpt-4o-mini"
117
137
  openai_embedding_model (Literal["text-embedding-3-small", "text-embedding-3-large"], optional): OpenAI model for text embedding. Defaults to "text-embedding-3-small"
118
138
 
@@ -137,8 +157,20 @@ class AI:
137
157
  self._instructions = instructions
138
158
  self._openai_assistant_model = openai_assistant_model
139
159
  self._openai_embedding_model = openai_embedding_model
140
- self._tools = [{"type": "code_interpreter"}
141
- ] if code_interpreter else []
160
+ self._file_search = file_search
161
+ if file_search:
162
+ self._tools = (
163
+ [
164
+ {"type": "code_interpreter"},
165
+ {"type": "file_search"},
166
+ ]
167
+ if code_interpreter
168
+ else [{"type": "file_search"}]
169
+ )
170
+ else:
171
+ self._tools = [{"type": "code_interpreter"}
172
+ ] if code_interpreter else []
173
+
142
174
  self._tool_handlers = {}
143
175
  self._assistant_id = None
144
176
  self._database: MongoDatabase = database
@@ -174,7 +206,23 @@ class AI:
174
206
  model=self._openai_assistant_model,
175
207
  ).id
176
208
  self._database.delete_all_threads()
177
-
209
+ if self._file_search:
210
+ vector_store_id = self._database.get_vector_store_id()
211
+ if vector_store_id:
212
+ self._vector_store = self._client.beta.vector_stores.retrieve(
213
+ vector_store_id=vector_store_id
214
+ )
215
+ else:
216
+ uid = uuid.uuid4().hex
217
+ self._vector_store = self._client.beta.vector_stores.create(
218
+ name=uid)
219
+ self._database.save_vector_store_id(self._vector_store.id)
220
+ self._client.beta.assistants.update(
221
+ assistant_id=self._assistant_id,
222
+ tool_resources={
223
+ "file_search": {"vector_store_ids": [self._vector_store.id]}
224
+ },
225
+ )
178
226
  return self
179
227
 
180
228
  async def __aexit__(self, exc_type, exc_val, exc_tb):
@@ -223,6 +271,106 @@ class AI:
223
271
  )
224
272
  return run.status
225
273
 
274
+ def delete_vector_store_and_files(self):
275
+ """Delete the OpenAI vector store and files.
276
+
277
+ Example:
278
+ ```python
279
+ ai.delete_vector_store_and_files()
280
+ ```
281
+
282
+ Note:
283
+ - Requires file_search=True in AI initialization
284
+ - Deletes the vector store and all associated files
285
+ """
286
+ vector_store_id = self._database.get_vector_store_id()
287
+ if vector_store_id:
288
+ self._client.beta.vector_stores.delete(vector_store_id)
289
+ self._database.delete_vector_store_id(vector_store_id)
290
+ for file in self._database.files.find().to_list():
291
+ self._client.files.delete(file["file_id"])
292
+ self._database.delete_file(file["file_id"])
293
+
294
+ def max_files(self) -> bool:
295
+ """Check if the OpenAI vector store has reached its maximum file capacity.
296
+
297
+ Returns:
298
+ bool: True if file count is at maximum (10,000), False otherwise
299
+
300
+ Example:
301
+ ```python
302
+ if ai.max_files():
303
+ print("Vector store is full")
304
+ else:
305
+ print("Can still add more files")
306
+ ```
307
+
308
+ Note:
309
+ - Requires file_search=True in AI initialization
310
+ - OpenAI vector stores have a 10,000 file limit
311
+ - Returns False if vector store is not configured
312
+ """
313
+ return self._vector_store.file_counts.completed == 10000
314
+
315
+ def file_count(self) -> int:
316
+ """Get the total number of files processed in the OpenAI vector store.
317
+
318
+ Returns:
319
+ int: Number of successfully processed files in the vector store
320
+
321
+ Example:
322
+ ```python
323
+ count = ai.file_count()
324
+ print(f"Processed {count} files")
325
+ # Returns: "Processed 5 files"
326
+ ```
327
+
328
+ Note:
329
+ - Requires file_search=True in AI initialization
330
+ - Only counts successfully processed files
331
+ - Returns 0 if vector store is not configured
332
+ """
333
+ return self._vector_store.file_counts.completed
334
+
335
+ def add_file(
336
+ self,
337
+ filename: str,
338
+ file_stream: bytes,
339
+ ) -> Literal["in_progress", "completed", "cancelled", "failed"]:
340
+ """Upload and process a file in the OpenAI vector store.
341
+
342
+ Args:
343
+ filename (str): Name of the file to upload
344
+ file_stream (bytes): Raw bytes of the file to upload
345
+
346
+ Returns:
347
+ Literal["in_progress", "completed", "cancelled", "failed"]: Status of file processing
348
+
349
+ Example:
350
+ ```python
351
+ with open('document.pdf', 'rb') as f:
352
+ status = ai.add_file("document.pdf", f.read())
353
+ if status == "completed":
354
+ print("File processed successfully")
355
+ ```
356
+
357
+ Note:
358
+ - Requires file_search=True in AI initialization
359
+ - Files are vectorized for semantic search
360
+ - Maximum file size: 512MB
361
+ - Maximum 10,000 files per vector store
362
+ - Processing may take a few seconds to minutes
363
+ """
364
+ vector_store_id = self._database.get_vector_store_id()
365
+ file = self._client.files.create(
366
+ file=(filename, file_stream), purpose="assistants"
367
+ )
368
+ file_batch = self._client.beta.vector_stores.files.create_and_poll(
369
+ vector_store_id=vector_store_id, file_id=file.id
370
+ )
371
+ self._database.add_file(file.id)
372
+ return file_batch.status
373
+
226
374
  def search_kb(self, query: str, namespace: str = "global", limit: int = 3) -> str:
227
375
  """Search Pinecone knowledge base using OpenAI embeddings.
228
376
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: solana-agent
3
- Version: 1.2.0
3
+ Version: 1.3.1
4
4
  Summary: Build self-learning AI Agents
5
5
  License: MIT
6
6
  Keywords: ai,openai,ai agents
@@ -17,7 +17,7 @@ Classifier: Programming Language :: Python :: 3.13
17
17
  Classifier: Programming Language :: Python :: 3 :: Only
18
18
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
19
19
  Requires-Dist: cohere (>=5.13.12,<6.0.0)
20
- Requires-Dist: openai (>=1.61.1,<2.0.0)
20
+ Requires-Dist: openai (>=1.63.2,<2.0.0)
21
21
  Requires-Dist: pinecone (>=6.0.1,<7.0.0)
22
22
  Requires-Dist: pydantic (>=2.10.6,<3.0.0)
23
23
  Requires-Dist: pymongo (>=4.11.1,<5.0.0)
@@ -45,7 +45,7 @@ Unlike traditional AI assistants that forget conversations after each session, S
45
45
  - 🎯 **Context-Aware**: Recalls past interactions for more relevant responses
46
46
  - 🔄 **Self-Improving**: Builds knowledge and improves reasoning automatically
47
47
  - 🧠 **Knowledge Base**: Add domain-specific knowledge for better reasoning
48
- - 🏢 **Enterprise-Ready**: Scales from personal to organization-wide deployment
48
+ - 🏢 **File Context**: Upload proprietary files to be part of the conversation
49
49
  - 🛡️ **Secure**: Secure and private memory and data storage
50
50
 
51
51
  **Experience Agentic IQ!**
@@ -57,7 +57,7 @@ Unlike traditional AI assistants that forget conversations after each session, S
57
57
  - Real-time voice-to-voice conversations
58
58
 
59
59
  🧠 **Memory System and Extensibility**
60
- - Advanced AI memory combining conversational context, conversational facts, and parallel tool calling
60
+ - Advanced AI memory combining conversational context, conversational facts, knowledge base, file search, and parallel tool calling
61
61
  - Create custom tools for extending the Agent's capabilities like further API integrations
62
62
 
63
63
  🔍 **Multi-Source Search and Reasoning**
@@ -66,6 +66,7 @@ Unlike traditional AI assistants that forget conversations after each session, S
66
66
  - Conversational fact search powered by Zep
67
67
  - Conversational message history using MongoDB (on-prem or hosted)
68
68
  - Knowledge Base using Pinecone with reranking by Cohere - available globally or user-specific
69
+ - File search using OpenAI
69
70
  - Comprehensive reasoning combining multiple data sources
70
71
 
71
72
  ## Why Choose Solana Agent Over LangChain?
@@ -77,6 +78,7 @@ Unlike traditional AI assistants that forget conversations after each session, S
77
78
  - Persistent cross-session knowledge retention
78
79
  - Automatic self-learning from conversations
79
80
  - Knowledge Base to add domain specific knowledge
81
+ - File uploads to perform document context search
80
82
 
81
83
  🏢 **Enterprise Focus**
82
84
  - Production-ready out of the box in a few lines of code
@@ -0,0 +1,6 @@
1
+ solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
2
+ solana_agent/ai.py,sha256=RxbyG9UzvFjZSGTZ2SC7t6U0Pfw6uERaAisxGDqLsUw,41457
3
+ solana_agent-1.3.1.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
4
+ solana_agent-1.3.1.dist-info/METADATA,sha256=I6QyN8qmmeFzpNNlXN4ZquFJ3srdK5FuooKEOqmzTrw,4694
5
+ solana_agent-1.3.1.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
6
+ solana_agent-1.3.1.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.1.0
2
+ Generator: poetry-core 2.1.1
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
@@ -1,6 +0,0 @@
1
- solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
2
- solana_agent/ai.py,sha256=Xi3_9bFhF6qsVNsYCSNJ11rXB3aoFsy-UPY2EgUffHI,36023
3
- solana_agent-1.2.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
4
- solana_agent-1.2.0.dist-info/METADATA,sha256=ZNkNWKcJ-nJWs7qmQ7693tGVlQ4-nSKz8doRan-QFx8,4587
5
- solana_agent-1.2.0.dist-info/WHEEL,sha256=7dDg4QLnNKTvwIDR9Ac8jJaAmBC_owJrckbC0jjThyA,88
6
- solana_agent-1.2.0.dist-info/RECORD,,