solana-agent 1.1.2__py3-none-any.whl → 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- solana_agent/ai.py +349 -28
- {solana_agent-1.1.2.dist-info → solana_agent-1.3.0.dist-info}/METADATA +12 -5
- solana_agent-1.3.0.dist-info/RECORD +6 -0
- {solana_agent-1.1.2.dist-info → solana_agent-1.3.0.dist-info}/WHEEL +1 -1
- solana_agent-1.1.2.dist-info/RECORD +0 -6
- {solana_agent-1.1.2.dist-info → solana_agent-1.3.0.dist-info}/LICENSE +0 -0
solana_agent/ai.py
CHANGED

@@ -2,8 +2,10 @@ import asyncio
 import datetime
 import json
 from typing import AsyncGenerator, Literal, Optional, Dict, Any, Callable
+import uuid
+import cohere
 from pydantic import BaseModel
-from
+from pymongo import MongoClient
 from openai import OpenAI
 from openai import AssistantEventHandler
 from openai.types.beta.threads import TextDelta, Text
@@ -13,6 +15,7 @@ import requests
 from zep_cloud.client import AsyncZep
 from zep_cloud.client import Zep
 from zep_cloud.types import Message
+from pinecone import Pinecone


 class EventHandler(AssistantEventHandler):
@@ -41,28 +44,55 @@ class ToolConfig(BaseModel):

 class MongoDatabase:
     def __init__(self, db_url: str, db_name: str):
-        self._client =
+        self._client = MongoClient(db_url)
         self.db = self._client[db_name]
         self._threads = self.db["threads"]
         self.messages = self.db["messages"]
+        self.kb = self.db["kb"]
+        self.vector_stores = self.db["vector_stores"]
+        self.files = self.db["files"]

-
-
+    def save_thread_id(self, user_id: str, thread_id: str):
+        self._threads.insert_one({"thread_id": thread_id, "user_id": user_id})

-
-        document =
+    def get_thread_id(self, user_id: str) -> Optional[str]:
+        document = self._threads.find_one({"user_id": user_id})
         return document["thread_id"] if document else None

-
+    def save_message(self, user_id: str, metadata: Dict[str, Any]):
         metadata["user_id"] = user_id
-
+        self.messages.insert_one(metadata)

-
-
+    def delete_all_threads(self):
+        self._threads.delete_many({})

-
-
-
+    def clear_user_history(self, user_id: str):
+        self.messages.delete_many({"user_id": user_id})
+        self._threads.delete_one({"user_id": user_id})
+
+    def add_document_to_kb(self, id: str, namespace: str, document: str):
+        storage = {}
+        storage["namespace"] = namespace
+        storage["reference"] = id
+        storage["document"] = document
+        storage["timestamp"] = datetime.datetime.now(datetime.timezone.utc)
+        self.kb.insert_one(storage)
+
+    def get_vector_store_id(self) -> str | None:
+        document = self.vector_stores.find_one()
+        return document["vector_store_id"] if document else None
+
+    def save_vector_store_id(self, vector_store_id: str):
+        self.vector_stores.insert_one({"vector_store_id": vector_store_id})
+
+    def delete_vector_store_id(self, vector_store_id: str):
+        self.vector_stores.delete_one({"vector_store_id": vector_store_id})
+
+    def add_file(self, file_id: str):
+        self.files.insert_one({"file_id": file_id})
+
+    def delete_file(self, file_id: str):
+        self.files.delete_one({"file_id": file_id})


 class AI:
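The rewritten `MongoDatabase` now owns three extra collections (`kb`, `vector_stores`, `files`) alongside threads and messages. A minimal usage sketch of the new helpers; the connection URL, database name, and all ids are illustrative:

```python
# Sketch of the new MongoDatabase helpers; connection details are assumptions.
from solana_agent.ai import MongoDatabase

db = MongoDatabase("mongodb://localhost:27017", "solana_agent")

db.save_thread_id("user123", "thread_abc")
assert db.get_thread_id("user123") == "thread_abc"

db.add_document_to_kb(id="doc-1", namespace="global", document="user123 has 4 cats")
db.save_vector_store_id("vs_123")
print(db.get_vector_store_id())  # "vs_123"

db.clear_user_history("user123")  # drops the user's messages and thread mapping
```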
@@ -75,7 +105,12 @@ class AI:
         zep_api_key: str = None,
         perplexity_api_key: str = None,
         grok_api_key: str = None,
+        pinecone_api_key: str = None,
+        pinecone_index_name: str = None,
+        cohere_api_key: str = None,
+        cohere_model: Literal["rerank-v3.5"] = "rerank-v3.5",
         code_interpreter: bool = True,
+        file_search: bool = True,
         openai_assistant_model: Literal["gpt-4o-mini",
                                         "gpt-4o"] = "gpt-4o-mini",
         openai_embedding_model: Literal[
@@ -92,7 +127,12 @@ class AI:
             zep_api_key (str, optional): API key for Zep memory integration. Defaults to None
             perplexity_api_key (str, optional): API key for Perplexity search. Defaults to None
             grok_api_key (str, optional): API key for X/Twitter search via Grok. Defaults to None
+            pinecone_api_key (str, optional): API key for Pinecone. Defaults to None
+            pinecone_index_name (str, optional): Name of the Pinecone index. Defaults to None
+            cohere_api_key (str, optional): API key for Cohere search. Defaults to None
+            cohere_model (Literal["rerank-v3.5"], optional): Cohere model for reranking. Defaults to "rerank-v3.5"
             code_interpreter (bool, optional): Enable code interpretation. Defaults to True
+            file_search (bool, optional): Enable file search tool. Defaults to True
             openai_assistant_model (Literal["gpt-4o-mini", "gpt-4o"], optional): OpenAI model for assistant. Defaults to "gpt-4o-mini"
             openai_embedding_model (Literal["text-embedding-3-small", "text-embedding-3-large"], optional): OpenAI model for text embedding. Defaults to "text-embedding-3-small"

@@ -108,7 +148,7 @@ class AI:
         Notes:
             - Requires valid OpenAI API key for core functionality
             - Database instance for storing messages and threads
-            - Optional integrations for Zep, Perplexity and Grok
+            - Optional integrations for Zep, Perplexity, Pinecone, Cohere, and Grok
             - Supports code interpretation and custom tool functions
             - You must create the Pinecone index in the dashboard before using it
         """
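A sketch of constructing `AI` with the new knowledge-base options. The keyword names come from the hunk above; the API keys, index name, and surrounding arguments are placeholders, and `db` is the `MongoDatabase` from the earlier sketch:

```python
# Hypothetical wiring of the new constructor arguments; key values are placeholders.
ai = AI(
    openai_api_key="sk-...",
    instructions="You are a helpful agent.",
    database=db,                     # MongoDatabase from the previous sketch
    pinecone_api_key="pc-...",       # enables the Pinecone knowledge base
    pinecone_index_name="agent-kb",  # must already exist in the Pinecone dashboard
    cohere_api_key="co-...",         # optional reranking of KB matches
    file_search=True,                # adds the OpenAI file_search tool
)
```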
@@ -117,8 +157,20 @@ class AI:
         self._instructions = instructions
         self._openai_assistant_model = openai_assistant_model
         self._openai_embedding_model = openai_embedding_model
-        self.
-
+        self._file_search = file_search
+        if file_search:
+            self._tools = (
+                [
+                    {"type": "code_interpreter"},
+                    {"type": "file_search"},
+                ]
+                if code_interpreter
+                else [{"type": "file_search"}]
+            )
+        else:
+            self._tools = [{"type": "code_interpreter"}
+                           ] if code_interpreter else []
+
         self._tool_handlers = {}
         self._assistant_id = None
         self._database: MongoDatabase = database
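The two flags now select one of four built-in tool lists. A standalone restatement of the same selection logic (not library code), with its truth table checked inline:

```python
# Equivalent truth table for the _tools selection above.
def select_tools(code_interpreter: bool, file_search: bool) -> list[dict]:
    tools = []
    if code_interpreter:
        tools.append({"type": "code_interpreter"})
    if file_search:
        tools.append({"type": "file_search"})
    return tools

assert select_tools(True, True) == [{"type": "code_interpreter"}, {"type": "file_search"}]
assert select_tools(True, False) == [{"type": "code_interpreter"}]
assert select_tools(False, True) == [{"type": "file_search"}]
assert select_tools(False, False) == []
```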
@@ -127,6 +179,17 @@ class AI:
         self._sync_zep = Zep(api_key=zep_api_key) if zep_api_key else None
         self._perplexity_api_key = perplexity_api_key
         self._grok_api_key = grok_api_key
+        self._pinecone = (
+            Pinecone(api_key=pinecone_api_key) if pinecone_api_key else None
+        )
+        self._pinecone_index_name = pinecone_index_name if pinecone_index_name else None
+        self.kb = (
+            self._pinecone.Index(
+                self._pinecone_index_name) if self._pinecone else None
+        )
+        self._co = cohere.ClientV2(
+            api_key=cohere_api_key) if cohere_api_key else None
+        self._co_model = cohere_model if cohere_api_key else None

     async def __aenter__(self):
         assistants = self._client.beta.assistants.list()
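Each optional client follows the same guard pattern: construct it only when a key is present, otherwise hold `None` so downstream calls can no-op. A minimal sketch of the underlying SDK calls from the hunk above; the key values and index name are assumptions:

```python
# Optional-client pattern restated; keys and index name are placeholders.
import cohere
from pinecone import Pinecone

pinecone_api_key = "pc-..."   # placeholder
cohere_api_key = "co-..."     # placeholder

pc = Pinecone(api_key=pinecone_api_key) if pinecone_api_key else None
kb = pc.Index("agent-kb") if pc else None   # the index must already exist
co = cohere.ClientV2(api_key=cohere_api_key) if cohere_api_key else None

if kb is None:
    print("Knowledge base disabled")        # KB methods should then be skipped
```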
@@ -142,8 +205,25 @@ class AI:
                 tools=self._tools,
                 model=self._openai_assistant_model,
             ).id
-
-
+            self._database.delete_all_threads()
+        if self._file_search:
+            vector_store_id = self._database.get_vector_store_id()
+            if vector_store_id:
+                self._vector_store = self._client.beta.vector_stores.retrieve(
+                    vector_store_id=vector_store_id
+                )
+            else:
+                uid = uuid.uuid4().hex
+                self._vector_store = self._client.beta.vector_stores.create(
+                    name=uid
+                )
+                self._database.save_vector_store_id(self._vector_store.id)
+            self._client.beta.assistants.update(
+                assistant_id=self._assistant_id,
+                tool_resources={
+                    "file_search": {"vector_store_ids": [self._vector_store.id]}
+                },
+            )
         return self

     async def __aexit__(self, exc_type, exc_val, exc_tb):
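`__aenter__` now reuses a persisted vector store id when one exists and otherwise creates a store, records its id in MongoDB, and attaches it to the assistant, so the store survives process restarts. A usage sketch, assuming the `ai` instance from the earlier constructor sketch and a local `document.pdf`:

```python
import asyncio

async def main():
    # Entering the context creates or reuses the assistant and, with
    # file_search=True, attaches the persistent vector store to it.
    async with ai as agent:
        with open("document.pdf", "rb") as f:
            status = agent.add_file(f.read(), file_extension="pdf")
        print(status)  # "completed" once the file is vectorized

asyncio.run(main())
```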
@@ -151,12 +231,12 @@ class AI:
         pass

     async def _create_thread(self, user_id: str) -> str:
-        thread_id =
+        thread_id = self._database.get_thread_id(user_id)

         if thread_id is None:
             thread = self._client.beta.threads.create()
             thread_id = thread.id
-
+            self._database.save_thread_id(user_id, thread_id)
         if self._zep:
             try:
                 await self._zep.user.add(user_id=user_id)
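Thread lookup is now a plain get-or-create against MongoDB, so a user keeps one OpenAI thread across restarts. The flow restated as a standalone sketch, with a stand-in for the id that `threads.create()` would return:

```python
# Get-or-create flow of _create_thread; "thread_abc" stands in for
# the id returned by client.beta.threads.create().
thread_id = db.get_thread_id("user123")        # None on first contact
if thread_id is None:
    thread_id = "thread_abc"
    db.save_thread_id("user123", thread_id)
assert db.get_thread_id("user123") == thread_id  # stable on later calls
```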
@@ -192,6 +272,235 @@ class AI:
         )
         return run.status

+    def delete_vector_store_and_files(self):
+        """Delete the OpenAI vector store and files.
+
+        Example:
+            ```python
+            ai.delete_vector_store_and_files()
+            ```
+
+        Note:
+            - Requires file_search=True in AI initialization
+            - Deletes the vector store and all associated files
+        """
+        vector_store_id = self._database.get_vector_store_id()
+        if vector_store_id:
+            self._client.beta.vector_stores.delete(vector_store_id)
+            self._database.delete_vector_store_id(vector_store_id)
+            for file in self._database.files.find().to_list():
+                self._client.files.delete(file["file_id"])
+                self._database.delete_file(file["file_id"])
+
+    def max_files(self) -> bool:
+        """Check if the OpenAI vector store has reached its maximum file capacity.
+
+        Returns:
+            bool: True if file count is at maximum (10,000), False otherwise
+
+        Example:
+            ```python
+            if ai.max_files():
+                print("Vector store is full")
+            else:
+                print("Can still add more files")
+            ```
+
+        Note:
+            - Requires file_search=True in AI initialization
+            - OpenAI vector stores have a 10,000 file limit
+            - Returns False if vector store is not configured
+        """
+        return self._vector_store.file_counts.completed == 10000
+
+    def file_count(self) -> int:
+        """Get the total number of files processed in the OpenAI vector store.
+
+        Returns:
+            int: Number of successfully processed files in the vector store
+
+        Example:
+            ```python
+            count = ai.file_count()
+            print(f"Processed {count} files")
+            # Returns: "Processed 5 files"
+            ```
+
+        Note:
+            - Requires file_search=True in AI initialization
+            - Only counts successfully processed files
+            - Returns 0 if vector store is not configured
+        """
+        return self._vector_store.file_counts.completed
+
+    def add_file(
+        self,
+        file_stream: bytes,
+        file_extension: Literal[
+            "doc", "docx", "json", "md", "pdf", "pptx", "tex", "txt"
+        ] = "pdf",
+    ) -> Literal["in_progress", "completed", "cancelled", "failed"]:
+        """Upload and process a file in the OpenAI vector store.
+
+        Args:
+            file_stream (bytes): Raw bytes of the file to upload
+            file_extension (Literal, optional): File type extension. Defaults to "pdf"
+                Supported formats:
+                - doc, docx: Word documents
+                - json: JSON files
+                - md: Markdown files
+                - pdf: PDF documents
+                - pptx: PowerPoint presentations
+                - tex: LaTeX files
+                - txt: Plain text files
+
+        Returns:
+            Literal["in_progress", "completed", "cancelled", "failed"]: Status of file processing
+
+        Example:
+            ```python
+            with open('document.pdf', 'rb') as f:
+                status = ai.add_file(f.read(), file_extension="pdf")
+            if status == "completed":
+                print("File processed successfully")
+            ```
+
+        Note:
+            - Requires file_search=True in AI initialization
+            - Files are vectorized for semantic search
+            - Maximum file size: 512MB
+            - Maximum 10,000 files per vector store
+            - Processing may take a few seconds to minutes
+        """
+        vector_store_id = self._database.get_vector_store_id()
+        file = self._client.files.create(
+            file=(f"file.{file_extension}", file_stream), purpose="assistants"
+        )
+        file_batch = self._client.beta.vector_stores.files.create_and_poll(
+            vector_store_id=vector_store_id, file_id=file.id
+        )
+        self._database.add_file(file.id)
+        return file_batch.status
+
+    def search_kb(self, query: str, namespace: str = "global", limit: int = 3) -> str:
+        """Search Pinecone knowledge base using OpenAI embeddings.
+
+        Args:
+            query (str): Search query to find relevant documents
+            namespace (str, optional): Namespace of the Pinecone index to search. Defaults to "global".
+            limit (int, optional): Maximum number of results to return. Defaults to 3.
+
+        Returns:
+            str: JSON string of matched documents or error message
+
+        Example:
+            ```python
+            results = ai.search_kb("machine learning basics", namespace="user123")
+            # Returns: '["Document 1", "Document 2", ...]'
+            ```
+
+        Note:
+            - Requires configured Pinecone index
+            - Uses OpenAI embeddings for semantic search
+            - Returns JSON-serialized Pinecone match metadata results
+            - Returns error message string if search fails
+            - Optionally reranks results using Cohere API
+        """
+        try:
+            response = self._client.embeddings.create(
+                input=query,
+                model=self._openai_embedding_model,
+            )
+            search_results = self.kb.query(
+                vector=response.data[0].embedding,
+                top_k=10,
+                include_metadata=False,
+                include_values=False,
+                namespace=namespace,
+            )
+            matches = search_results.matches
+            ids = []
+            for match in matches:
+                ids.append(match.id)
+            docs = []
+            for id in ids:
+                document = self._database.kb.find_one({"reference": id})
+                docs.append(document["document"])
+            if self._co:
+                try:
+                    response = self._co.rerank(
+                        model=self._co_model,
+                        query=query,
+                        documents=docs,
+                        top_n=limit,
+                    )
+                    reranked_docs = response.results
+                    new_docs = []
+                    for doc in reranked_docs:
+                        new_docs.append(docs[doc.index])
+                    return json.dumps(new_docs)
+                except Exception:
+                    return json.dumps(docs[:limit])
+            else:
+                return json.dumps(docs[:limit])
+        except Exception as e:
+            return f"Failed to search KB. Error: {e}"
+
+    def add_document_to_kb(
+        self,
+        document: str,
+        id: str = uuid.uuid4().hex,
+        namespace: str = "global",
+    ):
+        """Add a document to the Pinecone knowledge base with OpenAI embeddings.
+
+        Args:
+            document (str): Document to add to the knowledge base
+            id (str, optional): Unique identifier for the document. Defaults to random UUID.
+            namespace (str): Namespace of the Pinecone index to search. Defaults to "global".
+
+        Example:
+            ```python
+            ai.add_document_to_kb("user123 has 4 cats")
+            ```
+
+        Note:
+            - Requires Pinecone index to be configured
+            - Uses OpenAI embeddings API
+        """
+        response = self._client.embeddings.create(
+            input=document,
+            model=self._openai_embedding_model,
+        )
+        self.kb.upsert(
+            vectors=[
+                {
+                    "id": id,
+                    "values": response.data[0].embedding,
+                }
+            ],
+            namespace=namespace,
+        )
+        self._database.add_document_to_kb(id, namespace, document)
+
+    def delete_document_from_kb(self, id: str, user_id: str = "global"):
+        """Delete a document from the Pinecone knowledge base.
+
+        Args:
+            id (str): Unique identifier for the document
+            user_id (str): Unique identifier for the user. Defaults to "global".
+
+        Example:
+            ```python
+            ai.delete_document_from_kb("document_id", "user123")
+            ```
+
+        Note:
+            - Requires Pinecone index to be configured
+        """
+        self.kb.delete(ids=[id], namespace=user_id)
+        self._database.kb.delete_one({"reference": id})
+
     # check time tool - has to be sync
     def check_time(self) -> str:
         """Get current UTC time formatted as a string.
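Taken together, the new methods give a write/search round trip: embed with OpenAI, upsert the vector into Pinecone, keep the raw text in MongoDB under the same id, and rerank on read when Cohere is configured. A hedged end-to-end sketch using the `ai` instance from earlier; all ids and text are illustrative:

```python
# Round-trip sketch over the new KB methods.
ai.add_document_to_kb("user123 has 4 cats", id="doc-1", namespace="user123")
ai.add_document_to_kb("user123 lives in Lisbon", id="doc-2", namespace="user123")

results = ai.search_kb("how many cats does user123 have?", namespace="user123")
print(results)  # JSON list of document strings, reranked when Cohere is configured

ai.delete_document_from_kb("doc-1", "user123")
```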
@@ -209,7 +518,9 @@ class AI:
         This is a synchronous tool method required for OpenAI function calling.
         Always returns time in UTC timezone for consistency.
         """
-        return datetime.datetime.now(datetime.timezone.utc).strftime(
+        return datetime.datetime.now(datetime.timezone.utc).strftime(
+            "%Y-%m-%d %H:%M:%S %Z"
+        )

     # search facts tool - has to be sync
     def search_facts(
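Because `datetime.now(timezone.utc)` returns a timezone-aware datetime, the `%Z` directive renders as `UTC`. A quick standalone check:

```python
import datetime

stamp = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S %Z")
print(stamp)  # e.g. "2025-02-20 18:04:05 UTC"
```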
@@ -336,11 +647,13 @@ class AI:
         use_perplexity: bool = True,
         use_grok: bool = True,
         use_facts: bool = True,
+        use_kb: bool = True,
         perplexity_model: Literal[
             "sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"
         ] = "sonar",
         openai_model: Literal["o1", "o3-mini"] = "o3-mini",
         grok_model: Literal["grok-beta"] = "grok-beta",
+        namespace: str = "global",
     ) -> str:
         """Combine multiple data sources with AI reasoning to answer queries.

@@ -354,6 +667,7 @@ class AI:
             perplexity_model (Literal, optional): Perplexity model to use. Defaults to "sonar"
             openai_model (Literal, optional): OpenAI model for reasoning. Defaults to "o3-mini"
             grok_model (Literal, optional): Grok model for X search. Defaults to "grok-beta"
+            namespace (str): Namespace of the Pinecone index to search. Defaults to "global"

         Returns:
             str: Reasoned response combining all enabled data sources or error message
@@ -373,6 +687,13 @@ class AI:
         Will gracefully handle missing or failed data sources.
         """
         try:
+            if use_kb:
+                try:
+                    kb_results = self.search_kb(query, namespace)
+                except Exception:
+                    kb_results = ""
+            else:
+                kb_results = ""
             if use_facts:
                 try:
                     facts = self.search_facts(user_id, query)
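The reasoning method now folds KB hits into its prompt alongside facts and web results, degrading to an empty string when the KB lookup fails. A call sketch; the hunks never show the `def` line, so the name `reason` and the surrounding arguments are inferred, and the ids are placeholders:

```python
# Hypothetical call combining the KB with the other sources; "reason" is the
# assumed method name and all identifiers are placeholders.
answer = ai.reason(
    user_id="user123",
    query="How many cats does user123 have?",
    use_kb=True,           # include Pinecone knowledge-base results
    namespace="user123",   # search a user-specific namespace instead of "global"
)
print(answer)
```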
@@ -405,7 +726,7 @@ class AI:
                 },
                 {
                     "role": "user",
-                    "content": f"Query: {query}, Facts: {facts}, Internet Search Results: {search_results}, X Search Results: {x_search_results}",
+                    "content": f"Query: {query}, Facts: {facts}, KB Results: {kb_results}, Internet Search Results: {search_results}, X Search Results: {x_search_results}",
                 },
             ],
         )
@@ -474,7 +795,7 @@ class AI:
         except Exception:
             pass
         try:
-
+            self._database.clear_user_history(user_id)
         except Exception:
             pass
         try:
@@ -491,7 +812,7 @@ class AI:
         # Deletes the assistant conversation thread for a user
         ```
         """
-        thread_id =
+        thread_id = self._database.get_thread_id(user_id)
         await self._client.beta.threads.delete(thread_id=thread_id)

     async def delete_facts(self, user_id: str):
@@ -547,7 +868,7 @@ class AI:
         """
         self._accumulated_value_queue = asyncio.Queue()

-        thread_id =
+        thread_id = self._database.get_thread_id(user_id)

         if thread_id is None:
             thread_id = await self._create_thread(user_id)
@@ -602,7 +923,7 @@ class AI:
             "timestamp": datetime.datetime.now(datetime.timezone.utc),
         }

-
+        self._database.save_message(user_id, metadata)
         if self._zep:
             messages = [
                 Message(
@@ -668,7 +989,7 @@ class AI:
         # Reset the queue for each new conversation
         self._accumulated_value_queue = asyncio.Queue()

-        thread_id =
+        thread_id = self._database.get_thread_id(user_id)

         if thread_id is None:
             thread_id = await self._create_thread(user_id)
@@ -713,7 +1034,7 @@ class AI:
             "timestamp": datetime.datetime.now(datetime.timezone.utc),
         }

-
+        self._database.save_message(user_id, metadata)

         if self._zep:
             messages = [
{solana_agent-1.1.2.dist-info → solana_agent-1.3.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 1.1.2
+Version: 1.3.0
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
@@ -16,9 +16,11 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3 :: Only
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
-Requires-Dist:
-Requires-Dist: openai (>=1.
+Requires-Dist: cohere (>=5.13.12,<6.0.0)
+Requires-Dist: openai (>=1.63.2,<2.0.0)
+Requires-Dist: pinecone (>=6.0.1,<7.0.0)
 Requires-Dist: pydantic (>=2.10.6,<3.0.0)
+Requires-Dist: pymongo (>=4.11.1,<5.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
 Requires-Dist: zep-cloud (>=2.4.0,<3.0.0)
 Project-URL: Repository, https://github.com/truemagic-coder/solana-agent
@@ -42,7 +44,8 @@ Unlike traditional AI assistants that forget conversations after each session, S
 - 📈 **Continuous Learning**: Evolves with every new interaction
 - 🎯 **Context-Aware**: Recalls past interactions for more relevant responses
 - 🔄 **Self-Improving**: Builds knowledge and improves reasoning automatically
--
+- 🧠 **Knowledge Base**: Add domain-specific knowledge for better reasoning
+- 🏢 **File Context**: Upload proprietary files to be part of the conversation
 - 🛡️ **Secure**: Secure and private memory and data storage

 **Experience Agentic IQ!**
@@ -54,7 +57,7 @@ Unlike traditional AI assistants that forget conversations after each session, S
 - Real-time voice-to-voice conversations

 🧠 **Memory System and Extensibility**
-- Advanced AI memory combining conversational context, conversational facts, and parallel tool calling
+- Advanced AI memory combining conversational context, conversational facts, knowledge base, file search, and parallel tool calling
 - Create custom tools for extending the Agent's capabilities like further API integrations

 🔍 **Multi-Source Search and Reasoning**
@@ -62,6 +65,8 @@ Unlike traditional AI assistants that forget conversations after each session, S
 - X (Twitter) search using Grok
 - Conversational fact search powered by Zep
 - Conversational message history using MongoDB (on-prem or hosted)
+- Knowledge Base using Pinecone with reranking by Cohere - available globally or user-specific
+- File search using OpenAI
 - Comprehensive reasoning combining multiple data sources

 ## Why Choose Solana Agent Over LangChain?
@@ -72,6 +77,8 @@ Unlike traditional AI assistants that forget conversations after each session, S
 - Built-in episodic memory vs LangChain's basic memory types
 - Persistent cross-session knowledge retention
 - Automatic self-learning from conversations
+- Knowledge Base to add domain-specific knowledge
+- File uploads to perform document context search

 🏢 **Enterprise Focus**
 - Production-ready out of the box in a few lines of code

solana_agent-1.3.0.dist-info/RECORD
ADDED

@@ -0,0 +1,6 @@
+solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
+solana_agent/ai.py,sha256=o6ztao5oFuzatYIkYxGlQSf16JYgQeIaUtCwL15ojoU,41942
+solana_agent-1.3.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+solana_agent-1.3.0.dist-info/METADATA,sha256=bqeaN3lk98Fd1UWZmuZaH_HiEgNo11Q0K0GmNhbTgoY,4694
+solana_agent-1.3.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+solana_agent-1.3.0.dist-info/RECORD,,

solana_agent-1.1.2.dist-info/RECORD
DELETED

@@ -1,6 +0,0 @@
-solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
-solana_agent/ai.py,sha256=mM-oCkrUFO8Lp0dZoqWOlOU0f9lAKmjeuc3eFRWy9zo,29995
-solana_agent-1.1.2.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
-solana_agent-1.1.2.dist-info/METADATA,sha256=YNglp_2p5pn02MJjtzqAJUTjo_RyiKtp-zIZklJM3c0,4276
-solana_agent-1.1.2.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-solana_agent-1.1.2.dist-info/RECORD,,

{solana_agent-1.1.2.dist-info → solana_agent-1.3.0.dist-info}/LICENSE
File without changes