solana-agent 0.0.6__py3-none-any.whl → 0.0.7__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- solana_agent/ai.py +477 -209
- solana_agent-0.0.7.dist-info/METADATA +153 -0
- solana_agent-0.0.7.dist-info/RECORD +6 -0
- solana_agent-0.0.6.dist-info/METADATA +0 -97
- solana_agent-0.0.6.dist-info/RECORD +0 -6
- {solana_agent-0.0.6.dist-info → solana_agent-0.0.7.dist-info}/LICENSE +0 -0
- {solana_agent-0.0.6.dist-info → solana_agent-0.0.7.dist-info}/WHEEL +0 -0
solana_agent/ai.py
CHANGED
@@ -2,53 +2,38 @@ import asyncio
 from datetime import datetime
 import json
 from typing import AsyncGenerator, List, Literal, Optional, Dict, Any, Callable
+import uuid
 from pydantic import BaseModel
 from motor.motor_asyncio import AsyncIOMotorClient
 from openai import OpenAI
-import openai
-import aiosqlite
 from openai import AssistantEventHandler
 from openai.types.beta.threads import TextDelta, Text
 from typing_extensions import override
-import sqlite3
 import inspect
 import requests
-from
-from
-from
+from zep_cloud.client import AsyncZep
+from zep_cloud.client import Zep
+from zep_cloud.types import Message, RoleType
 import pandas as pd
-
-
-def adapt_datetime(ts):
-    return ts.isoformat()
-
-
-# Custom converter for datetime
-def convert_datetime(ts):
-    return datetime.fromisoformat(ts)
-
-
-# Register the adapter and converter
-sqlite3.register_adapter(datetime, adapt_datetime)
-sqlite3.register_converter("timestamp", convert_datetime)
+from pinecone import Pinecone
 
 
 class EventHandler(AssistantEventHandler):
     def __init__(self, tool_handlers, ai_instance):
         super().__init__()
-        self.
-        self.
+        self._tool_handlers = tool_handlers
+        self._ai_instance = ai_instance
 
     @override
     def on_text_delta(self, delta: TextDelta, snapshot: Text):
         asyncio.create_task(
-            self.
+            self._ai_instance.accumulated_value_queue.put(delta.value))
 
     @override
     def on_event(self, event):
         if event.event == "thread.run.requires_action":
             run_id = event.data.id
-            self.
+            self._ai_instance._handle_requires_action(event.data, run_id)
 
 
 class ToolConfig(BaseModel):
@@ -59,108 +44,24 @@ class ToolConfig(BaseModel):
 
 class MongoDatabase:
     def __init__(self, db_url: str, db_name: str):
-        self.
-        self.
-        self.
-        self.
+        self._client = AsyncIOMotorClient(db_url)
+        self._db = self.client[db_name]
+        self._threads = self.db["threads"]
+        self._messages = self.db["messages"]
 
     async def save_thread_id(self, user_id: str, thread_id: str):
-        await self.
+        await self._threads.insert_one({"thread_id": thread_id, "user_id": user_id})
 
     async def get_thread_id(self, user_id: str) -> Optional[str]:
-        document = await self.
+        document = await self._threads.find_one({"user_id": user_id})
         return document["thread_id"] if document else None
 
     async def save_message(self, user_id: str, metadata: Dict[str, Any]):
         metadata["user_id"] = user_id
-        await self.
-
-    async def delete_thread_id(self, user_id: str):
-        document = await self.threads.find_one({"user_id": user_id})
-        thread_id = document["thread_id"]
-        openai.beta.threads.delete(thread_id)
-        await self.messages.delete_many({"user_id": user_id})
-        await self.threads.delete_one({"user_id": user_id})
+        await self._messages.insert_one(metadata)
 
     async def delete_all_threads(self):
-        await self.
-        await self.messages.delete_many({})
-
-
-class SQLiteDatabase:
-    def __init__(self, db_path: str):
-        self.db_path = db_path
-        self.conn = sqlite3.connect(db_path)
-        self.conn.execute(
-            "CREATE TABLE IF NOT EXISTS threads (user_id TEXT, thread_id TEXT)"
-        )
-        self.conn.execute(
-            "CREATE TABLE IF NOT EXISTS messages (user_id TEXT, message TEXT, response TEXT, timestamp TEXT)"
-        )
-        self.conn.commit()
-        self.conn.close()
-
-    async def save_thread_id(self, user_id: str, thread_id: str):
-        async with aiosqlite.connect(
-            self.db_path, detect_types=sqlite3.PARSE_DECLTYPES
-        ) as db:
-            await db.execute(
-                "INSERT INTO threads (user_id, thread_id) VALUES (?, ?)",
-                (user_id, thread_id),
-            )
-            await db.commit()
-
-    async def get_thread_id(self, user_id: str) -> Optional[str]:
-        async with aiosqlite.connect(
-            self.db_path, detect_types=sqlite3.PARSE_DECLTYPES
-        ) as db:
-            async with db.execute(
-                "SELECT thread_id FROM threads WHERE user_id = ?", (user_id,)
-            ) as cursor:
-                row = await cursor.fetchone()
-                return row[0] if row else None
-
-    async def save_message(self, user_id: str, metadata: Dict[str, Any]):
-        async with aiosqlite.connect(
-            self.db_path, detect_types=sqlite3.PARSE_DECLTYPES
-        ) as db:
-            await db.execute(
-                "INSERT INTO messages (user_id, message, response, timestamp) VALUES (?, ?, ?, ?)",
-                (
-                    user_id,
-                    metadata["message"],
-                    metadata["response"],
-                    metadata["timestamp"],
-                ),
-            )
-            await db.commit()
-
-    async def delete_thread_id(self, user_id: str):
-        async with aiosqlite.connect(
-            self.db_path, detect_types=sqlite3.PARSE_DECLTYPES
-        ) as db:
-            async with db.execute(
-                "SELECT thread_id FROM threads WHERE user_id = ?", (user_id,)
-            ) as cursor:
-                row = await cursor.fetchone()
-                if row:
-                    thread_id = row[0]
-                    openai.beta.threads.delete(thread_id)
-            await db.execute(
-                "DELETE FROM messages WHERE user_id = ?", (user_id,)
-            )
-            await db.execute(
-                "DELETE FROM threads WHERE user_id = ?", (user_id,)
-            )
-            await db.commit()
-
-    async def delete_all_threads(self):
-        async with aiosqlite.connect(
-            self.db_path, detect_types=sqlite3.PARSE_DECLTYPES
-        ) as db:
-            await db.execute("DELETE FROM messages")
-            await db.execute("DELETE FROM threads")
-            await db.commit()
+        await self._threads.delete_many({})
 
 
 class AI:
@@ -171,49 +72,93 @@ class AI:
         instructions: str,
         database: Any,
         zep_api_key: str = None,
-        zep_base_url: str = None,
         perplexity_api_key: str = None,
         grok_api_key: str = None,
         gemini_api_key: str = None,
+        pinecone_api_key: str = None,
+        pinecone_index_name: str = None,
         code_interpreter: bool = True,
-
+        openai_assistant_model: Literal["gpt-4o-mini",
+                                        "gpt-4o"] = "gpt-4o-mini",
+        openai_embedding_model: Literal["text-embedding-3-small",
+                                        "text-embedding-3-large"] = "text-embedding-3-small"
     ):
-
-
-
-
-
-
-
-
-
-
-
+        """Initialize a new AI assistant with memory and tool integration capabilities.
+
+        Args:
+            openai_api_key (str): OpenAI API key for core AI functionality
+            name (str): Name identifier for the assistant
+            instructions (str): Base behavioral instructions for the AI
+            database (Any): Database instance for message/thread storage
+            zep_api_key (str, optional): API key for Zep memory integration. Defaults to None
+            perplexity_api_key (str, optional): API key for Perplexity search. Defaults to None
+            grok_api_key (str, optional): API key for X/Twitter search via Grok. Defaults to None
+            gemini_api_key (str, optional): API key for Google Gemini. Defaults to None
+            pinecone_api_key (str, optional): API key for Pinecone. Defaults to None
+            pinecone_index_name (str, optional): Pinecone index name. Defaults to None
+            code_interpreter (bool, optional): Enable code interpretation. Defaults to True
+            openai_assistant_model (Literal["gpt-4o-mini", "gpt-4o"], optional): OpenAI model for assistant. Defaults to "gpt-4o-mini"
+            openai_embedding_model (Literal["text-embedding-3-small", "text-embedding-3-large"], optional): OpenAI model for text embedding. Defaults to "text-embedding-3-small"
+
+        Example:
+            ```python
+            ai = AI(
+                openai_api_key="your-key",
+                name="Assistant",
+                instructions="Be helpful and concise",
+                database=MongoDatabase("mongodb://localhost", "ai_db"),
+            )
+            ```
+        Notes:
+            - Requires valid OpenAI API key for core functionality
+            - Database instance for storing messages and threads
+            - Optional integrations for Zep, Perplexity, Grok, Gemini, Pinecone, and Cohere
+            - Supports code interpretation and custom tool functions
+            - You must create the Pinecone index in the dashboard before using it
+        """
+        self._client = OpenAI(api_key=openai_api_key)
+        self._name = name
+        self._instructions = instructions
+        self._openai_assistant_model = openai_assistant_model
+        self._openai_embedding_model = openai_embedding_model
+        self._tools = [{"type": "code_interpreter"}
+                       ] if code_interpreter else []
+        self._tool_handlers = {}
+        self._assistant_id = None
+        self._database = database
+        self._accumulated_value_queue = asyncio.Queue()
+        self._zep = (
+            AsyncZep(api_key=zep_api_key)
            if zep_api_key
            else None
        )
-        self.
-            Zep(api_key=zep_api_key
+        self._sync_zep = (
+            Zep(api_key=zep_api_key) if zep_api_key else None
        )
-        self.
-        self.
-        self.
+        self._perplexity_api_key = perplexity_api_key
+        self._grok_api_key = grok_api_key
+        self._gemini_api_key = gemini_api_key
+        self._pinecone = Pinecone(
+            api_key=pinecone_api_key) if pinecone_api_key else None
+        self._pinecone_index_name = pinecone_index_name if pinecone_index_name else None
+        self._pinecone_index = self._pinecone.Index(
+            self._pinecone_index_name) if self._pinecone else None
 
     async def __aenter__(self):
-        assistants =
+        assistants = self._client.beta.assistants.list()
         existing_assistant = next(
-            (a for a in assistants if a.name == self.
+            (a for a in assistants if a.name == self._name), None)
 
         if existing_assistant:
-            self.
+            self._assistant_id = existing_assistant.id
         else:
-            self.
+            self._assistant_id = self._client.beta.assistants.create(
                 name=self.name,
-                instructions=self.
-                tools=self.
-                model=self.
+                instructions=self._instructions,
+                tools=self._tools,
+                model=self._openai_assistant_model,
             ).id
-        await self.
+        await self._database.delete_all_threads()
 
         return self
 
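To see how the reworked constructor and `__aenter__` fit together, here is a minimal, hypothetical setup sketch: the keys are placeholders, and the Pinecone index must already exist in the dashboard (per the Notes in the docstring above):

```python
import asyncio

from solana_agent.ai import AI, MongoDatabase

async def main():
    # MongoDB is the storage backend in 0.0.7 (SQLiteDatabase was removed).
    db = MongoDatabase("mongodb://localhost:27017", "ai_db")
    async with AI(
        openai_api_key="sk-...",          # required, placeholder value
        name="Assistant",
        instructions="Be helpful and concise",
        database=db,
        zep_api_key="z-...",              # optional: conversational fact memory
        pinecone_api_key="pc-...",        # optional: enables the KB tools (new in 0.0.7)
        pinecone_index_name="kb-index",   # create this index in Pinecone first
    ) as ai:
        # Entering the context finds (or creates) the assistant with this name
        # and clears stored threads via database.delete_all_threads().
        ...

asyncio.run(main())
```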
@@ -221,51 +166,166 @@ class AI:
         # Perform any cleanup actions here
         pass
 
-    async def
-        thread_id = await self.
+    async def _create_thread(self, user_id: str) -> str:
+        thread_id = await self._database.get_thread_id(user_id)
 
         if thread_id is None:
-            thread =
+            thread = self._client.beta.threads.create()
             thread_id = thread.id
-            await self.
-            if self.
-                await self.
-                await self.
+            await self._database.save_thread_id(user_id, thread_id)
+            if self._zep:
+                await self._zep.user.add(user_id=user_id)
+                await self._zep.memory.add_session(user_id=user_id, session_id=user_id)
 
         return thread_id
 
-    async def
+    async def _cancel_run(self, thread_id: str, run_id: str):
         try:
-            self.
+            self._client.beta.threads.runs.cancel(
                 thread_id=thread_id, run_id=run_id)
         except Exception as e:
             print(f"Error cancelling run: {e}")
 
-    async def
-        runs = self.
+    async def _get_active_run(self, thread_id: str) -> Optional[str]:
+        runs = self._client.beta.threads.runs.list(
+            thread_id=thread_id, limit=1)
         for run in runs:
             if run.status in ["in_progress"]:
                 return run.id
         return None
 
-    async def
-        run = self.
+    async def _get_run_status(self, thread_id: str, run_id: str) -> str:
+        run = self._client.beta.threads.runs.retrieve(
             thread_id=thread_id, run_id=run_id)
         return run.status
 
     # converter tool - has to be sync
     def csv_to_json(self, file_path: str) -> str:
+        """Convert CSV file to JSON string format.
+
+        Args:
+            file_path (str): Path to the CSV file to convert
+
+        Returns:
+            str: JSON string containing the CSV data
+
+        Example:
+            ```python
+            result = ai.csv_to_json("data.csv")
+            # Returns: '[{"column1": "value1", "column2": "value2"}]'
+            ```
+
+        Note:
+            This is a synchronous tool method required for OpenAI function calling.
+        """
         df = pd.read_csv(file_path)
         records = df.to_dict(orient="records")
         return json.dumps(records)
 
+    # search kb tool - has to be sync
+    def search_kb(self, query: str, limit: int = 10) -> str:
+        """Search Pinecone knowledge base using OpenAI embeddings.
+
+        Args:
+            query (str): Search query to find relevant documents
+            limit (int, optional): Maximum number of results to return. Defaults to 10.
+
+        Returns:
+            str: JSON string of matched documents or error message
+
+        Example:
+            ```python
+            results = ai.search_kb("machine learning basics", limit=5)
+            # Returns: '[{"title": "ML Intro", "content": "..."}]'
+            ```
+
+        Note:
+            - Requires configured Pinecone index
+            - Uses OpenAI embeddings for semantic search
+            - Returns JSON-serialized Pinecone match metadata results
+            - Returns error message string if search fails
+        """
+        try:
+            response = self._client.embeddings.create(
+                input=query,
+                model=self._openai_embedding_model,
+            )
+            search_results = self._pinecone_index.query(
+                vector=response.data[0].embedding, top_k=limit, include_metadata=True, include_values=False)
+            matches = search_results.matches
+            metadata = [match.metadata for match in matches]
+            return json.dumps(metadata)
+        except Exception as e:
+            return f"Failed to search KB. Error: {e}"
+
+    # add document to kb tool - has to be sync
+    def add_document_to_kb(self, document: Dict[str, str]):
+        """Add a document to the Pinecone knowledge base with OpenAI embeddings.
+
+        Args:
+            document (Dict[str, str]): Document to add, with string fields as values
+
+        Example:
+            ```python
+            ai.add_document_to_kb({
+                "title": "AI Basics",
+                "content": "Introduction to artificial intelligence...",
+                "author": "John Doe"
+            })
+            ```
+
+        Note:
+            - Requires Pinecone index to be configured
+            - Uses OpenAI embeddings API
+            - Document values must be strings
+            - Automatically generates UUID for document
+        """
+        values: List[str] = []
+        for _, v in document.items():
+            values.append(v)
+        response = self._client.embeddings.create(
+            input=values,
+            model=self._openai_embedding_model,
+        )
+        self._pinecone_index.upsert(
+            vectors=[
+                {
+                    "id": uuid.uuid4().hex,
+                    "values": response.data[0].embedding,
+                    "metadata": document,
+                }
+            ]
+        )
+
     # summarize tool - has to be sync
     def summarize(
-        self, text: str, model: Literal["gemini-2.0-flash"] = "gemini-
+        self, text: str, model: Literal["gemini-2.0-flash", "gemini-1.5-pro"] = "gemini-1.5-pro"
     ) -> str:
+        """Summarize text using Google's Gemini language model.
+
+        Args:
+            text (str): The text content to be summarized
+            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
+                Gemini model to use. Defaults to "gemini-1.5-pro"
+                - gemini-2.0-flash: Faster, shorter summaries
+                - gemini-1.5-pro: More detailed summaries
+
+        Returns:
+            str: Summarized text or error message if summarization fails
+
+        Example:
+            ```python
+            summary = ai.summarize("Long article text here...", model="gemini-1.5-pro")
+            # Returns: "Concise summary of the article..."
+            ```
+
+        Note:
+            This is a synchronous tool method required for OpenAI function calling.
+            Requires valid Gemini API key to be configured.
+        """
         try:
             client = OpenAI(
-                api_key=self.
+                api_key=self._gemini_api_key,
                 base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
             )
 
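The knowledge-base pair added above forms an embed-and-upsert / embed-and-query round trip: `add_document_to_kb` embeds the document's string values and upserts one vector with the whole dict as metadata, while `search_kb` embeds the query and returns the metadata of the top matches as a JSON string. A minimal usage sketch, assuming `ai` was constructed with `pinecone_api_key` and an existing index as in the earlier sketch (data is illustrative):

```python
import json

# Index a document: its string values are embedded; the dict is stored as metadata.
ai.add_document_to_kb({
    "title": "Staking 101",
    "content": "How token staking and rewards work...",
})

# Query it back: search_kb returns a JSON string of match metadata,
# or a "Failed to search KB..." string on error.
matches = json.loads(ai.search_kb("how do staking rewards work", limit=3))
# e.g. [{"title": "Staking 101", "content": "How token staking and rewards work..."}]
```

One quirk visible in the diff: `add_document_to_kb` embeds every value but upserts only `response.data[0].embedding`, so in effect only the first field's embedding indexes the document.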
@@ -291,9 +351,33 @@ class AI:
         query: str,
         limit: int | None = None,
     ) -> List[str] | None:
-
+        """Search stored conversation facts using Zep memory integration.
+
+        Args:
+            user_id (str): Unique identifier for the user
+            query (str): Search query to find relevant facts
+            limit (int | None, optional): Maximum number of facts to return. Defaults to None.
+
+        Returns:
+            List[str] | None: List of found facts or None if Zep is not configured
+
+        Example:
+            ```python
+            facts = ai.search_facts(
+                user_id="user123",
+                query="project requirements",
+                limit=5
+            )
+            # Returns: ["Fact 1", "Fact 2", ...]
+            ```
+
+        Note:
+            Requires Zep integration to be configured with valid API key and URL.
+            This is a synchronous tool method required for OpenAI function calling.
+        """
+        if self._sync_zep:
            facts = []
-            results = self.
+            results = self._sync_zep.memory.search_sessions(
                user_id=user_id,
                text=query,
                limit=limit,
@@ -313,6 +397,33 @@ class AI:
             "sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"
         ] = "sonar",
     ) -> str:
+        """Search the internet using Perplexity AI API.
+
+        Args:
+            query (str): Search query string
+            model (Literal["sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"], optional):
+                Perplexity model to use. Defaults to "sonar"
+                - sonar: Fast, general-purpose search
+                - sonar-pro: Enhanced search capabilities
+                - sonar-reasoning-pro: Advanced reasoning with search
+                - sonar-reasoning: Basic reasoning with search
+
+        Returns:
+            str: Search results or error message if search fails
+
+        Example:
+            ```python
+            result = ai.search_internet(
+                query="Latest AI developments",
+                model="sonar-reasoning-pro"
+            )
+            # Returns: "Detailed search results about AI..."
+            ```
+
+        Note:
+            Requires valid Perplexity API key to be configured.
+            This is a synchronous tool method required for OpenAI function calling.
+        """
         try:
             url = "https://api.perplexity.ai/chat/completions"
 
@@ -330,7 +441,7 @@ class AI:
             ],
         }
         headers = {
-            "Authorization": f"Bearer {self.
+            "Authorization": f"Bearer {self._perplexity_api_key}",
             "Content-Type": "application/json",
         }
 
@@ -354,12 +465,43 @@ class AI:
         use_perplexity: bool = True,
         use_grok: bool = True,
         use_facts: bool = True,
+        use_kb=True,
         perplexity_model: Literal[
             "sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"
         ] = "sonar",
         openai_model: Literal["o1", "o3-mini"] = "o3-mini",
         grok_model: Literal["grok-beta"] = "grok-beta",
     ) -> str:
+        """Combine multiple data sources with AI reasoning to answer queries.
+
+        Args:
+            user_id (str): Unique identifier for the user
+            query (str): The question or query to reason about
+            use_perplexity (bool, optional): Include Perplexity search results. Defaults to True
+            use_grok (bool, optional): Include X/Twitter search results. Defaults to True
+            use_facts (bool, optional): Include stored conversation facts. Defaults to True
+            use_kb (bool, optional): Include Pinecone knowledge base search results. Defaults to True
+            perplexity_model (Literal, optional): Perplexity model to use. Defaults to "sonar"
+            openai_model (Literal, optional): OpenAI model for reasoning. Defaults to "o3-mini"
+            grok_model (Literal, optional): Grok model for X search. Defaults to "grok-beta"
+
+        Returns:
+            str: Reasoned response combining all enabled data sources or error message
+
+        Example:
+            ```python
+            result = ai.reason(
+                user_id="user123",
+                query="What are the latest AI trends?",
+            )
+            # Returns: "Based on multiple sources: [comprehensive answer]"
+            ```
+
+        Note:
+            This is a synchronous tool method required for OpenAI function calling.
+            Requires configuration of relevant API keys for enabled data sources.
+            Will gracefully handle missing or failed data sources.
+        """
         try:
             if use_facts:
                 facts = self.search_facts(user_id, query)
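A short example of the new `use_kb` switch in `reason` (user ID and query are placeholders; each enabled source needs its API key configured):

```python
# All sources enabled (defaults): facts + KB + internet + X.
answer = ai.reason(user_id="user123", query="What are the latest AI trends?")

# Skip the Pinecone knowledge base, e.g. when no index is configured.
answer = ai.reason(
    user_id="user123",
    query="What are the latest AI trends?",
    use_kb=False,
)
```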
@@ -375,8 +517,12 @@ class AI:
                 x_search_results = self.search_x(query, grok_model)
             else:
                 x_search_results = ""
+            if use_kb:
+                kb_results = self.search_kb(query)
+            else:
+                kb_results = ""
 
-            response = self.
+            response = self._client.chat.completions.create(
                 model=openai_model,
                 messages=[
                     {
@@ -385,7 +531,7 @@ class AI:
                     },
                     {
                         "role": "user",
-                        "content": f"Query: {query}, Facts: {facts}, Internet Search Results: {search_results}, X Search Results: {x_search_results}",
+                        "content": f"Query: {query}, Facts: {facts}, KB Results: {kb_results}, Internet Search Results: {search_results}, X Search Results: {x_search_results}",
                     },
                 ],
             )
@@ -396,7 +542,27 @@ class AI:
     # x search tool - has to be sync
     def search_x(self, query: str, model: Literal["grok-beta"] = "grok-beta") -> str:
         try:
-
+            """Search X (formerly Twitter) using Grok API integration.
+
+            Args:
+                query (str): Search query to find relevant X posts
+                model (Literal["grok-beta"], optional): Grok model to use. Defaults to "grok-beta"
+
+            Returns:
+                str: Search results from X or error message if search fails
+
+            Example:
+                ```python
+                result = ai.search_x("AI announcements")
+                # Returns: "Recent relevant X posts about AI announcements..."
+                ```
+
+            Note:
+                This is a synchronous tool method required for OpenAI function calling.
+                Requires valid Grok API key to be configured.
+                Returns error message string if API call fails.
+            """
+            client = OpenAI(api_key=self._grok_api_key,
                             base_url="https://api.x.ai/v1")
 
             completion = client.chat.completions.create(
@@ -415,45 +581,83 @@ class AI:
             return f"Failed to search X. Error: {e}"
 
     async def delete_facts(self, user_id: str):
-
-
-
-
-
+        """Delete stored conversation facts for a specific user from Zep memory.
+
+        Args:
+            user_id (str): Unique identifier for the user whose facts should be deleted
+
+        Example:
+            ```python
+            await ai.delete_facts("user123")
+            # Deletes all stored facts for user123
+            ```
+
+        Note:
+            This is an async method and must be awaited.
+            Requires Zep integration to be configured.
+            No-op if Zep is not configured.
+        """
+        if self._zep:
+            await self._zep.memory.delete(session_id=user_id)
+
+    async def _listen(self, audio_content: bytes, input_format: str) -> str:
+        transcription = self._client.audio.transcriptions.create(
             model="whisper-1",
             file=(f"file.{input_format}", audio_content),
         )
         return transcription.text
 
     async def text(self, user_id: str, user_text: str) -> AsyncGenerator[str, None]:
-
+        """Process text input and stream AI responses asynchronously.
 
-
+        Args:
+            user_id (str): Unique identifier for the user/conversation
+            user_text (str): Text input from user to process
+
+        Returns:
+            AsyncGenerator[str, None]: Stream of response text chunks
+
+        Example:
+            ```python
+            async for chunk in ai.text("user123", "What is machine learning?"):
+                print(chunk, end="")  # Prints response as it streams
+            ```
+
+        Note:
+            - Maintains conversation thread using OpenAI's thread system
+            - Stores messages in configured database (MongoDB/SQLite)
+            - Integrates with Zep memory if configured
+            - Handles concurrent runs by canceling active ones
+            - Streams responses for real-time interaction
+        """
+        self._accumulated_value_queue = asyncio.Queue()
+
+        thread_id = await self._database.get_thread_id(user_id)
 
         if thread_id is None:
-            thread_id = await self.
+            thread_id = await self._create_thread(user_id)
 
-        self.
+        self._current_thread_id = thread_id
 
         # Check for active runs and cancel if necessary
-        active_run_id = await self.
+        active_run_id = await self._get_active_run(thread_id)
         if active_run_id:
-            await self.
-            while await self.
+            await self._cancel_run(thread_id, active_run_id)
+            while await self._get_run_status(thread_id, active_run_id) != "cancelled":
                 await asyncio.sleep(0.1)
 
         # Create a message in the thread
-        self.
+        self._client.beta.threads.messages.create(
             thread_id=thread_id,
             role="user",
             content=user_text,
         )
-        event_handler = EventHandler(self.
+        event_handler = EventHandler(self._tool_handlers, self)
 
         async def stream_processor():
-            with self.
+            with self._client.beta.threads.runs.stream(
                 thread_id=thread_id,
-                assistant_id=self.
+                assistant_id=self._assistant_id,
                 event_handler=event_handler,
             ) as stream:
                 stream.until_done()
@@ -466,13 +670,13 @@ class AI:
         while True:
             try:
                 value = await asyncio.wait_for(
-                    self.
+                    self._accumulated_value_queue.get(), timeout=0.1
                 )
                 if value is not None:
                     full_response += value
                     yield value
             except asyncio.TimeoutError:
-                if self.
+                if self._accumulated_value_queue.empty():
                     break
 
         # Save the message to the database
@@ -483,8 +687,8 @@ class AI:
             "timestamp": datetime.now(),
         }
 
-        await self.
-        if self.
+        await self._database.save_message(user_id, metadata)
+        if self._zep:
             messages = [
                 Message(
                     role="user",
@@ -497,7 +701,7 @@ class AI:
                     content=full_response,
                 ),
             ]
-            await self.
+            await self._zep.memory.add(
                 user_id=user_id, session_id=user_id, messages=messages
             )
 
@@ -513,27 +717,62 @@ class AI:
         response_format: Literal["mp3", "opus",
                                  "aac", "flac", "wav", "pcm"] = "aac",
     ) -> AsyncGenerator[bytes, None]:
+        """Process voice conversations and stream AI audio responses asynchronously.
+
+        Args:
+            user_id (str): Unique identifier for the user/conversation
+            audio_bytes (bytes): Raw audio input bytes to process
+            voice (Literal, optional): OpenAI TTS voice to use. Defaults to "nova"
+            input_format (Literal, optional): Input audio format. Defaults to "mp4"
+            response_format (Literal, optional): Output audio format. Defaults to "aac"
+
+        Returns:
+            AsyncGenerator[bytes, None]: Stream of audio response chunks
+
+        Example:
+            ```python
+            async with open('input.mp4', 'rb') as f:
+                audio_data = f.read()
+                async for chunk in ai.conversation(
+                    "user123",
+                    audio_data,
+                    voice="nova",
+                    input_format="mp4",
+                    response_format="aac"
+                ):
+                    # Process or save audio chunks
+                    await process_audio_chunk(chunk)
+            ```
+
+        Note:
+            - Converts audio to text using Whisper
+            - Maintains conversation thread using OpenAI
+            - Stores conversation in database
+            - Integrates with Zep memory if configured
+            - Streams audio response using OpenAI TTS
+        """
+
         # Reset the queue for each new conversation
-        self.
+        self._accumulated_value_queue = asyncio.Queue()
 
-        thread_id = await self.
+        thread_id = await self._database.get_thread_id(user_id)
 
         if thread_id is None:
-            thread_id = await self.
+            thread_id = await self._create_thread(user_id)
 
-        self.
-        transcript = await self.
-        event_handler = EventHandler(self.
-
+        self._current_thread_id = thread_id
+        transcript = await self._listen(audio_bytes, input_format)
+        event_handler = EventHandler(self._tool_handlers, self)
+        self._client.beta.threads.messages.create(
             thread_id=thread_id,
             role="user",
             content=transcript,
         )
 
         async def stream_processor():
-            with
+            with self._client.beta.threads.runs.stream(
                 thread_id=thread_id,
-                assistant_id=self.
+                assistant_id=self._assistant_id,
                 event_handler=event_handler,
             ) as stream:
                 stream.until_done()
@@ -546,12 +785,12 @@ class AI:
         while True:
             try:
                 value = await asyncio.wait_for(
-                    self.
+                    self._accumulated_value_queue.get(), timeout=0.1
                 )
                 if value is not None:
                     full_response += value
             except asyncio.TimeoutError:
-                if self.
+                if self._accumulated_value_queue.empty():
                     break
 
         metadata = {
@@ -561,9 +800,9 @@ class AI:
             "timestamp": datetime.now(),
         }
 
-        await self.
+        await self._database.save_message(user_id, metadata)
 
-        if self.
+        if self._zep:
             messages = [
                 Message(
                     role="user",
@@ -576,12 +815,12 @@ class AI:
                     content=full_response,
                 ),
             ]
-            await self.
+            await self._zep.memory.add(
                 user_id=user_id, session_id=user_id, messages=messages
             )
 
         # Generate and stream the audio response
-        with self.
+        with self._client.audio.speech.with_streaming_response.create(
             model="tts-1",
             voice=voice,
             input=full_response,
@@ -590,27 +829,56 @@ class AI:
             for chunk in response.iter_bytes(1024):
                 yield chunk
 
-    def
+    def _handle_requires_action(self, data, run_id):
         tool_outputs = []
 
         for tool in data.required_action.submit_tool_outputs.tool_calls:
-            if tool.function.name in self.
-                handler = self.
+            if tool.function.name in self._tool_handlers:
+                handler = self._tool_handlers[tool.function.name]
                 inputs = json.loads(tool.function.arguments)
                 output = handler(**inputs)
                 tool_outputs.append(
                     {"tool_call_id": tool.id, "output": output})
 
-        self.
+        self._submit_tool_outputs(tool_outputs, run_id)
 
-    def
-        with self.
-            thread_id=self.
+    def _submit_tool_outputs(self, tool_outputs, run_id):
+        with self._client.beta.threads.runs.submit_tool_outputs_stream(
+            thread_id=self._current_thread_id, run_id=run_id, tool_outputs=tool_outputs
        ) as stream:
            for text in stream.text_deltas:
-                asyncio.create_task(self.
+                asyncio.create_task(self._accumulated_value_queue.put(text))
 
     def add_tool(self, func: Callable):
+        """Register a custom function as an AI tool using decorator pattern.
+
+        Args:
+            func (Callable): Function to register as a tool. Must have docstring and type hints.
+
+        Returns:
+            Callable: The decorated function
+
+        Example:
+            ```python
+            @ai.add_tool
+            def custom_search(query: str) -> str:
+                '''Search custom data source.
+
+                Args:
+                    query (str): Search query
+
+                Returns:
+                    str: Search results
+                '''
+                return "Custom search results"
+            ```
+
+        Note:
+            - Function must have proper docstring for tool description
+            - Parameters should have type hints
+            - Tool becomes available to AI for function calling
+            - Parameters are automatically converted to JSON schema
+        """
         sig = inspect.signature(func)
         parameters = {"type": "object", "properties": {}, "required": []}
         for name, param in sig.parameters.items():
@@ -626,8 +894,8 @@ class AI:
                 "parameters": parameters,
             },
         }
-        self.
-        self.
+        self._tools.append(tool_config)
+        self._tool_handlers[func.__name__] = func
         return func
 
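To make the registration mechanics concrete: per the diff, `add_tool` reads the function's signature with `inspect.signature`, turns each parameter into a JSON-schema property, uses the docstring as the tool description, and stores the handler for dispatch in `_handle_requires_action`. A hedged sketch follows; the exact `tool_config` shape sits partly in unchanged lines, so the commented structure is an approximation of the OpenAI function-calling format:

```python
@ai.add_tool
def get_price(token: str) -> str:
    """Look up the current price for a token symbol."""
    return f"Price for {token}: ..."

# Approximate tool_config appended to ai._tools:
# {
#     "type": "function",
#     "function": {
#         "name": "get_price",
#         "description": "Look up the current price for a token symbol.",
#         "parameters": {
#             "type": "object",
#             "properties": {"token": {"type": "string"}},
#             "required": ["token"],
#         },
#     },
# }
```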
solana_agent-0.0.7.dist-info/METADATA
ADDED
@@ -0,0 +1,153 @@
+Metadata-Version: 2.3
+Name: solana-agent
+Version: 0.0.7
+Summary: Build self-learning AI Agents
+License: MIT
+Keywords: ai,openai,ai agents
+Author: Bevan Hunt
+Author-email: bevan@bevanhunt.com
+Requires-Python: >=3.9,<4.0
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Dist: motor (>=3.7.0,<4.0.0)
+Requires-Dist: openai (>=1.61.1,<2.0.0)
+Requires-Dist: pandas (>=2.2.3,<3.0.0)
+Requires-Dist: pinecone (>=6.0.1,<7.0.0)
+Requires-Dist: pydantic (>=2.10.6,<3.0.0)
+Requires-Dist: qdrant-client (>=1.13.2,<2.0.0)
+Requires-Dist: requests (>=2.32.3,<3.0.0)
+Requires-Dist: zep-cloud (>=2.3.1,<3.0.0)
+Project-URL: Repository, https://github.com/truemagic-coder/solana-agent
+Description-Content-Type: text/markdown
+
+# Solana Agent
+
+[](https://pypi.org/project/solana-agent/)
+
+
+
+Solana Agent is the first self-learning AI Agent framework.
+
+## Why Solana Agent?
+
+### 🧬 The First Self-Learning AI Agent
+
+Unlike traditional AI assistants that forget conversations after each session, Solana Agent maintains a rich, searchable memory system that grows smarter with every interaction.
+
+**Why This Matters:**
+- 📈 **Continuous Learning**: Evolves with every new interaction
+- 🎯 **Context-Aware**: Recalls past interactions for more relevant responses
+- 🔄 **Self-Improving**: Builds knowledge and improves reasoning automatically
+- 🏢 **Enterprise-Ready**: Scales from personal to organization-wide deployment
+- 🛡️ **Secure**: Secure and private memory and data storage
+
+**"It's not just an AI assistant - it's your organization's evolving intelligence layer."**
+
+## Benefits
+
+💬 **Enhanced Communication**
+- Engage in natural conversations without typing delays
+- Communicate hands-free with voice interactions
+- Reduce response time with real-time processing
+
+🎯 **Improved Decision Making**
+- Access comprehensive data from multiple trusted sources
+- Get instant answers backed by Internet and social-media research
+- Make informed decisions with cross-referenced information
+
+💪 **Operational Efficiency**
+- Automate repetitive data processing tasks
+- Convert data formats seamlessly
+- Scale knowledge management effortlessly
+
+🔐 **Enterprise Ready**
+- Secure data handling with advanced memory systems
+- Customize functionality through extensible architecture
+- Integrate with existing business tools and APIs
+
+🚀 **Competitive Advantage**
+- Stay current with real-time social media and Internet insights
+- Process and analyze large datasets quickly
+- Transform raw data into actionable intelligence
+
+## Features
+
+🔄 **Real-time AI Interactions**
+- Streaming text-based conversations
+- Real-time voice-to-voice conversations
+
+🧠 **Memory System and Extensibility**
+- Advanced AI memory combining conversational context, conversational facts, and knowledge base
+- Simple custom tool creation for extending capabilities like additional API integrations
+
+🔍 **Multi-Source Search and Reasoning**
+- Internet search via Perplexity
+- Conversational fact search powered by Zep
+- X (Twitter) search using Grok
+- Conversational message history using MongoDB (on-prem or hosted)
+- Knowledge Base search via Pinecone
+- Comprehensive reasoning combining multiple data sources
+
+🛠️ **Data Processing Tools**
+- CSV to JSON conversion for data integration
+- Text summarization powered by Gemini
+- Enterprise-ready knowledge base powered by Pinecone
+
+## Why Choose Solana Agent Over LangChain?
+
+### 🎯 Key Differentiators
+
+🧠 **Advanced Memory Architecture**
+- Built-in episodic memory vs LangChain's basic memory types
+- Persistent cross-session knowledge retention
+- Automatic self-learning from conversations
+
+🏢 **Enterprise Focus**
+- Production-ready out of the box in a few lines of code
+- Enterprise-grade deployment options for all components and services
+- Simple conventions over complex configurations
+
+🛠️ **Simplified Development**
+- No chain building required
+- Python plain functions vs complex chaining
+- Fewer moving parts equals more stable applications
+- Smaller repo size by 1000x: Solana Agent @ ~500 LOC vs LangChain @ ~500,000 LOC
+
+🚀 **Performance**
+- Optimized for real-time streaming responses
+- Built-in voice processing capabilities
+- Multi-source search with automatic reasoning synthesis
+
+## Installation
+
+You can install Solana Agent using pip:
+
+```bash
+pip install solana-agent
+```
+
+## Documentation
+* All public methods have docstrings for real-time IDE hinting
+
+## Production Apps
+* [Solana Agent Copilot](https://ai.solana-agent.com) - Solana Token AI Copilot using streaming text conversations
+* [CometHeart](https://cometheart.com) - AI Companion and Business Coach on mobile using voice-to-voice conversations
+
+## Example Apps
+* [Solana Agent Example App](https://github.com/truemagic-coder/solana-agent-app) - See as source of documentation
+
+## Contributing
+
+Contributions to Solana Agent are welcome! Please feel free to submit a Pull Request.
+
+## License
+
+This project is licensed under the MIT License - see the LICENSE file for details.
+
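The 0.0.6 README's quickstart (shown removed further down) used the now-deleted `SQLiteDatabase`. A hedged 0.0.7 equivalent, assuming `MongoDatabase` is importable from the package root the way `AI` is:

```python
import asyncio

from solana_agent import AI, MongoDatabase

async def main():
    database = MongoDatabase("mongodb://localhost:27017", "conversations")
    async with AI("your_openai_api_key", "AI Assistant", "Your instructions here", database) as ai:
        # ai.text is an async generator in 0.0.7: iterate it directly.
        async for chunk in ai.text("user123", "Hello, AI!"):
            print(chunk, end="", flush=True)
        print()

asyncio.run(main())
```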
solana_agent-0.0.7.dist-info/RECORD
ADDED
@@ -0,0 +1,6 @@
+solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
+solana_agent/ai.py,sha256=nAFDHwEjQqoHZUNv9YW0hzJWRsUKr8Nm7G_wFaW2XnI,33393
+solana_agent-0.0.7.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+solana_agent-0.0.7.dist-info/METADATA,sha256=J9JA1qZX8aaxqcqwUuTWQEx0TkCS0x3zHDLXftI5vGQ,5623
+solana_agent-0.0.7.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+solana_agent-0.0.7.dist-info/RECORD,,
solana_agent-0.0.6.dist-info/METADATA
DELETED
@@ -1,97 +0,0 @@
-Metadata-Version: 2.3
-Name: solana-agent
-Version: 0.0.6
-Summary: The Best AI Agent Framework
-License: MIT
-Keywords: ai,openai,ai agents
-Author: Bevan Hunt
-Author-email: bevan@bevanhunt.com
-Requires-Python: >=3.9,<4.0
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
-Requires-Dist: aiosqlite (>=0.21.0,<0.22.0)
-Requires-Dist: motor (>=3.7.0,<4.0.0)
-Requires-Dist: openai (>=1.61.1,<2.0.0)
-Requires-Dist: pandas (>=2.2.3,<3.0.0)
-Requires-Dist: pydantic (>=2.10.6,<3.0.0)
-Requires-Dist: requests (>=2.32.3,<3.0.0)
-Requires-Dist: zep-python (>=2.0.2,<3.0.0)
-Project-URL: Repository, https://github.com/truemagic-coder/solana-agent
-Description-Content-Type: text/markdown
-
-# Solana Agent
-
-[](https://pypi.org/project/solana-agent/)
-
-
-
-Solana Agent is the best AI Agent framework.
-
-## Features
-
-- Streaming text-based conversations with AI
-- Audio transcription and streaming text-to-speech conversion
-- Thread management for maintaining conversation context
-- Message persistence using SQLite or MongoDB
-- Custom tool integration for extending AI capabilities
-- The best memory context currently available for AI Agents
-- Zep integration for tracking facts
-- Search Internet with Perplexity tool
-- Search Zep facts tool
-- Search X with Grok tool
-- Reasoning tool that combines OpenAI model reasoning, Zep facts, Internet search, and X search.
-- CSV to JSON tool
-- Summarize text tool using Gemini
-- Solana tools upcoming...
-
-## Installation
-
-You can install Solana Agent using pip:
-
-```bash
-pip install solana-agent
-```
-
-## Usage
-
-Here's a basic example of how to use Solana Agent:
-
-```python
-from solana_agent import AI, SQLiteDatabase
-
-async def main():
-    database = SQLiteDatabase("conversations.db")
-    async with AI("your_openai_api_key", "AI Assistant", "Your instructions here", database) as ai:
-        user_id = "user123"
-        response = await ai.text(user_id, "Hello, AI!")
-        async for chunk in response:
-            print(chunk, end="", flush=True)
-        print()
-
-# Run the async main function
-import asyncio
-asyncio.run(main())
-```
-
-## Production Apps
-* [Solana Agent](https://solana-agent.com) - AI Market Intelligence
-* [CometHeart](https://cometheart.com) - AI Companion
-
-## Example Apps
-* [Solana Agent Example App](https://github.com/truemagic-coder/solana-agent-app)
-
-## Contributing
-
-Contributions to Solana Agent are welcome! Please feel free to submit a Pull Request.
-
-## License
-
-This project is licensed under the MIT License - see the LICENSE file for details.
-
solana_agent-0.0.6.dist-info/RECORD
DELETED
@@ -1,6 +0,0 @@
-solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
-solana_agent/ai.py,sha256=a82WmVPwsQEKMojS9DM4-rSHqdoF3XwKdn92I96UQwA,22001
-solana_agent-0.0.6.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
-solana_agent-0.0.6.dist-info/METADATA,sha256=EBKYqwMaxIz0iR7sz9Ga-5o_lBty3yfNOVezcMMb8HU,3069
-solana_agent-0.0.6.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-solana_agent-0.0.6.dist-info/RECORD,,

{solana_agent-0.0.6.dist-info → solana_agent-0.0.7.dist-info}/LICENSE
File without changes

{solana_agent-0.0.6.dist-info → solana_agent-0.0.7.dist-info}/WHEEL
File without changes