autonomous-app 0.3.5-py3-none-any.whl → 0.3.24-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autonomous/__init__.py +1 -1
- autonomous/ai/audioagent.py +15 -19
- autonomous/ai/baseagent.py +42 -0
- autonomous/ai/imageagent.py +3 -19
- autonomous/ai/jsonagent.py +2 -16
- autonomous/ai/models/aws.py +317 -0
- autonomous/ai/models/deepseek.py +99 -0
- autonomous/ai/models/gemini.py +309 -0
- autonomous/ai/models/local.py +99 -0
- autonomous/ai/models/openai.py +82 -43
- autonomous/ai/textagent.py +2 -16
- autonomous/auth/autoauth.py +7 -4
- autonomous/auth/user.py +2 -2
- autonomous/db/base/fields.py +3 -11
- autonomous/db/connection.py +1 -1
- autonomous/db/document.py +0 -1
- autonomous/db/fields.py +12 -23
- autonomous/db/queryset/base.py +1 -4
- autonomous/db/queryset/queryset.py +1 -0
- autonomous/db/queryset/transform.py +11 -10
- autonomous/model/autoattr.py +21 -4
- autonomous/model/automodel.py +54 -10
- {autonomous_app-0.3.5.dist-info → autonomous_app-0.3.24.dist-info}/METADATA +6 -24
- {autonomous_app-0.3.5.dist-info → autonomous_app-0.3.24.dist-info}/RECORD +26 -23
- {autonomous_app-0.3.5.dist-info → autonomous_app-0.3.24.dist-info}/WHEEL +1 -1
- autonomous/ai/oaiagent.py +0 -40
- autonomous_app-0.3.5.dist-info/LICENSE +0 -21
- {autonomous_app-0.3.5.dist-info → autonomous_app-0.3.24.dist-info}/top_level.txt +0 -0
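A wheel is a plain zip archive, so per-file counts like those above can be reproduced locally with only the standard library. A minimal sketch (not part of the package; it assumes both .whl files have already been downloaded, and the renamed dist-info directory will show up as a remove/add pair rather than a rename):

    import difflib
    import zipfile

    def wheel_diff(old_whl, new_whl):
        # Compare every member present in either archive and print a unified
        # diff for the ones whose contents changed.
        with zipfile.ZipFile(old_whl) as old, zipfile.ZipFile(new_whl) as new:
            old_names, new_names = set(old.namelist()), set(new.namelist())
            for name in sorted(old_names | new_names):
                before = old.read(name).decode("utf-8", "replace").splitlines() if name in old_names else []
                after = new.read(name).decode("utf-8", "replace").splitlines() if name in new_names else []
                if before != after:
                    for line in difflib.unified_diff(before, after, f"a/{name}", f"b/{name}", lineterm=""):
                        print(line)

    wheel_diff("autonomous_app-0.3.5-py3-none-any.whl", "autonomous_app-0.3.24-py3-none-any.whl")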
autonomous/ai/models/gemini.py
ADDED
@@ -0,0 +1,309 @@
+import io
+import os
+import random
+import wave
+
+from google import genai
+from google.genai import types
+from pydub import AudioSegment
+
+from autonomous import log
+from autonomous.model.autoattr import ListAttr, StringAttr
+from autonomous.model.automodel import AutoModel
+
+
+class GeminiAIModel(AutoModel):
+    _client = None
+    _text_model = "gemini-3-pro-preview"
+    _summary_model = "gemini-2.5-flash"
+    _image_model = "gemini-3-pro-image-preview"
+    _json_model = "gemini-3-pro-preview"
+    _stt_model = "gemini-3-pro-preview"
+    _tts_model = "gemini-2.5-flash-preview-tts"
+    MAX_FILES = 14
+    VOICES = {
+        "Zephyr": ["female"],
+        "Puck": ["male"],
+        "Charon": ["male"],
+        "Kore": ["female"],
+        "Fenrir": ["non-binary"],
+        "Leda": ["female"],
+        "Orus": ["male"],
+        "Aoede": ["female"],
+        "Callirhoe": ["female"],
+        "Autonoe": ["female"],
+        "Enceladus": ["male"],
+        "Iapetus": ["male"],
+        "Umbriel": ["male"],
+        "Algieba": ["male"],
+        "Despina": ["female"],
+        "Erinome": ["female"],
+        "Algenib": ["male"],
+        "Rasalgethi": ["non-binary"],
+        "Laomedeia": ["female"],
+        "Achernar": ["female"],
+        "Alnilam": ["male"],
+        "Schedar": ["male"],
+        "Gacrux": ["female"],
+        "Pulcherrima": ["non-binary"],
+        "Achird": ["male"],
+        "Zubenelgenubi": ["male"],
+        "Vindemiatrix": ["female"],
+        "Sadachbia": ["male"],
+        "Sadaltager": ["male"],
+        "Sulafar": ["female"],
+    }
+
+    messages = ListAttr(StringAttr(default=[]))
+    name = StringAttr(default="agent")
+    instructions = StringAttr(
+        default="You are highly skilled AI trained to assist with various tasks."
+    )
+    description = StringAttr(
+        default="A helpful AI assistant trained to assist with various tasks."
+    )
+
+    @property
+    def client(self):
+        if not self._client:
+            # log("=== Initializing Gemini AI Client ===", _print=True)
+            self._client = genai.Client(api_key=os.environ.get("GOOGLEAI_KEY"))
+            # log("=== Gemini AI Client Initialized ===", _print=True)
+        return self._client
+
+    def _add_function(self, user_function):
+        # This function is now a bit more advanced to conform to the Tool Use schema
+        tool_schema = {
+            "name": user_function.get("name"),
+            "description": user_function.get("description"),
+            "parameters": user_function.get("parameters"),
+        }
+
+        # Validate that the schema has a name, description, and parameters
+        if not all(
+            [tool_schema["name"], tool_schema["description"], tool_schema["parameters"]]
+        ):
+            raise ValueError(
+                "Tool schema must have a 'name', 'description', and 'parameters' field."
+            )
+
+        return tool_schema
+
+    def _create_wav_header(
+        self, raw_audio_bytes, channels=1, rate=24000, sample_width=2
+    ):
+        """Creates an in-memory WAV file from raw PCM audio bytes."""
+        buffer = io.BytesIO()
+        with wave.open(buffer, "wb") as wav_file:
+            # Set audio parameters
+            wav_file.setnchannels(channels)
+            wav_file.setsampwidth(sample_width)
+            wav_file.setframerate(rate)  # 16,000 Hz sample rate
+
+            # Write the raw audio data
+            wav_file.writeframes(raw_audio_bytes)
+
+        buffer.seek(0)
+        return buffer
+
+    def generate_json(self, message, function, additional_instructions=""):
+        # The API call must use the 'tools' parameter instead of 'response_json_schema'
+        function_definition = self._add_function(function)
+
+        response = self.client.models.generate_content(
+            model=self._json_model,
+            contents=message,
+            config=types.GenerateContentConfig(
+                system_instruction=f"{self.instructions}.{additional_instructions}",
+                tools=[types.Tool(function_declarations=[function_definition])],
+                tool_config={
+                    "function_calling_config": {
+                        "mode": "ANY",  # Force a function call
+                    }
+                },
+            ),
+        )
+
+        # The response is now a ToolCall, not a JSON string
+        try:
+            # log(response.candidates[0].content.parts[0].function_call, _print=True)
+            tool_call = response.candidates[0].content.parts[0].function_call
+            if tool_call and tool_call.name == function["name"]:
+                return tool_call.args
+            else:
+                log(
+                    "==== Model did not return a tool call or returned the wrong one. ===="
+                )
+                log(f"Response: {response.text}", _print=True)
+                return {}
+        except Exception as e:
+            log(f"==== Failed to parse ToolCall response: {e} ====")
+            return {}
+
+    def generate_text(self, message, additional_instructions=""):
+        response = self.client.models.generate_content(
+            model=self._text_model,
+            config=types.GenerateContentConfig(
+                system_instruction=f"{self.instructions}.{additional_instructions}",
+            ),
+            contents=message,
+        )
+
+        # log(results, _print=True)
+        # log("=================== END REPORT ===================", _print=True)
+        return response.text
+
+    def summarize_text(self, text, primer=""):
+        primer = primer or self.instructions
+        response = self.client.models.generate_content(
+            model=self._summary_model,
+            config=types.GenerateContentConfig(
+                system_instruction=f"{primer}",
+            ),
+            contents=text,
+        )
+        log(response)
+        try:
+            result = response.candidates[0].content.parts[0].text
+        except Exception as e:
+            log(f"{type(e)}:{e}\n\n Unable to generate content ====")
+            return None
+
+        return result
+
+    def generate_audio_text(
+        self, audio_file, prompt="Transcribe this audio clip", **kwargs
+    ):
+        myfile = self.client.files.upload(
+            file=io.BytesIO(audio_file),
+            config={
+                "mime_type": "audio/mp3",
+                "display_name": kwargs.get("display_name", "audio.mp3"),
+            },
+        )
+
+        response = self.client.models.generate_content(
+            model=self._stt_model,
+            contents=[
+                prompt,
+                myfile,
+            ],
+        )
+        return response.text
+
+    def list_voices(self, filters=[]):
+        if not filters:
+            return list(self.VOICES.keys())
+        voices = []
+        for voice, attribs in self.VOICES.items():
+            if any(f.lower() in attribs for f in filters):
+                voices.append(voice)
+        return voices
+
+    def generate_audio(self, prompt, voice=None, **kwargs):
+        voice = voice or random.choice(self.list_voices())
+        try:
+            response = self.client.models.generate_content(
+                model=self._tts_model,
+                contents=prompt,
+                config=types.GenerateContentConfig(
+                    response_modalities=["AUDIO"],
+                    speech_config=types.SpeechConfig(
+                        voice_config=types.VoiceConfig(
+                            prebuilt_voice_config=types.PrebuiltVoiceConfig(
+                                voice_name=voice,
+                            )
+                        )
+                    ),
+                ),
+            )
+            blob = response.candidates[0].content.parts[0].inline_data
+
+            # Create a WAV file in memory from the raw audio bytes
+            wav_buffer = self._create_wav_header(blob.data)
+
+            # 2. Load the WAV audio using pydub, which will now correctly read the header
+            audio_segment = AudioSegment.from_file(wav_buffer, format="wav")
+
+            # 3. Create a new in-memory buffer for the MP3 output
+            mp3_buffer = io.BytesIO()
+
+            # 4. Export the audio segment directly to the in-memory buffer
+            audio_segment.export(mp3_buffer, format="mp3")
+
+            # 5. Return the bytes from the buffer, not the filename
+            return mp3_buffer.getvalue()
+
+        except Exception as e:
+            log(
+                f"==== Error: Unable to generate audio ====\n{type(e)}:{e}", _print=True
+            )
+            # You can return a default empty byte string or re-raise the exception
+            raise e
+
+    def generate_image(self, prompt, **kwargs):
+        image = None
+        contents = [prompt]
+
+        if kwargs.get("files"):
+            counter = 0
+            for fn, f in kwargs.get("files").items():
+                media = io.BytesIO(f)
+                myfile = self.client.files.upload(
+                    file=media,
+                    config={"mime_type": "image/webp", "display_name": fn},
+                )
+                contents += [myfile]
+                counter += 1
+                if counter >= self.MAX_FILES:
+                    break
+
+        try:
+            # log(self._image_model, contents, _print=True)
+            response = self.client.models.generate_content(
+                model=self._image_model,
+                contents=contents,
+                config=types.GenerateContentConfig(
+                    safety_settings=[
+                        types.SafetySetting(
+                            category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+                            threshold=types.HarmBlockThreshold.BLOCK_NONE,
+                        ),
+                        types.SafetySetting(
+                            category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+                            threshold=types.HarmBlockThreshold.BLOCK_NONE,
+                        ),
+                        types.SafetySetting(
+                            category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+                            threshold=types.HarmBlockThreshold.BLOCK_NONE,
+                        ),
+                        types.SafetySetting(
+                            category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
+                            threshold=types.HarmBlockThreshold.BLOCK_NONE,
+                        ),
+                        types.SafetySetting(
+                            category=types.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY,
+                            threshold=types.HarmBlockThreshold.BLOCK_NONE,
+                        ),
+                    ],
+                    image_config=types.ImageConfig(
+                        aspect_ratio=kwargs.get("aspect_ratio", "3:4"),
+                        image_size=kwargs.get("image_size", "2K"),
+                    ),
+                ),
+            )
+            # log(response, _print=True)
+            # log(response.candidates[0], _print=True)
+            image_parts = [
+                part.inline_data.data
+                for part in response.candidates[0].content.parts
+                if part.inline_data
+            ]
+            image = image_parts[0]
+        except Exception as e:
+            log(
+                f"==== Error: Unable to create image ====\n\n{e}",
+                _print=True,
+            )
+            raise e
+        return image
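Taken together, the new GeminiAIModel wraps a single google-genai client behind text, JSON, speech-to-text, text-to-speech, and image entry points. A usage sketch follows (not from the package; it assumes GOOGLEAI_KEY is set in the environment and that instances of this AutoModel can be constructed directly):

    agent = GeminiAIModel(name="narrator")

    # Plain text generation against the configured text model.
    story = agent.generate_text("Write a two-sentence ghost story.")

    # Structured output: the schema dict must carry name/description/parameters,
    # or _add_function raises ValueError.
    schema = {
        "name": "extract_character",
        "description": "Extract the main character from the story.",
        "parameters": {
            "type": "object",
            "properties": {"name": {"type": "string"}, "role": {"type": "string"}},
            "required": ["name"],
        },
    }
    character = agent.generate_json(story, schema)  # returns tool_call.args as a dict

    # TTS: raw PCM is wrapped in a WAV header, transcoded via pydub, and
    # returned as MP3 bytes; a random voice is chosen when none is given.
    mp3_bytes = agent.generate_audio(story, voice="Kore")

Note the design choice in generate_json: rather than parsing JSON out of free text, it forces a function call ("mode": "ANY") and returns tool_call.args, which arrives already structured.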
autonomous/ai/models/local.py
ADDED
@@ -0,0 +1,99 @@
+import io
+import json
+import os
+import random
+import time
+from base64 import b64decode
+
+import openai
+from ollama import ChatResponse, chat
+
+from autonomous import log
+from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
+from autonomous.model.automodel import AutoModel
+
+
+class LocalAIModel(AutoModel):
+    _client = None
+    instructions = StringAttr(
+        default="You are highly skilled AI trained to assist with various tasks."
+    )
+    description = StringAttr(
+        default="A helpful AI assistant trained to assist with various tasks."
+    )
+
+    @property
+    def client(self):
+        if not self._client:
+            self._client = "deepseek-r1"  # OpenAI(api_key=os.environ.get("OPENAI_KEY"))
+        return self._client
+
+    def clear_agent(self):
+        pass
+
+    def clear_agents(self):
+        pass
+
+    # def _get_agent_id(self):
+    # pass
+
+    # def _add_function(self, user_function):
+    pass
+
+    def _format_messages(self, messages):
+        pass
+
+    def clear_files(self, file_id=None):
+        pass
+
+    def attach_file(self, file_contents, filename="dbdata.json"):
+        pass
+
+    def generate_json(self, messages, function, additional_instructions=""):
+        message = messages + additional_instructions
+        message += f"""
+IMPORTANT: Respond in JSON FORMAT using the SCHEMA below. DO NOT add any text to the response outside of the supplied JSON schema:
+{function}
+"""
+        response: ChatResponse = chat(
+            model=self.client,
+            messages=[
+                {
+                    "role": "user",
+                    "content": message,
+                },
+            ],
+        )
+        return response.message.content
+
+    def generate_text(self, messages, additional_instructions=""):
+        message = messages + additional_instructions
+        response: ChatResponse = chat(
+            model=self.client,
+            messages=[
+                {
+                    "role": "user",
+                    "content": message,
+                },
+            ],
+        )
+        return response.message.content
+
+    def generate_audio(self, prompt, **kwargs):
+        raise NotImplementedError
+
+    def generate_image(self, prompt, **kwargs):
+        raise NotImplementedError
+
+    def summarize_text(self, text, primer=""):
+        response: ChatResponse = chat(
+            model=self.client,
+            messages=[
+                {
+                    "role": "system",
+                    "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
+                },
+                {"role": "user", "content": text},
+            ],
+        )
+        return response.message.content
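Unlike the Gemini backend above (and the OpenAI backend below), LocalAIModel does not enforce structured output: generate_json merely appends the schema to the prompt and returns the raw completion string, so the caller must parse and validate it. A usage sketch, assuming a local Ollama server with the deepseek-r1 model already pulled:

    import json

    local = LocalAIModel()

    # The schema is pasted into the prompt verbatim; nothing guarantees the
    # model honors it, so parse defensively.
    raw = local.generate_json(
        "List three moons of Jupiter.",
        function='{"type": "array", "items": {"type": "string"}}',
    )
    try:
        moons = json.loads(raw)
    except json.JSONDecodeError:
        moons = []  # local models may still wrap the JSON in extra prose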
autonomous/ai/models/openai.py
CHANGED
@@ -1,3 +1,4 @@
+import io
 import json
 import os
 import random
@@ -15,7 +16,7 @@ from autonomous.model.automodel import AutoModel
 
 class OpenAIModel(AutoModel):
     _client = None
-    _text_model = "
+    _text_model = "o3-mini"
     _image_model = "dall-e-3"
     _json_model = "gpt-4o"
     agent_id = StringAttr()
@@ -36,6 +37,15 @@ class OpenAIModel(AutoModel):
             self._client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
         return self._client
 
+    def delete(self):
+        self.clear_files()
+        if self.agent_id:
+            try:
+                self.client.beta.assistants.delete(self.agent_id)
+            except openai_NotFoundError:
+                log(f"==== Agent with ID: {self.agent_id} not found ====")
+        return super().delete()
+
     def clear_agent(self):
         if self.agent_id:
             self.client.beta.assistants.delete(self.agent_id)
@@ -44,18 +54,22 @@ class OpenAIModel(AutoModel):
 
     def clear_agents(self):
         assistants = self.client.beta.assistants.list().data
-
-
-
-
-
-
-
-
-
+        if assistants:
+            log(assistants)
+            for assistant in assistants:
+                log(f"==== Deleting Agent with ID: {assistant.id} ====")
+                try:
+                    self.client.beta.assistants.delete(assistant.id)
+                except openai_NotFoundError:
+                    log(f"==== Agent with ID: {assistant.id} not found ====")
+        self.agent_id = ""
+        self.save()
 
     def _get_agent_id(self):
-
+        try:
+            self.client.beta.assistants.retrieve(self.agent_id)
+        except (ValueError, openai.NotFoundError) as e:
+            log(f"{e} -- no agent found, creating a new one")
         agent = self.client.beta.assistants.create(
             instructions=self.instructions,
             description=self.description,
@@ -67,18 +81,15 @@ class OpenAIModel(AutoModel):
         self.save()
         return self.agent_id
 
-    def clear_files(self, file_id=None
+    def clear_files(self, file_id=None):
         if not file_id:
-
-
-            for vs in self.client.beta.vector_stores.list().data:
+            for vs in self.client.vector_stores.list().data:
                 try:
-                    self.client.
+                    self.client.vector_stores.delete(vs.id)
                 except openai_NotFoundError:
                     log(f"==== Vector Store {vs.id} not found ====")
-
-
-            self.client.files.delete(file_id=sf.id)
+            for sf in self.client.files.list().data:
+                self.client.files.delete(file_id=sf.id)
         else:
             self.client.files.delete(file_id=file_id)
         self.tools.pop("file_search", None)
@@ -89,19 +100,27 @@
         # Upload the user provided file to OpenAI
         self.tools["file_search"] = {"type": "file_search"}
         # Create a vector store
-
-
-
-
-
+        try:
+            if vs := self.client.vector_stores.list().data:
+                self.vector_store = self.client.vector_stores.retrieve(
+                    vector_store_id=vs[0].id
+                ).id
+            else:
+                for sf in self.client.files.list().data:
+                    self.client.files.delete(file_id=sf.id)
+                raise FileNotFoundError("No vector store found")
+        except FileNotFoundError:
+            self.vector_store = self.client.vector_stores.create(
+                name="World Reference",
                 expires_after={"anchor": "last_active_at", "days": 14},
             ).id
-
+        log(f"==== Vector Store ID: {self.vector_store}====", _print=True)
+        # Attach File
         file_obj = self.client.files.create(
             file=(filename, file_contents), purpose="assistants"
         )
-
-        self.client.
+        log(f"==== FileStore ID: {file_obj.id}====", _print=True)
+        self.client.vector_stores.files.create(
            vector_store_id=self.vector_store,
            file_id=file_obj.id,
        )
@@ -183,14 +202,14 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
         ]:
             running_job = False
 
-        except openai.
+        except openai.BadRequestError as err:
             # Handle specific bad request errors
-
-            if "already has an active run" in
+            log(f"==== Error: {err} ====", _print=True)
+            if "already has an active run" in str(err):
                 log("Previous run is still active. Waiting...", _print=True)
                 time.sleep(2)  # wait before retrying or checking run status
             else:
-                raise
+                raise err
 
         # while run.status in ["queued", "in_progress"]:
         #     run = self.client.beta.threads.runs.retrieve(
@@ -202,7 +221,7 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
             log(f"==== !!! ERROR !!!: {run.last_error} ====", _print=True)
             return None
         log("=================== RUN COMPLETED ===================", _print=True)
-        log(run.status, _print=True)
+        # log(run.status, _print=True)
         if run.status == "completed":
             response = self.client.beta.threads.messages.list(thread_id=thread.id)
             results = response.data[0].content[0].text.value
@@ -218,14 +237,15 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
         try:
             results = json.loads(results, strict=False)
         except Exception:
-
+            log(f"==== Invalid JSON:\n{results}", _print=True)
             return {}
         else:
-            log(f"==== Results: {results}", _print=True)
-            log("=================== END REPORT ===================", _print=True)
+            # log(f"==== Results: {results}", _print=True)
+            # log("=================== END REPORT ===================", _print=True)
             return results
 
     def generate_text(self, messages, additional_instructions=""):
+        self._get_agent_id()
         formatted_messages = self._format_messages(messages)
         thread = self.client.beta.threads.create(messages=formatted_messages)
 
@@ -247,8 +267,8 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
         if run.status in ["failed", "expired", "canceled"]:
             log(f"==== Error: {run.last_error} ====", _print=True)
             return None
-        log("=================== RUN COMPLETED ===================", _print=True)
-        log(run.status, _print=True)
+        # log("=================== RUN COMPLETED ===================", _print=True)
+        # log(run.status, _print=True)
         if run.status == "completed":
             response = self.client.beta.threads.messages.list(thread_id=thread.id)
             results = response.data[0].content[0].text.value
@@ -256,21 +276,39 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
             log(f"====Status: {run.status} Error: {run.last_error} ====", _print=True)
             return None
 
-        log(results, _print=True)
-        log("=================== END REPORT ===================", _print=True)
+        # log(results, _print=True)
+        # log("=================== END REPORT ===================", _print=True)
         return results
 
-    def generate_audio(self, prompt,
+    def generate_audio(self, prompt, **kwargs):
         voice = kwargs.get("voice") or random.choice(
-            [
+            [
+                "alloy",
+                "ash",
+                "ballad",
+                "coral",
+                "echo",
+                "fable",
+                "onyx",
+                "nova",
+                "sage",
+                "shimmer",
+            ]
         )
         response = self.client.audio.speech.create(
             model="tts-1",
             voice=voice,
             input=prompt,
         )
+        # log(response, _print=True)
+        return response.read()
 
-
+    def generate_audio_text(self, audio_file, **kwargs):
+        response = self.client.audio.transcriptions.create(
+            model="gpt-4o-transcribe", file=audio_file, language="en", **kwargs
+        )
+        log(response, _print=True)
+        return response.text
 
     def generate_image(self, prompt, **kwargs):
         image = None
@@ -283,7 +321,8 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
             )
             image_dict = response.data[0]
         except Exception as e:
-
+            log(f"==== Error: Unable to create image ====\n\n{e}", _print=True)
+            raise e
         else:
             image = b64decode(image_dict.b64_json)
         return image
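Beyond the new delete() and generate_audio_text() methods, most of the churn in clear_files and attach_file is the rename from client.beta.vector_stores to client.vector_stores, tracking the OpenAI Python SDK's move of vector stores out of the beta namespace. Code that has to run against both SDK generations could bridge the rename with a small shim (a sketch, not part of the package):

    def vector_stores_api(client):
        # Prefer the GA attribute; fall back to the beta namespace on older SDKs.
        return getattr(client, "vector_stores", None) or client.beta.vector_stores

    # e.g. instead of self.client.vector_stores.delete(vs.id):
    # vector_stores_api(self.client).delete(vs.id)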
autonomous/ai/textagent.py
CHANGED
@@ -1,12 +1,11 @@
 from autonomous import log
 from autonomous.model.autoattr import ReferenceAttr, StringAttr
 from autonomous.model.automodel import AutoModel
-
+from autonomous.ai.baseagent import BaseAgent
 from .models.openai import OpenAIModel
 
 
-class TextAgent(
-    client = ReferenceAttr(choices=[OpenAIModel])
+class TextAgent(BaseAgent):
     name = StringAttr(default="textagent")
     instructions = StringAttr(
         default="You are highly skilled AI trained to assist with generating text according to the given requirements."
@@ -15,19 +14,6 @@ class TextAgent(AutoModel):
         default="A helpful AI assistant trained to assist with generating text according to the given requirements."
     )
 
-    _ai_model = OpenAIModel
-
-    def get_client(self):
-        if self.client is None:
-            self.client = self._ai_model(
-                name=self.name,
-                instructions=self.instructions,
-                description=self.description,
-            )
-            self.client.save()
-            self.save()
-        return self.client
-
     def summarize_text(self, text, primer=""):
         return self.get_client().summarize_text(text, primer)
 
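The get_client plumbing removed here is not gone: per the file list, TextAgent now inherits it from the new autonomous/ai/baseagent.py (+42 lines), which is not shown in this section. Judging only from the lines deleted above, the shared base plausibly looks like the following reconstruction (hypothetical; the actual BaseAgent may differ):

    class BaseAgent(AutoModel):
        # Hypothetical reconstruction from the code removed from textagent.py;
        # baseagent.py itself is not included in this excerpt.
        client = ReferenceAttr(choices=[OpenAIModel])
        _ai_model = OpenAIModel

        def get_client(self):
            if self.client is None:
                self.client = self._ai_model(
                    name=self.name,
                    instructions=self.instructions,
                    description=self.description,
                )
                self.client.save()
                self.save()
            return self.client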