autonomous-app 0.3.32__py3-none-any.whl → 0.3.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,347 +0,0 @@
1
- import io
2
- import json
3
- import os
4
- import random
5
- import time
6
- from base64 import b64decode
7
-
8
- import openai
9
- from openai import NotFoundError as openai_NotFoundError
10
- from openai import OpenAI
11
-
12
- from autonomous import log
13
- from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
14
- from autonomous.model.automodel import AutoModel
15
-
16
-
17
class OpenAIModel(AutoModel):
    """Persistent wrapper around the OpenAI Assistants API.

    Stores the remote assistant id, attached tools, and vector-store id on
    the model record so the same assistant can be reused across calls.
    """

    # Lazily constructed OpenAI client; see the `client` property.
    _client = None
    # Model names for plain chat completions, image generation, and
    # assistant (JSON/function-call) runs respectively.
    _text_model = "o3-mini"
    _image_model = "dall-e-3"
    _json_model = "gpt-4o"
    # Remote assistant id; empty string until first created by _get_agent_id.
    agent_id = StringAttr()
    messages = ListAttr(StringAttr(default=[]))
    # Tool registrations keyed by tool name (e.g. "file_search", "function").
    tools = DictAttr()
    # Id of the vector store backing file search, if any.
    vector_store = StringAttr()
    name = StringAttr(default="agent")
    instructions = StringAttr(
        default="You are highly skilled AI trained to assist with various tasks."
    )
    description = StringAttr(
        default="A helpful AI assistant trained to assist with various tasks."
    )
33
-
34
- @property
35
- def client(self):
36
- if not self._client:
37
- self._client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
38
- return self._client
39
-
40
- def delete(self):
41
- self.clear_files()
42
- if self.agent_id:
43
- try:
44
- self.client.beta.assistants.delete(self.agent_id)
45
- except openai_NotFoundError:
46
- log(f"==== Agent with ID: {self.agent_id} not found ====")
47
- return super().delete()
48
-
49
- def clear_agent(self):
50
- if self.agent_id:
51
- self.client.beta.assistants.delete(self.agent_id)
52
- self.agent_id = ""
53
- self.save()
54
-
55
- def clear_agents(self):
56
- assistants = self.client.beta.assistants.list().data
57
- if assistants:
58
- log(assistants)
59
- for assistant in assistants:
60
- log(f"==== Deleting Agent with ID: {assistant.id} ====")
61
- try:
62
- self.client.beta.assistants.delete(assistant.id)
63
- except openai_NotFoundError:
64
- log(f"==== Agent with ID: {assistant.id} not found ====")
65
- self.agent_id = ""
66
- self.save()
67
-
68
- def _get_agent_id(self):
69
- try:
70
- self.client.beta.assistants.retrieve(self.agent_id)
71
- except (ValueError, openai.NotFoundError) as e:
72
- log(f"{e} -- no agent found, creating a new one")
73
- agent = self.client.beta.assistants.create(
74
- instructions=self.instructions,
75
- description=self.description,
76
- name=self.name,
77
- model=self._json_model,
78
- )
79
- self.agent_id = agent.id
80
- log(f"==== Creating Agent with ID: {self.agent_id} ====")
81
- self.save()
82
- return self.agent_id
83
-
84
    def clear_files(self, file_id=None):
        """Delete uploaded file(s) from the OpenAI account.

        With no ``file_id``, deletes every vector store and every uploaded
        file; with a ``file_id``, deletes just that file. In either case the
        ``file_search`` tool registration is dropped and the record saved.
        Returns the (post-deletion) remote file list.
        """
        if not file_id:
            # Full wipe: remove all vector stores, tolerating races where a
            # store disappears between list() and delete().
            for vs in self.client.vector_stores.list().data:
                try:
                    self.client.vector_stores.delete(vs.id)
                except openai_NotFoundError:
                    log(f"==== Vector Store {vs.id} not found ====")
            for sf in self.client.files.list().data:
                self.client.files.delete(file_id=sf.id)
        else:
            self.client.files.delete(file_id=file_id)
        # NOTE(review): reconstructed from a whitespace-mangled diff — the
        # original indentation of the next three lines is ambiguous (they may
        # have been inside the else branch). Placed at method level so the
        # tool registration is cleared in both branches; confirm against VCS.
        self.tools.pop("file_search", None)
        self.save()
        return self.client.files.list()
98
-
99
    def attach_file(self, file_contents, filename="dbdata.json"):
        """Upload *file_contents* and expose it to the assistant via file search.

        Reuses the first existing vector store when one exists; otherwise
        purges orphaned files and creates a fresh, auto-expiring store.
        Returns the id of the uploaded file object.
        """
        self.tools["file_search"] = {"type": "file_search"}
        # Locate (or create) the vector store that will hold the file.
        try:
            if vs := self.client.vector_stores.list().data:
                self.vector_store = self.client.vector_stores.retrieve(
                    vector_store_id=vs[0].id
                ).id
            else:
                # No store exists: delete dangling files, then fall through
                # to the creation path via the raised FileNotFoundError.
                for sf in self.client.files.list().data:
                    self.client.files.delete(file_id=sf.id)
                raise FileNotFoundError("No vector store found")
        except FileNotFoundError:
            # Store expires two weeks after last use to limit storage costs.
            self.vector_store = self.client.vector_stores.create(
                name="World Reference",
                expires_after={"anchor": "last_active_at", "days": 14},
            ).id
        log(f"==== Vector Store ID: {self.vector_store}====", _print=True)
        # Upload the file and attach it to the chosen vector store.
        file_obj = self.client.files.create(
            file=(filename, file_contents), purpose="assistants"
        )
        log(f"==== FileStore ID: {file_obj.id}====", _print=True)
        self.client.vector_stores.files.create(
            vector_store_id=self.vector_store,
            file_id=file_obj.id,
        )
        # Point the assistant's file_search tool at the store.
        self.client.beta.assistants.update(
            self._get_agent_id(),
            tools=list(self.tools.values()),
            tool_resources={"file_search": {"vector_store_ids": [self.vector_store]}},
        )
        self.save()
        return file_obj.id
134
-
135
    def _add_function(self, user_function):
        """Register *user_function* as a strict function tool on the assistant.

        Mutates the schema in place: enables strict mode, forbids extra
        properties, and — when no ``required`` list is given — requires every
        declared property. Returns an instruction snippet for the prompt.
        """
        user_function["strict"] = True
        user_function["parameters"]["additionalProperties"] = False
        if not user_function["parameters"].get("required"):
            user_function["parameters"]["required"] = list(
                user_function["parameters"]["properties"].keys()
            )

        self.tools["function"] = {"type": "function", "function": user_function}
        self.client.beta.assistants.update(
            self._get_agent_id(), tools=list(self.tools.values())
        )
        return """
IMPORTANT: Always use the function 'response' tool to respond to the user with only the requested JSON schema. DO NOT add any text to the response outside of the JSON schema.

"""
151
-
152
- def _format_messages(self, messages):
153
- message_list = []
154
- if isinstance(messages, str):
155
- message_list.insert(0, {"role": "user", "content": messages})
156
- else:
157
- for message in messages:
158
- if isinstance(message, str):
159
- message_list.insert(0, {"role": "user", "content": message})
160
- else:
161
- raise Exception(
162
- f"==== Invalid message: {message} ====\nMust be a string "
163
- )
164
- return message_list
165
-
166
    def generate_json(self, messages, function, additional_instructions=""):
        """Run the assistant with a forced function call and return parsed JSON.

        Args:
            messages: A string or list of strings (see _format_messages).
            function: OpenAI function-tool schema; mutated in place to enable
                strict mode and require every declared property.
            additional_instructions: Extra instructions appended to the run.

        Returns:
            The dict parsed from the model output, ``{}`` when the output is
            not valid JSON, or ``None`` when the run fails/expires/cancels.
        """
        # _instructions_addition = self._add_function(function)
        # Force strict schema adherence on the supplied tool definition.
        function["strict"] = True
        function["parameters"]["additionalProperties"] = False
        function["parameters"]["required"] = list(
            function["parameters"]["properties"].keys()
        )

        formatted_messages = self._format_messages(messages)
        thread = self.client.beta.threads.create(messages=formatted_messages)
        # log(function, _print=True)
        running_job = True
        while running_job:
            try:
                # tool_choice pins the run to the supplied function so the
                # answer arrives as structured arguments, not free text.
                run = self.client.beta.threads.runs.create_and_poll(
                    thread_id=thread.id,
                    assistant_id=self._get_agent_id(),
                    additional_instructions=additional_instructions,
                    parallel_tool_calls=False,
                    tools=[
                        {"type": "file_search"},
                        {"type": "function", "function": function},
                    ],
                    tool_choice={
                        "type": "function",
                        "function": {"name": function["name"]},
                    },
                )
                log(f"==== Job Status: {run.status} ====", _print=True)
                # Leave the retry loop on any terminal or action-pending state.
                if run.status in [
                    "failed",
                    "expired",
                    "canceled",
                    "completed",
                    "incomplete",
                    "requires_action",
                ]:
                    running_job = False

            except openai.BadRequestError as err:
                # Handle specific bad request errors
                log(f"==== Error: {err} ====", _print=True)
                # A thread allows only one active run; wait and retry.
                if "already has an active run" in str(err):
                    log("Previous run is still active. Waiting...", _print=True)
                    time.sleep(2)  # wait before retrying or checking run status
                else:
                    raise err

        if run.status in ["failed", "expired", "canceled"]:
            log(f"==== !!! ERROR !!!: {run.last_error} ====", _print=True)
            return None
        log("=================== RUN COMPLETED ===================", _print=True)
        if run.status == "completed":
            # Model replied as plain text; take the newest thread message.
            response = self.client.beta.threads.messages.list(thread_id=thread.id)
            results = response.data[0].content[0].text.value
        elif run.status == "requires_action":
            # Forced tool call: the JSON payload is the function arguments.
            results = run.required_action.submit_tool_outputs.tool_calls[
                0
            ].function.arguments
        else:
            log(f"====Status: {run.status} Error: {run.last_error} ====", _print=True)
            return None

        # Trim any stray prose around the outermost JSON object before parsing.
        results = results[results.find("{") : results.rfind("}") + 1]
        try:
            results = json.loads(results, strict=False)
        except Exception:
            log(f"==== Invalid JSON:\n{results}", _print=True)
            return {}
        else:
            return results
246
-
247
    def generate_text(self, messages, additional_instructions=""):
        """Run the assistant on *messages* and return its text reply.

        Returns the newest message's text, or ``None`` when the run fails,
        expires, or is canceled.
        """
        # Ensure the assistant exists before creating the thread.
        self._get_agent_id()
        formatted_messages = self._format_messages(messages)
        thread = self.client.beta.threads.create(messages=formatted_messages)

        run = self.client.beta.threads.runs.create(
            thread_id=thread.id,
            assistant_id=self._get_agent_id(),
            additional_instructions=additional_instructions,
            parallel_tool_calls=False,
        )

        # Poll every 0.5s until the run leaves the queued/in-progress states.
        while run.status in ["queued", "in_progress"]:
            run = self.client.beta.threads.runs.retrieve(
                thread_id=thread.id,
                run_id=run.id,
            )
            time.sleep(0.5)
            log(f"==== Job Status: {run.status} ====", _print=True)

        if run.status in ["failed", "expired", "canceled"]:
            log(f"==== Error: {run.last_error} ====", _print=True)
            return None
        if run.status == "completed":
            # Thread messages are listed newest-first; take the latest reply.
            response = self.client.beta.threads.messages.list(thread_id=thread.id)
            results = response.data[0].content[0].text.value
        else:
            log(f"====Status: {run.status} Error: {run.last_error} ====", _print=True)
            return None

        return results
282
-
283
- def generate_audio(self, prompt, **kwargs):
284
- voice = kwargs.get("voice") or random.choice(
285
- [
286
- "alloy",
287
- "ash",
288
- "ballad",
289
- "coral",
290
- "echo",
291
- "fable",
292
- "onyx",
293
- "nova",
294
- "sage",
295
- "shimmer",
296
- ]
297
- )
298
- response = self.client.audio.speech.create(
299
- model="tts-1",
300
- voice=voice,
301
- input=prompt,
302
- )
303
- # log(response, _print=True)
304
- return response.read()
305
-
306
- def generate_audio_text(self, audio_file, **kwargs):
307
- response = self.client.audio.transcriptions.create(
308
- model="gpt-4o-transcribe", file=audio_file, language="en", **kwargs
309
- )
310
- log(response, _print=True)
311
- return response.text
312
-
313
- def generate_image(self, prompt, **kwargs):
314
- image = None
315
- try:
316
- response = self.client.images.generate(
317
- model=self._image_model,
318
- prompt=prompt,
319
- response_format="b64_json",
320
- **kwargs,
321
- )
322
- image_dict = response.data[0]
323
- except Exception as e:
324
- log(f"==== Error: Unable to create image ====\n\n{e}", _print=True)
325
- raise e
326
- else:
327
- image = b64decode(image_dict.b64_json)
328
- return image
329
-
330
- def summarize_text(self, text, primer=""):
331
- message = [
332
- {
333
- "role": "system",
334
- "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
335
- },
336
- {"role": "user", "content": text},
337
- ]
338
- response = self.client.chat.completions.create(
339
- model=self._text_model, messages=message
340
- )
341
- try:
342
- result = response.choices[0].message.content
343
- except Exception as e:
344
- log(f"{type(e)}:{e}\n\n Unable to generate content ====")
345
- return None
346
-
347
- return result
@@ -1 +0,0 @@
1
- from .autotask import AutoTasks
File without changes