autonomous-app 0.2.25__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. autonomous/__init__.py +5 -2
  2. autonomous/ai/audioagent.py +32 -0
  3. autonomous/ai/imageagent.py +31 -0
  4. autonomous/ai/jsonagent.py +40 -0
  5. autonomous/ai/models/__init__.py +0 -0
  6. autonomous/ai/models/openai.py +308 -0
  7. autonomous/ai/oaiagent.py +20 -194
  8. autonomous/ai/textagent.py +35 -0
  9. autonomous/auth/autoauth.py +11 -11
  10. autonomous/auth/user.py +24 -11
  11. autonomous/db/__init__.py +41 -0
  12. autonomous/db/base/__init__.py +33 -0
  13. autonomous/db/base/common.py +62 -0
  14. autonomous/db/base/datastructures.py +476 -0
  15. autonomous/db/base/document.py +1230 -0
  16. autonomous/db/base/fields.py +767 -0
  17. autonomous/db/base/metaclasses.py +468 -0
  18. autonomous/db/base/utils.py +22 -0
  19. autonomous/db/common.py +79 -0
  20. autonomous/db/connection.py +472 -0
  21. autonomous/db/context_managers.py +313 -0
  22. autonomous/db/dereference.py +291 -0
  23. autonomous/db/document.py +1141 -0
  24. autonomous/db/errors.py +165 -0
  25. autonomous/db/fields.py +2732 -0
  26. autonomous/db/mongodb_support.py +24 -0
  27. autonomous/db/pymongo_support.py +80 -0
  28. autonomous/db/queryset/__init__.py +28 -0
  29. autonomous/db/queryset/base.py +2033 -0
  30. autonomous/db/queryset/field_list.py +88 -0
  31. autonomous/db/queryset/manager.py +58 -0
  32. autonomous/db/queryset/queryset.py +189 -0
  33. autonomous/db/queryset/transform.py +527 -0
  34. autonomous/db/queryset/visitor.py +189 -0
  35. autonomous/db/signals.py +59 -0
  36. autonomous/logger.py +3 -0
  37. autonomous/model/autoattr.py +120 -0
  38. autonomous/model/automodel.py +121 -308
  39. autonomous/storage/imagestorage.py +9 -54
  40. autonomous/tasks/autotask.py +0 -25
  41. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.1.dist-info}/METADATA +7 -8
  42. autonomous_app-0.3.1.dist-info/RECORD +60 -0
  43. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.1.dist-info}/WHEEL +1 -1
  44. autonomous/db/autodb.py +0 -86
  45. autonomous/db/table.py +0 -156
  46. autonomous/errors/__init__.py +0 -1
  47. autonomous/errors/danglingreferenceerror.py +0 -8
  48. autonomous/model/autoattribute.py +0 -20
  49. autonomous/model/orm.py +0 -86
  50. autonomous/model/serializer.py +0 -110
  51. autonomous_app-0.2.25.dist-info/RECORD +0 -36
  52. /autonomous/{storage → apis}/version_control/GHCallbacks.py +0 -0
  53. /autonomous/{storage → apis}/version_control/GHOrganization.py +0 -0
  54. /autonomous/{storage → apis}/version_control/GHRepo.py +0 -0
  55. /autonomous/{storage → apis}/version_control/GHVersionControl.py +0 -0
  56. /autonomous/{storage → apis}/version_control/__init__.py +0 -0
  57. /autonomous/{storage → utils}/markdown.py +0 -0
  58. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.1.dist-info}/LICENSE +0 -0
  59. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.1.dist-info}/top_level.txt +0 -0
autonomous/__init__.py CHANGED
@@ -1,4 +1,7 @@
- __version__ = "0.2.25"
+ __version__ = "0.3.01"
+
+ from dotenv import load_dotenv
+
+ load_dotenv()

  from .logger import log
- from .model.automodel import AutoModel
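With load_dotenv() now called at package import, environment-based configuration (such as the OPENAI_KEY read by the new autonomous/ai/models/openai.py below) can come from a local .env file. A minimal sketch of that assumption; the .env location and key name checks are illustrative:

# Sketch only: autonomous/__init__.py now calls load_dotenv() on import,
# so a .env file in the working directory (e.g. containing OPENAI_KEY=...)
# is picked up before any AI client is constructed.
import os

import autonomous  # importing the package triggers load_dotenv()

# OpenAIModel in autonomous/ai/models/openai.py reads this variable.
print("OPENAI_KEY configured:", bool(os.environ.get("OPENAI_KEY")))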
autonomous/ai/audioagent.py ADDED
@@ -0,0 +1,32 @@
+ from autonomous import log
+ from autonomous.model.autoattr import ReferenceAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+ from .models.openai import OpenAIModel
+
+
+ class AudioAgent(AutoModel):
+     client = ReferenceAttr(choices=[OpenAIModel])
+     name = StringAttr(default="audioagent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with generating audio files."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with generating audio files."
+     )
+
+     _ai_model = OpenAIModel
+
+     def get_client(self):
+         if self.client is None:
+             self.client = self._ai_model(
+                 name=self.name,
+                 instructions=self.instructions,
+                 description=self.description,
+             )
+             self.client.save()
+             self.save()
+         return self.client
+
+     def generate(self, prompt, file_path, **kwargs):
+         return self.get_client().generate_audio(prompt, file_path, **kwargs)
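A minimal usage sketch of the new agent, assuming a database connection is configured for AutoModel persistence and OPENAI_KEY is set (get_client() creates and saves an OpenAIModel on first use); the prompt, output file name, and voice below are illustrative, not taken from the package:

from autonomous.ai.audioagent import AudioAgent

agent = AudioAgent()
# generate() lazily builds and persists the OpenAIModel client, then
# delegates to OpenAIModel.generate_audio (tts-1 speech synthesis).
agent.generate(
    "Welcome to the autonomous app.",
    "welcome.mp3",
    voice="nova",  # optional; a random voice is chosen if omitted
)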
autonomous/ai/imageagent.py ADDED
@@ -0,0 +1,31 @@
+ from autonomous.model.autoattr import ReferenceAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+ from .models.openai import OpenAIModel
+
+
+ class ImageAgent(AutoModel):
+     client = ReferenceAttr(choices=[OpenAIModel])
+     name = StringAttr(default="imageagent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with generating images."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with generating images."
+     )
+
+     _ai_model = OpenAIModel
+
+     def get_client(self):
+         if self.client is None:
+             self.client = self._ai_model(
+                 name=self.name,
+                 instructions=self.instructions,
+                 description=self.description,
+             )
+             self.client.save()
+             self.save()
+         return self.client
+
+     def generate(self, prompt, **kwargs):
+         return self.get_client().generate_image(prompt, **kwargs)
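As above, a usage sketch under the same persistence and OPENAI_KEY assumptions. ImageAgent.generate returns decoded image bytes (or None on failure), since the underlying OpenAIModel.generate_image requests a b64_json response; the prompt, size kwarg, and output file name are illustrative:

from autonomous.ai.imageagent import ImageAgent

image_bytes = ImageAgent().generate(
    "a watercolor lighthouse at dusk",
    size="1024x1024",  # forwarded to the OpenAI images API via **kwargs
)
if image_bytes:
    with open("lighthouse.png", "wb") as f:
        f.write(image_bytes)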
autonomous/ai/jsonagent.py ADDED
@@ -0,0 +1,40 @@
+ import json
+
+ from autonomous.model.autoattr import ReferenceAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+ from .models.openai import OpenAIModel
+
+
+ class JSONAgent(AutoModel):
+     client = ReferenceAttr(choices=[OpenAIModel])
+     name = StringAttr(default="jsonagent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with generating JSON formatted data."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with generating JSON formatted data."
+     )
+
+     _ai_model = OpenAIModel
+
+     def get_client(self):
+         if self.client is None:
+             self.client = self._ai_model(
+                 name=self.name,
+                 instructions=self.instructions,
+                 description=self.description,
+             )
+             self.client.save()
+             self.save()
+         return self.client
+
+     def generate(self, messages, function, additional_instructions=""):
+         result = self.get_client().generate_json(
+             messages, function, additional_instructions
+         )
+         if isinstance(result, str):
+             result = json.loads(result)
+         elif not isinstance(result, dict):
+             raise ValueError(f"Invalid JSON response from AI model.\n\n{result}")
+         return result
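A usage sketch, again assuming configured persistence and OPENAI_KEY. The function argument is an OpenAI-style function schema; generate_json marks it strict, forbids additional properties, and requires every declared property, so the illustrative schema below only needs name and parameters:

from autonomous.ai.jsonagent import JSONAgent

schema = {
    "name": "response",  # the default instructions steer the model toward a 'response' tool
    "description": "Return the extracted fields.",
    "parameters": {
        "type": "object",
        "properties": {
            "title": {"type": "string"},
            "year": {"type": "integer"},
        },
    },
}

# Returns a dict (string responses are re-parsed as JSON; anything else raises).
result = JSONAgent().generate(
    ["Extract the title and year: 'Dune' was published in 1965."],
    schema,
)
print(result)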
autonomous/ai/models/__init__.py ADDED
File without changes
autonomous/ai/models/openai.py ADDED
@@ -0,0 +1,308 @@
+ import json
+ import os
+ import random
+ import time
+ from base64 import b64decode
+
+ import openai
+ from openai import NotFoundError as openai_NotFoundError
+ from openai import OpenAI
+
+ from autonomous import log
+ from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+
+ class OpenAIModel(AutoModel):
+     _client = None
+     _text_model = "gpt-4o-mini"
+     _image_model = "dall-e-3"
+     _json_model = "gpt-4o"
+     agent_id = StringAttr()
+     messages = ListAttr(StringAttr(default=[]))
+     tools = DictAttr()
+     vector_store = StringAttr()
+     name = StringAttr(default="agent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with various tasks."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with various tasks."
+     )
+
+     @property
+     def client(self):
+         if not self._client:
+             self._client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
+         return self._client
+
+     def clear_agent(self):
+         if self.agent_id:
+             self.client.beta.assistants.delete(self.agent_id)
+         self.agent_id = ""
+         self.save()
+
+     def clear_agents(self):
+         assistants = self.client.beta.assistants.list().data
+         log(assistants)
+         for assistant in assistants:
+             log(f"==== Deleting Agent with ID: {assistant.id} ====")
+             try:
+                 self.client.beta.assistants.delete(assistant.id)
+             except openai_NotFoundError:
+                 log(f"==== Agent with ID: {assistant.id} not found ====")
+         self.agent_id = ""
+         self.save()
+
+     def _get_agent_id(self):
+         if not self.agent_id or not self.client.beta.assistants.retrieve(self.agent_id):
+             agent = self.client.beta.assistants.create(
+                 instructions=self.instructions,
+                 description=self.description,
+                 name=self.name,
+                 model=self._json_model,
+             )
+             self.agent_id = agent.id
+             log(f"==== Creating Agent with ID: {self.agent_id} ====")
+             self.save()
+         return self.agent_id
+
+     def clear_files(self, file_id=None, all=False):
+         if not file_id:
+             store_files = self.client.files.list().data
+
+             for vs in self.client.beta.vector_stores.list().data:
+                 try:
+                     self.client.beta.vector_stores.delete(vs.id)
+                 except openai_NotFoundError:
+                     log(f"==== Vector Store {vs.id} not found ====")
+             if all:
+                 for sf in store_files:
+                     self.client.files.delete(file_id=sf.id)
+         else:
+             self.client.files.delete(file_id=file_id)
+         self.tools.pop("file_search", None)
+         self.save()
+         return self.client.files.list()
+
+     def attach_file(self, file_contents, filename="dbdata.json"):
+         # Upload the user provided file to OpenAI
+         self.tools["file_search"] = {"type": "file_search"}
+         # Create a vector store
+         if vs := self.client.beta.vector_stores.list().data:
+             self.vector_store = vs[0].id
+         else:
+             self.vector_store = self.client.beta.vector_stores.create(
+                 name="Data Reference",
+                 expires_after={"anchor": "last_active_at", "days": 14},
+             ).id
+
+         file_obj = self.client.files.create(
+             file=(filename, file_contents), purpose="assistants"
+         )
+
+         self.client.beta.vector_stores.files.create(
+             vector_store_id=self.vector_store,
+             file_id=file_obj.id,
+         )
+         self.client.beta.assistants.update(
+             self._get_agent_id(),
+             tools=list(self.tools.values()),
+             tool_resources={"file_search": {"vector_store_ids": [self.vector_store]}},
+         )
+         self.save()
+         return file_obj.id
+
+     def _add_function(self, user_function):
+         user_function["strict"] = True
+         user_function["parameters"]["additionalProperties"] = False
+         if not user_function["parameters"].get("required"):
+             user_function["parameters"]["required"] = list(
+                 user_function["parameters"]["properties"].keys()
+             )
+
+         self.tools["function"] = {"type": "function", "function": user_function}
+         self.client.beta.assistants.update(
+             self._get_agent_id(), tools=list(self.tools.values())
+         )
+         return """
+ IMPORTANT: Always use the function 'response' tool to respond to the user with only the requested JSON schema. DO NOT add any text to the response outside of the JSON schema.
+
+ """
+
+     def _format_messages(self, messages):
+         message_list = []
+         if isinstance(messages, str):
+             message_list.insert(0, {"role": "user", "content": messages})
+         else:
+             for message in messages:
+                 if isinstance(message, str):
+                     message_list.insert(0, {"role": "user", "content": message})
+                 else:
+                     raise Exception(
+                         f"==== Invalid message: {message} ====\nMust be a string "
+                     )
+         return message_list
+
+     def generate_json(self, messages, function, additional_instructions=""):
+         # _instructions_addition = self._add_function(function)
+         function["strict"] = True
+         function["parameters"]["additionalProperties"] = False
+         function["parameters"]["required"] = list(
+             function["parameters"]["properties"].keys()
+         )
+
+         formatted_messages = self._format_messages(messages)
+         thread = self.client.beta.threads.create(messages=formatted_messages)
+         # log(function, _print=True)
+         running_job = True
+         while running_job:
+             try:
+                 run = self.client.beta.threads.runs.create_and_poll(
+                     thread_id=thread.id,
+                     assistant_id=self._get_agent_id(),
+                     additional_instructions=additional_instructions,
+                     parallel_tool_calls=False,
+                     tools=[
+                         {"type": "file_search"},
+                         {"type": "function", "function": function},
+                     ],
+                     tool_choice={
+                         "type": "function",
+                         "function": {"name": function["name"]},
+                     },
+                 )
+                 log(f"==== Job Status: {run.status} ====", _print=True)
+                 if run.status in [
+                     "failed",
+                     "expired",
+                     "canceled",
+                     "completed",
+                     "incomplete",
+                     "requires_action",
+                 ]:
+                     running_job = False
+
+             except openai.error.BadRequestError as e:
+                 # Handle specific bad request errors
+                 error_message = e.json_body.get("error", {}).get("message", "")
+                 if "already has an active run" in error_message:
+                     log("Previous run is still active. Waiting...", _print=True)
+                     time.sleep(2)  # wait before retrying or checking run status
+                 else:
+                     raise e
+
+         # while run.status in ["queued", "in_progress"]:
+         #     run = self.client.beta.threads.runs.retrieve(
+         #         thread_id=thread.id,
+         #         run_id=run.id,
+         #     )
+         #     time.sleep(0.5)
+         if run.status in ["failed", "expired", "canceled"]:
+             log(f"==== !!! ERROR !!!: {run.last_error} ====", _print=True)
+             return None
+         log("=================== RUN COMPLETED ===================", _print=True)
+         log(run.status, _print=True)
+         if run.status == "completed":
+             response = self.client.beta.threads.messages.list(thread_id=thread.id)
+             results = response.data[0].content[0].text.value
+         elif run.status == "requires_action":
+             results = run.required_action.submit_tool_outputs.tool_calls[
+                 0
+             ].function.arguments
+         else:
+             log(f"====Status: {run.status} Error: {run.last_error} ====", _print=True)
+             return None
+
+         results = results[results.find("{") : results.rfind("}") + 1]
+         try:
+             results = json.loads(results, strict=False)
+         except Exception:
+             print(f"==== Invalid JSON:\n{results}", _print=True)
+             return {}
+         else:
+             log(f"==== Results: {results}", _print=True)
+         log("=================== END REPORT ===================", _print=True)
+         return results
+
+     def generate_text(self, messages, additional_instructions=""):
+         formatted_messages = self._format_messages(messages)
+         thread = self.client.beta.threads.create(messages=formatted_messages)
+
+         run = self.client.beta.threads.runs.create(
+             thread_id=thread.id,
+             assistant_id=self._get_agent_id(),
+             additional_instructions=additional_instructions,
+             parallel_tool_calls=False,
+         )
+
+         while run.status in ["queued", "in_progress"]:
+             run = self.client.beta.threads.runs.retrieve(
+                 thread_id=thread.id,
+                 run_id=run.id,
+             )
+             time.sleep(0.5)
+             log(f"==== Job Status: {run.status} ====", _print=True)
+
+         if run.status in ["failed", "expired", "canceled"]:
+             log(f"==== Error: {run.last_error} ====", _print=True)
+             return None
+         log("=================== RUN COMPLETED ===================", _print=True)
+         log(run.status, _print=True)
+         if run.status == "completed":
+             response = self.client.beta.threads.messages.list(thread_id=thread.id)
+             results = response.data[0].content[0].text.value
+         else:
+             log(f"====Status: {run.status} Error: {run.last_error} ====", _print=True)
+             return None
+
+         log(results, _print=True)
+         log("=================== END REPORT ===================", _print=True)
+         return results
+
+     def generate_audio(self, prompt, file_path, **kwargs):
+         voice = kwargs.get("voice") or random.choice(
+             ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
+         )
+         response = self.client.audio.speech.create(
+             model="tts-1",
+             voice=voice,
+             input=prompt,
+         )
+
+         return response.stream_to_file(file_path)
+
+     def generate_image(self, prompt, **kwargs):
+         image = None
+         try:
+             response = self.client.images.generate(
+                 model=self._image_model,
+                 prompt=prompt,
+                 response_format="b64_json",
+                 **kwargs,
+             )
+             image_dict = response.data[0]
+         except Exception as e:
+             print(f"==== Error: Unable to create image ====\n\n{e}")
+         else:
+             image = b64decode(image_dict.b64_json)
+         return image
+
+     def summarize_text(self, text, primer=""):
+         message = [
+             {
+                 "role": "system",
+                 "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
+             },
+             {"role": "user", "content": text},
+         ]
+         response = self.client.chat.completions.create(
+             model=self._text_model, messages=message
+         )
+         try:
+             result = response.choices[0].message.content
+         except Exception as e:
+             log(f"{type(e)}:{e}\n\n Unable to generate content ====")
+             return None
+
+         return result
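The agent classes above are the intended entry points, but the wrapper can also be driven directly; a sketch under the same assumptions (a configured database for save() and OPENAI_KEY in the environment), with the name, file contents, and primer purely illustrative:

from autonomous.ai.models.openai import OpenAIModel

model = OpenAIModel(name="reporter")
model.save()  # persists the record; the OpenAI assistant itself is created lazily

# Upload reference data into a vector store wired to the assistant's file_search tool.
with open("dbdata.json", "rb") as f:
    file_id = model.attach_file(f.read(), filename="dbdata.json")

# Plain chat-completions path (gpt-4o-mini), separate from the assistant/thread flow.
summary = model.summarize_text(
    "Long release notes to condense...",
    primer=" Keep the summary under 50 words.",
)
print(summary)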
autonomous/ai/oaiagent.py CHANGED
@@ -1,214 +1,40 @@
- import json
- import os
- import random
- import time
- from base64 import b64decode
+ from autonomous import log
+ from autonomous.model.autoattr import ReferenceAttr
+ from autonomous.model.automodel import AutoModel

- from openai import OpenAI
-
- from autonomous import AutoModel, log
+ from .models.openai import OpenAIModel


  class OAIAgent(AutoModel):
-     client = None
-     attributes = {
-         "model": "gpt-4o",
-         "_agent_id": None,
-         "messages": [],
-         "tools": {},
-         "vector_store": None,
-         "name": "agent",
-         "instructions": "You are highly skilled AI trained to assist with various tasks.",
-         "description": "A helpful AI assistant trained to assist with various tasks.",
-     }
-
-     def __init__(self, **kwargs):
-         self.client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
+     client_model = ReferenceAttr(choices=[OpenAIModel])
+     _ai_model = OpenAIModel

      @property
-     def agent_id(self):
-         if not self._agent_id or not self.client.beta.assistants.retrieve(
-             self._agent_id
-         ):
-             agent = self.client.beta.assistants.create(
-                 instructions=self.instructions,
-                 description=self.description,
-                 name=self.name,
-                 model=self.model,
-             )
-             self._agent_id = agent.id
-             log(f"==== Creating Agent with ID: {self._agent_id} ====")
+     def client(self):
+         if self.client_model is None:
+             self.client_model = self._ai_model()
              self.save()
-         return self._agent_id
-
-     @agent_id.setter
-     def agent_id(self, value):
-         self._agent_id = value
-
-     def clear_agent(self):
-         self.client.beta.assistants.delete(self.agent_id)
-         self.agent_id = ""
-
-     def get_agent(self):
-         return self.client.beta.assistants.retrieve(self.agent_id)
+         return self.client_model

      def clear_files(self, file_id=None):
-         if self.vector_store:
-             if not file_id:
-                 vector_store_files = self.client.beta.vector_stores.files.list(
-                     vector_store_id=self.vector_store
-                 ).data
-                 for vsf in vector_store_files:
-                     self.client.files.delete(file_id=vsf.id)
-             else:
-                 self.client.files.delete(file_id=file_id)
-             self.tools.pop("file_search", None)
-         self.save()
-         return self.client.files.list()
+         return self.client.clear_files(file_id)

      def attach_file(self, file_contents, filename="dbdata.json"):
-         # Upload the user provided file to OpenAI
-         self.tools["file_search"] = {"type": "file_search"}
-         # Create a vector store
-         if not self.vector_store:
-             vs = self.client.beta.vector_stores.list().data
-             if vs:
-                 self.vector_store = vs[0].id
-             else:
-                 self.vector_store = self.client.beta.vector_stores.create(
-                     name="Data Reference"
-                 ).id
-
-         file_obj = self.client.files.create(
-             file=(filename, file_contents), purpose="assistants"
-         )
-
-         self.client.beta.vector_stores.files.create(
-             vector_store_id=self.vector_store, file_id=file_obj.id
-         )
-         self.client.beta.assistants.update(
-             self.agent_id,
-             tools=list(self.tools.values()),
-             tool_resources={"file_search": {"vector_store_ids": [self.vector_store]}},
-         )
-         self.save()
-         return file_obj.id
-
-     def _add_function(self, user_function):
-         self.tools["function"] = {"type": "function", "function": user_function}
-         self.client.beta.assistants.update(
-             self.agent_id, tools=list(self.tools.values())
-         )
-         return """
- IMPORTANT: always use the function 'response' tool to respond to the user with the requested JSON schema. Never add any other text to the response.
- """
-
-     def _format_messages(self, messages):
-         message_list = []
-         if isinstance(messages, str):
-             message_list.insert(0, {"role": "user", "content": messages})
-         else:
-             for message in messages:
-                 if isinstance(message, str):
-                     message_list.insert(0, {"role": "user", "content": message})
-                 else:
-                     raise Exception(
-                         f"==== Invalid message: {message} ====\nMust be a string "
-                     )
-         return message_list
+         return self.client.attach_file(file_contents, filename)

      def generate(self, messages, function=None, additional_instructions=""):
-         _instructions_addition = (
-             self._add_function(function) if function else additional_instructions
-         )
-
-         formatted_messages = self._format_messages(messages)
-         thread = self.client.beta.threads.create(messages=formatted_messages)
-
-         run = self.client.beta.threads.runs.create(
-             thread_id=thread.id,
-             assistant_id=self.agent_id,
-             additional_instructions=_instructions_addition,
-             parallel_tool_calls=False,
-         )
-
-         while run.status in ["queued", "in_progress"]:
-             run = self.client.beta.threads.runs.retrieve(
-                 thread_id=thread.id,
-                 run_id=run.id,
-             )
-             time.sleep(0.5)
-             log(f"==== Job Status: {run.status} ====")
-             print(f"==== Job Status: {run.status} ====")
-
-         if run.status in ["failed", "expired", "canceled"]:
-             log(f"==== Error: {run.last_error} ====")
-             print(f"==== Error: {run.last_error} ====")
-             return None
-         print("=================== RUN COMPLETED ===================")
-         print(run.status)
-         if run.status == "completed":
-             response = self.client.beta.threads.messages.list(thread_id=thread.id)
-             results = response.data[0].content[0].text.value
-         elif run.status == "requires_action":
-             results = run.required_action.submit_tool_outputs.tool_calls[
-                 0
-             ].function.arguments
+         if function is None:
+             return self.client.generate_text(messages, additional_instructions)
          else:
-             log(f"====Status: {run.status} Error: {run.last_error} ====")
-             print(f"====Status: {run.status} Error: {run.last_error} ====")
-             return None
-
-         if function:
-             results = results[results.find("{") : results.rfind("}") + 1]
-             try:
-                 results = json.loads(results, strict=False)
-             except Exception:
-                 print(f"==== Invalid JSON:\n{results}")
-                 log(f"==== Invalid JSON:\n{results}")
-
-         print(results)
-         print("=================== END REPORT ===================")
-         return results
+             return self.client.generate_json(
+                 messages, function, additional_instructions
+             )

      def generate_audio(self, prompt, file_path, **kwargs):
-         voice = kwargs.get("voice") or random.choice(
-             ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
-         )
-         response = self.client.audio.speech.create(
-             model="tts-1",
-             voice=voice,
-             input=prompt,
-         )
-
-         return response.stream_to_file(file_path)
+         return self.client.generate_audio(self, file_path, **kwargs)

      def generate_image(self, prompt, **kwargs):
-         image = None
-         try:
-             response = self.client.images.generate(
-                 model="dall-e-3", prompt=prompt, response_format="b64_json", **kwargs
-             )
-             image_dict = response.data[0]
-         except Exception as e:
-             print(f"==== Error: Unable to create image ====\n\n{e}")
-         else:
-             image = b64decode(image_dict.b64_json)
-         return image
+         return self.client.generate_image(prompt, **kwargs)

      def summarize_text(self, text, primer=""):
-         message = [
-             {
-                 "role": "system",
-                 "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
-             },
-             {"role": "user", "content": text},
-         ]
-         response = self.client.chat.completions.create(model="gpt-4o", messages=message)
-         try:
-             result = response.choices[0].message.content
-         except Exception as e:
-             log(f"{type(e)}:{e}\n\n Unable to generate content ====")
-             return None
-
-         return result
+         return self.client.generate_image(text, primer)
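OAIAgent now appears to be a thin delegator: its client property lazily creates and persists an OpenAIModel, and each method forwards to that wrapper (note that, as published, generate_audio forwards self rather than prompt, and summarize_text delegates to generate_image). A usage sketch under the same persistence and OPENAI_KEY assumptions, with illustrative prompts and schema:

from autonomous.ai.oaiagent import OAIAgent

agent = OAIAgent()

# No function schema: delegates to OpenAIModel.generate_text.
status = agent.generate("Give me a one-line project status update.")

# With a function schema: delegates to OpenAIModel.generate_json.
fields = agent.generate(
    "Extract the status fields from the update above.",
    function={
        "name": "response",
        "parameters": {
            "type": "object",
            "properties": {"status": {"type": "string"}},
        },
    },
)
print(status, fields)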