autonomous-app 0.2.25__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. autonomous/__init__.py +5 -2
  2. autonomous/ai/audioagent.py +32 -0
  3. autonomous/ai/imageagent.py +31 -0
  4. autonomous/ai/jsonagent.py +40 -0
  5. autonomous/ai/models/__init__.py +0 -0
  6. autonomous/ai/models/openai.py +280 -0
  7. autonomous/ai/oaiagent.py +25 -186
  8. autonomous/ai/textagent.py +35 -0
  9. autonomous/auth/autoauth.py +2 -2
  10. autonomous/auth/user.py +8 -10
  11. autonomous/model/autoattr.py +105 -0
  12. autonomous/model/automodel.py +70 -311
  13. autonomous/storage/imagestorage.py +9 -54
  14. autonomous/tasks/autotask.py +0 -25
  15. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.0.dist-info}/METADATA +7 -8
  16. autonomous_app-0.3.0.dist-info/RECORD +35 -0
  17. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.0.dist-info}/WHEEL +1 -1
  18. autonomous/db/__init__.py +0 -1
  19. autonomous/db/autodb.py +0 -86
  20. autonomous/db/table.py +0 -156
  21. autonomous/errors/__init__.py +0 -1
  22. autonomous/errors/danglingreferenceerror.py +0 -8
  23. autonomous/model/autoattribute.py +0 -20
  24. autonomous/model/orm.py +0 -86
  25. autonomous/model/serializer.py +0 -110
  26. autonomous_app-0.2.25.dist-info/RECORD +0 -36
  27. /autonomous/{storage → apis}/version_control/GHCallbacks.py +0 -0
  28. /autonomous/{storage → apis}/version_control/GHOrganization.py +0 -0
  29. /autonomous/{storage → apis}/version_control/GHRepo.py +0 -0
  30. /autonomous/{storage → apis}/version_control/GHVersionControl.py +0 -0
  31. /autonomous/{storage → apis}/version_control/__init__.py +0 -0
  32. /autonomous/{storage → utils}/markdown.py +0 -0
  33. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.0.dist-info}/LICENSE +0 -0
  34. {autonomous_app-0.2.25.dist-info → autonomous_app-0.3.0.dist-info}/top_level.txt +0 -0
autonomous/__init__.py CHANGED
@@ -1,4 +1,7 @@
- __version__ = "0.2.25"
+ __version__ = "0.3.00"
+
+ from dotenv import load_dotenv
+
+ load_dotenv()

  from .logger import log
- from .model.automodel import AutoModel
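Because 0.3.0 calls load_dotenv() at import time, environment variables such as OPENAI_KEY (read by the new OpenAI-backed models below) can be supplied through a .env file. A minimal sketch; the .env location and key value are illustrative assumptions, not part of the package:

    # Hypothetical usage: a .env file in the working directory containing
    #   OPENAI_KEY=sk-...
    # is loaded automatically when the package is imported.
    import os

    import autonomous  # importing the package triggers load_dotenv() as of 0.3.0

    print(bool(os.environ.get("OPENAI_KEY")))  # True if the .env file supplied the key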
autonomous/ai/audioagent.py ADDED
@@ -0,0 +1,32 @@
+ from autonomous import log
+ from autonomous.model.autoattr import ReferenceAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+ from .models.openai import OpenAIModel
+
+
+ class AudioAgent(AutoModel):
+     client = ReferenceAttr()
+     name = StringAttr(default="audioagent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with generating audio files."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with generating audio files."
+     )
+
+     _ai_model = OpenAIModel
+
+     def get_client(self):
+         if self.client is None:
+             self.client = self._ai_model(
+                 name=self.name,
+                 instructions=self.instructions,
+                 description=self.description,
+             )
+             self.client.save()
+             self.save()
+         return self.client
+
+     def generate(self, prompt, file_path, **kwargs):
+         return self.get_client().generate_audio(prompt, file_path, **kwargs)
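A minimal usage sketch for the new wrapper (the prompt, output path, and voice are illustrative assumptions; generation requires a valid OPENAI_KEY):

    from autonomous.ai.audioagent import AudioAgent

    # AudioAgent lazily creates and persists an OpenAIModel client on first
    # use, then delegates to its generate_audio() method.
    agent = AudioAgent()
    agent.generate(
        "Welcome to the Autonomous demo.",  # text to speak
        "welcome.mp3",                      # file path the speech is streamed to
        voice="nova",                       # optional; a random voice is chosen if omitted
    )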
autonomous/ai/imageagent.py ADDED
@@ -0,0 +1,31 @@
+ from autonomous.model.autoattr import ReferenceAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+ from .models.openai import OpenAIModel
+
+
+ class ImageAgent(AutoModel):
+     client = ReferenceAttr()
+     name = StringAttr(default="imageagent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with generating images."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with generating images."
+     )
+
+     _ai_model = OpenAIModel
+
+     def get_client(self):
+         if self.client is None:
+             self.client = self._ai_model(
+                 name=self.name,
+                 instructions=self.instructions,
+                 description=self.description,
+             )
+             self.client.save()
+             self.save()
+         return self.client
+
+     def generate(self, prompt, **kwargs):
+         return self.get_client().generate_image(prompt, **kwargs)
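ImageAgent.generate() returns the base64-decoded image bytes (or None on failure), so callers write the file themselves. A sketch with an assumed prompt and filename:

    from autonomous.ai.imageagent import ImageAgent

    agent = ImageAgent()
    # Extra keyword arguments are passed through to the OpenAI images API
    # (e.g. size); the values here are illustrative.
    image_bytes = agent.generate("a lighthouse at dusk", size="1024x1024")
    if image_bytes:  # None is returned if the API call failed
        with open("lighthouse.png", "wb") as f:
            f.write(image_bytes)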
autonomous/ai/jsonagent.py ADDED
@@ -0,0 +1,40 @@
+ import json
+
+ from autonomous.model.autoattr import ReferenceAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+ from .models.openai import OpenAIModel
+
+
+ class JSONAgent(AutoModel):
+     client = ReferenceAttr()
+     name = StringAttr(default="jsonagent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with generating JSON formatted data."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with generating JSON formatted data."
+     )
+
+     _ai_model = OpenAIModel
+
+     def get_client(self):
+         if self.client is None:
+             self.client = self._ai_model(
+                 name=self.name,
+                 instructions=self.instructions,
+                 description=self.description,
+             )
+             self.client.save()
+             self.save()
+         return self.client
+
+     def generate(self, messages, function, additional_instructions=""):
+         result = self.get_client().generate_json(
+             messages, function, additional_instructions
+         )
+         if isinstance(result, str):
+             result = json.loads(result)
+         elif not isinstance(result, dict):
+             raise ValueError(f"Invalid JSON response from AI model.\n\n{result}")
+         return result
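generate() expects an OpenAI-style function definition. Because OpenAIModel._add_function() later marks every declared property as required, disables additionalProperties, and enables strict mode, the schema only needs a name, a description, and parameters with properties. A hedged sketch; the schema fields and prompt are assumptions for illustration:

    from autonomous.ai.jsonagent import JSONAgent

    # Illustrative function schema; field names are example assumptions.
    character_schema = {
        "name": "response",
        "description": "Return a fictional character as structured JSON.",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "age": {"type": "integer"},
                "backstory": {"type": "string"},
            },
        },
    }

    agent = JSONAgent()
    result = agent.generate(
        "Create a sidekick character for a noir detective story.",
        character_schema,
    )
    print(result["name"], result["age"])  # result is a dict, or ValueError is raised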
autonomous/ai/models/__init__.py ADDED
File without changes
autonomous/ai/models/openai.py ADDED
@@ -0,0 +1,280 @@
+ import json
+ import os
+ import random
+ import time
+ from base64 import b64decode
+
+ from openai import NotFoundError as openai_NotFoundError
+ from openai import OpenAI
+
+ from autonomous import log
+ from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+
+ class OpenAIModel(AutoModel):
+     _client = None
+     text_model = StringAttr(default="gpt-4o")
+     image_model = StringAttr(default="dall-e-3")
+     json_model = StringAttr(default="gpt-4o")
+     agent_id = StringAttr()
+     messages = ListAttr(StringAttr(default=[]))
+     tools = DictAttr()
+     vector_store = StringAttr()
+     name = StringAttr(default="agent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with various tasks."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with various tasks."
+     )
+
+     @property
+     def client(self):
+         if not self._client:
+             self._client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
+         return self._client
+
+     def clear_agent(self):
+         if self.agent_id:
+             self.client.beta.assistants.delete(self.agent_id)
+             self.agent_id = ""
+             self.save()
+
+     def clear_agents(self):
+         assistants = self.client.beta.assistants.list().data
+         log(assistants)
+         for assistant in assistants:
+             log(f"==== Deleting Agent with ID: {assistant.id} ====")
+             try:
+                 self.client.beta.assistants.delete(assistant.id)
+             except openai_NotFoundError:
+                 log(f"==== Agent with ID: {assistant.id} not found ====")
+         self.agent_id = ""
+         self.save()
+
+     def _get_agent_id(self):
+         if not self.agent_id or not self.client.beta.assistants.retrieve(self.agent_id):
+             agent = self.client.beta.assistants.create(
+                 instructions=self.instructions,
+                 description=self.description,
+                 name=self.name,
+                 model=self.json_model,
+             )
+             self.agent_id = agent.id
+             log(f"==== Creating Agent with ID: {self.agent_id} ====")
+             self.save()
+         return self.agent_id
+
+     def clear_files(self, file_id=None):
+         if not file_id:
+             store_files = self.client.files.list().data
+             for sf in store_files:
+                 self.client.files.delete(file_id=sf.id)
+             for vs in self.client.beta.vector_stores.list().data:
+                 try:
+                     self.client.beta.vector_stores.delete(vs.id)
+                 except openai_NotFoundError:
+                     log(f"==== Vector Store {vs.id} not found ====")
+         else:
+             self.client.files.delete(file_id=file_id)
+         self.tools.pop("file_search", None)
+         self.save()
+         return self.client.files.list()
+
+     def attach_file(self, file_contents, filename="dbdata.json"):
+         # Upload the user provided file to OpenAI
+         self.tools["file_search"] = {"type": "file_search"}
+         # Create a vector store
+         if vs := self.client.beta.vector_stores.list().data:
+             self.vector_store = vs[0].id
+         else:
+             self.vector_store = self.client.beta.vector_stores.create(
+                 name="Data Reference",
+                 expires_after={"anchor": "last_active_at", "days": 14},
+             ).id
+
+         file_obj = self.client.files.create(
+             file=(filename, file_contents), purpose="assistants"
+         )
+
+         self.client.beta.vector_stores.files.create(
+             vector_store_id=self.vector_store,
+             file_id=file_obj.id,
+         )
+         self.client.beta.assistants.update(
+             self._get_agent_id(),
+             tools=list(self.tools.values()),
+             tool_resources={"file_search": {"vector_store_ids": [self.vector_store]}},
+         )
+         self.save()
+         return file_obj.id
+
+     def _add_function(self, user_function):
+         user_function["strict"] = True
+         user_function["parameters"]["required"] = list(
+             user_function["parameters"]["properties"].keys()
+         )
+         user_function["parameters"]["additionalProperties"] = False
+
+         self.tools["function"] = {"type": "function", "function": user_function}
+         self.client.beta.assistants.update(
+             self._get_agent_id(), tools=list(self.tools.values())
+         )
+         return """
+         IMPORTANT: Always use the function 'response' tool to respond to the user with the only the requested JSON schema. DO NOT add any text to the response outside of the JSON schema.
+
+         """
+
+     def _format_messages(self, messages):
+         message_list = []
+         if isinstance(messages, str):
+             message_list.insert(0, {"role": "user", "content": messages})
+         else:
+             for message in messages:
+                 if isinstance(message, str):
+                     message_list.insert(0, {"role": "user", "content": message})
+                 else:
+                     raise Exception(
+                         f"==== Invalid message: {message} ====\nMust be a string "
+                     )
+         return message_list
+
+     def generate_json(self, messages, function, additional_instructions=""):
+         _instructions_addition = self._add_function(function)
+         _instructions_addition += additional_instructions
+
+         formatted_messages = self._format_messages(messages)
+         thread = self.client.beta.threads.create(messages=formatted_messages)
+
+         run = self.client.beta.threads.runs.create(
+             thread_id=thread.id,
+             assistant_id=self._get_agent_id(),
+             additional_instructions=_instructions_addition,
+             parallel_tool_calls=False,
+         )
+
+         while run.status in ["queued", "in_progress"]:
+             run = self.client.beta.threads.runs.retrieve(
+                 thread_id=thread.id,
+                 run_id=run.id,
+             )
+             time.sleep(0.5)
+             log(f"==== Job Status: {run.status} ====")
+             print(f"==== Job Status: {run.status} ====")
+
+         if run.status in ["failed", "expired", "canceled"]:
+             log(f"==== Error: {run.last_error} ====")
+             print(f"==== Error: {run.last_error} ====")
+             return None
+         print("=================== RUN COMPLETED ===================")
+         print(run.status)
+         if run.status == "completed":
+             response = self.client.beta.threads.messages.list(thread_id=thread.id)
+             results = response.data[0].content[0].text.value
+         elif run.status == "requires_action":
+             results = run.required_action.submit_tool_outputs.tool_calls[
+                 0
+             ].function.arguments
+         else:
+             log(f"====Status: {run.status} Error: {run.last_error} ====")
+             print(f"====Status: {run.status} Error: {run.last_error} ====")
+             return None
+
+         results = results[results.find("{") : results.rfind("}") + 1]
+         try:
+             results = json.loads(results, strict=False)
+         except Exception:
+             print(f"==== Invalid JSON:\n{results}")
+             log(f"==== Invalid JSON:\n{results}")
+             return {}
+         else:
+             log(f"==== Results: {results} ====")
+         print(results)
+         print("=================== END REPORT ===================")
+         return results
+
+     def generate_text(self, messages, additional_instructions=""):
+         formatted_messages = self._format_messages(messages)
+         thread = self.client.beta.threads.create(messages=formatted_messages)
+
+         run = self.client.beta.threads.runs.create(
+             thread_id=thread.id,
+             assistant_id=self._get_agent_id(),
+             additional_instructions=additional_instructions,
+             parallel_tool_calls=False,
+         )
+
+         while run.status in ["queued", "in_progress"]:
+             run = self.client.beta.threads.runs.retrieve(
+                 thread_id=thread.id,
+                 run_id=run.id,
+             )
+             time.sleep(0.5)
+             log(f"==== Job Status: {run.status} ====")
+             print(f"==== Job Status: {run.status} ====")
+
+         if run.status in ["failed", "expired", "canceled"]:
+             log(f"==== Error: {run.last_error} ====")
+             print(f"==== Error: {run.last_error} ====")
+             return None
+         print("=================== RUN COMPLETED ===================")
+         print(run.status)
+         if run.status == "completed":
+             response = self.client.beta.threads.messages.list(thread_id=thread.id)
+             results = response.data[0].content[0].text.value
+         else:
+             log(f"====Status: {run.status} Error: {run.last_error} ====")
+             print(f"====Status: {run.status} Error: {run.last_error} ====")
+             return None
+
+         print(results)
+         print("=================== END REPORT ===================")
+         return results
+
+     def generate_audio(self, prompt, file_path, **kwargs):
+         voice = kwargs.get("voice") or random.choice(
+             ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
+         )
+         response = self.client.audio.speech.create(
+             model="tts-1",
+             voice=voice,
+             input=prompt,
+         )
+
+         return response.stream_to_file(file_path)
+
+     def generate_image(self, prompt, **kwargs):
+         image = None
+         try:
+             response = self.client.images.generate(
+                 model=self.image_model,
+                 prompt=prompt,
+                 response_format="b64_json",
+                 **kwargs,
+             )
+             image_dict = response.data[0]
+         except Exception as e:
+             print(f"==== Error: Unable to create image ====\n\n{e}")
+         else:
+             image = b64decode(image_dict.b64_json)
+         return image
+
+     def summarize_text(self, text, primer=""):
+         message = [
+             {
+                 "role": "system",
+                 "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
+             },
+             {"role": "user", "content": text},
+         ]
+         response = self.client.chat.completions.create(
+             model="gpt-4o-mini", messages=message
+         )
+         try:
+             result = response.choices[0].message.content
+         except Exception as e:
+             log(f"{type(e)}:{e}\n\n Unable to generate content ====")
+             return None
+
+         return result
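A hedged sketch of the file_search flow that attach_file() and clear_files() implement: an existing vector store is reused, or one named "Data Reference" with a 14-day expiry is created, and the uploaded file is wired into the assistant. The model name, payload, and filename below are assumptions:

    import json

    from autonomous.ai.models.openai import OpenAIModel

    model = OpenAIModel(name="reference-agent")  # illustrative name

    # Upload a JSON dump for the assistant's file_search tool; bytes are
    # assumed here as the expected file_contents type.
    file_id = model.attach_file(
        json.dumps({"npcs": []}).encode("utf-8"), filename="worlddata.json"
    )

    # Later, remove that uploaded file (or all files if no id is given).
    model.clear_files(file_id)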
autonomous/ai/oaiagent.py CHANGED
@@ -1,3 +1,4 @@
+ import io
  import json
  import os
  import random
@@ -5,210 +6,48 @@ import time
  from base64 import b64decode

  from openai import OpenAI
+ from pydantic import model_validator

- from autonomous import AutoModel, log
+ from autonomous import log
+ from autonomous.model.autoattr import ReferenceAttr
+ from autonomous.model.automodel import AutoModel

+ from .models.openai import OpenAIModel

- class OAIAgent(AutoModel):
-     client = None
-     attributes = {
-         "model": "gpt-4o",
-         "_agent_id": None,
-         "messages": [],
-         "tools": {},
-         "vector_store": None,
-         "name": "agent",
-         "instructions": "You are highly skilled AI trained to assist with various tasks.",
-         "description": "A helpful AI assistant trained to assist with various tasks.",
-     }

-     def __init__(self, **kwargs):
-         self.client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))
+ class OAIAgent(AutoModel):
+     client_model = ReferenceAttr()
+     _ai_model = OpenAIModel

      @property
-     def agent_id(self):
-         if not self._agent_id or not self.client.beta.assistants.retrieve(
-             self._agent_id
-         ):
-             agent = self.client.beta.assistants.create(
-                 instructions=self.instructions,
-                 description=self.description,
-                 name=self.name,
-                 model=self.model,
-             )
-             self._agent_id = agent.id
-             log(f"==== Creating Agent with ID: {self._agent_id} ====")
+     def client(self):
+         if self.client_model is None:
+             self.client_model = self._ai_model()
              self.save()
-         return self._agent_id
+         return self.client_model

-     @agent_id.setter
-     def agent_id(self, value):
-         self._agent_id = value
-
-     def clear_agent(self):
-         self.client.beta.assistants.delete(self.agent_id)
-         self.agent_id = ""
-
-     def get_agent(self):
-         return self.client.beta.assistants.retrieve(self.agent_id)
+     def __init__(self, **kwargs):
+         self._client = OpenAI(api_key=os.environ.get("OPENAI_KEY"))

      def clear_files(self, file_id=None):
-         if self.vector_store:
-             if not file_id:
-                 vector_store_files = self.client.beta.vector_stores.files.list(
-                     vector_store_id=self.vector_store
-                 ).data
-                 for vsf in vector_store_files:
-                     self.client.files.delete(file_id=vsf.id)
-             else:
-                 self.client.files.delete(file_id=file_id)
-             self.tools.pop("file_search", None)
-             self.save()
-         return self.client.files.list()
+         return self.client.clear_files(file_id)

      def attach_file(self, file_contents, filename="dbdata.json"):
-         # Upload the user provided file to OpenAI
-         self.tools["file_search"] = {"type": "file_search"}
-         # Create a vector store
-         if not self.vector_store:
-             vs = self.client.beta.vector_stores.list().data
-             if vs:
-                 self.vector_store = vs[0].id
-             else:
-                 self.vector_store = self.client.beta.vector_stores.create(
-                     name="Data Reference"
-                 ).id
-
-         file_obj = self.client.files.create(
-             file=(filename, file_contents), purpose="assistants"
-         )
-
-         self.client.beta.vector_stores.files.create(
-             vector_store_id=self.vector_store, file_id=file_obj.id
-         )
-         self.client.beta.assistants.update(
-             self.agent_id,
-             tools=list(self.tools.values()),
-             tool_resources={"file_search": {"vector_store_ids": [self.vector_store]}},
-         )
-         self.save()
-         return file_obj.id
-
-     def _add_function(self, user_function):
-         self.tools["function"] = {"type": "function", "function": user_function}
-         self.client.beta.assistants.update(
-             self.agent_id, tools=list(self.tools.values())
-         )
-         return """
-         IMPORTANT: always use the function 'response' tool to respond to the user with the requested JSON schema. Never add any other text to the response.
-         """
-
-     def _format_messages(self, messages):
-         message_list = []
-         if isinstance(messages, str):
-             message_list.insert(0, {"role": "user", "content": messages})
-         else:
-             for message in messages:
-                 if isinstance(message, str):
-                     message_list.insert(0, {"role": "user", "content": message})
-                 else:
-                     raise Exception(
-                         f"==== Invalid message: {message} ====\nMust be a string "
-                     )
-         return message_list
+         return self.client.attach_file(file_contents, filename)

      def generate(self, messages, function=None, additional_instructions=""):
-         _instructions_addition = (
-             self._add_function(function) if function else additional_instructions
-         )
-
-         formatted_messages = self._format_messages(messages)
-         thread = self.client.beta.threads.create(messages=formatted_messages)
-
-         run = self.client.beta.threads.runs.create(
-             thread_id=thread.id,
-             assistant_id=self.agent_id,
-             additional_instructions=_instructions_addition,
-             parallel_tool_calls=False,
-         )
-
-         while run.status in ["queued", "in_progress"]:
-             run = self.client.beta.threads.runs.retrieve(
-                 thread_id=thread.id,
-                 run_id=run.id,
-             )
-             time.sleep(0.5)
-             log(f"==== Job Status: {run.status} ====")
-             print(f"==== Job Status: {run.status} ====")
-
-         if run.status in ["failed", "expired", "canceled"]:
-             log(f"==== Error: {run.last_error} ====")
-             print(f"==== Error: {run.last_error} ====")
-             return None
-         print("=================== RUN COMPLETED ===================")
-         print(run.status)
-         if run.status == "completed":
-             response = self.client.beta.threads.messages.list(thread_id=thread.id)
-             results = response.data[0].content[0].text.value
-         elif run.status == "requires_action":
-             results = run.required_action.submit_tool_outputs.tool_calls[
-                 0
-             ].function.arguments
+         if function is None:
+             return self.client.generate_text(messages, additional_instructions)
          else:
-             log(f"====Status: {run.status} Error: {run.last_error} ====")
-             print(f"====Status: {run.status} Error: {run.last_error} ====")
-             return None
-
-         if function:
-             results = results[results.find("{") : results.rfind("}") + 1]
-             try:
-                 results = json.loads(results, strict=False)
-             except Exception:
-                 print(f"==== Invalid JSON:\n{results}")
-                 log(f"==== Invalid JSON:\n{results}")
-
-         print(results)
-         print("=================== END REPORT ===================")
-         return results
+             return self.client.generate_json(
+                 messages, function, additional_instructions
+             )

      def generate_audio(self, prompt, file_path, **kwargs):
-         voice = kwargs.get("voice") or random.choice(
-             ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
-         )
-         response = self.client.audio.speech.create(
-             model="tts-1",
-             voice=voice,
-             input=prompt,
-         )
-
-         return response.stream_to_file(file_path)
+         return self.client.generate_audio(self, file_path, **kwargs)

      def generate_image(self, prompt, **kwargs):
-         image = None
-         try:
-             response = self.client.images.generate(
-                 model="dall-e-3", prompt=prompt, response_format="b64_json", **kwargs
-             )
-             image_dict = response.data[0]
-         except Exception as e:
-             print(f"==== Error: Unable to create image ====\n\n{e}")
-         else:
-             image = b64decode(image_dict.b64_json)
-         return image
+         return self.client.generate_image(prompt, **kwargs)

      def summarize_text(self, text, primer=""):
-         message = [
-             {
-                 "role": "system",
-                 "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
-             },
-             {"role": "user", "content": text},
-         ]
-         response = self.client.chat.completions.create(model="gpt-4o", messages=message)
-         try:
-             result = response.choices[0].message.content
-         except Exception as e:
-             log(f"{type(e)}:{e}\n\n Unable to generate content ====")
-             return None
-
-         return result
+         return self.client.generate_image(text, primer)
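OAIAgent keeps its public surface in 0.3.0 but now delegates the work to a persisted OpenAIModel reference. A hedged usage sketch of the delegation (prompt text is an assumption):

    from autonomous.ai.oaiagent import OAIAgent

    agent = OAIAgent()  # lazily creates and saves an OpenAIModel on first use

    # Without a function schema, generate() delegates to OpenAIModel.generate_text().
    text = agent.generate("Summarize the plot of a heist movie in two sentences.")

    # Passing a function schema routes through OpenAIModel.generate_json()
    # instead, as in the JSONAgent sketch above.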
autonomous/ai/textagent.py ADDED
@@ -0,0 +1,35 @@
+ from autonomous import log
+ from autonomous.model.autoattr import ReferenceAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+ from .models.openai import OpenAIModel
+
+
+ class TextAgent(AutoModel):
+     client = ReferenceAttr()
+     name = StringAttr(default="textagent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with generating text according to the given requirements."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with generating text according to the given requirements."
+     )
+
+     _ai_model = OpenAIModel
+
+     def get_client(self):
+         if self.client is None:
+             self.client = self._ai_model(
+                 name=self.name,
+                 instructions=self.instructions,
+                 description=self.description,
+             )
+             self.client.save()
+             self.save()
+         return self.client
+
+     def summarize_text(self, text, primer=""):
+         return self.get_client().summarize_text(text, primer)
+
+     def generate(self, messages, additional_instructions=""):
+         return self.get_client().generate_text(messages, additional_instructions)
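A short sketch of the two TextAgent entry points; the inputs are illustrative assumptions:

    from autonomous.ai.textagent import TextAgent

    agent = TextAgent()

    # Free-form generation via the assistants API.
    draft = agent.generate(
        "Write a two-line tagline for a tabletop RPG campaign manager.",
        additional_instructions="Keep it under 20 words.",
    )

    # Summarization via chat completions (gpt-4o-mini in this release).
    summary = agent.summarize_text(draft, primer=" Focus on tone.")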
autonomous/auth/autoauth.py CHANGED
@@ -4,14 +4,14 @@ from functools import wraps

  import requests
  from authlib.integrations.requests_client import OAuth2Auth, OAuth2Session
- from flask import current_app, redirect, request, session, url_for
+ from flask import redirect, session, url_for

  from autonomous import log
  from autonomous.auth.user import AutoUser


  class AutoAuth:
-     user_class = AutoUser
+     user_class: type[AutoUser] = AutoUser

      def __init__(
          self,