autonomous-app 0.3.28__tar.gz → 0.3.29__tar.gz

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (71)
  1. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/PKG-INFO +1 -1
  2. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/__init__.py +1 -1
  3. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/baseagent.py +3 -11
  4. autonomous_app-0.3.29/src/autonomous/ai/models/local_model.py +272 -0
  5. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous_app.egg-info/PKG-INFO +1 -1
  6. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous_app.egg-info/SOURCES.txt +1 -1
  7. autonomous_app-0.3.28/src/autonomous/ai/models/local.py +0 -99
  8. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/README.md +0 -0
  9. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/pyproject.toml +0 -0
  10. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/requirements.txt +0 -0
  11. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/setup.cfg +0 -0
  12. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/setup.py +0 -0
  13. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/__init__.py +0 -0
  14. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/audioagent.py +0 -0
  15. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/imageagent.py +0 -0
  16. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/jsonagent.py +0 -0
  17. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/models/__init__.py +0 -0
  18. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/models/aws.py +0 -0
  19. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/models/deepseek.py +0 -0
  20. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/models/gemini.py +0 -0
  21. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/models/openai.py +0 -0
  22. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/ai/textagent.py +0 -0
  23. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/apis/version_control/GHCallbacks.py +0 -0
  24. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/apis/version_control/GHOrganization.py +0 -0
  25. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/apis/version_control/GHRepo.py +0 -0
  26. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/apis/version_control/GHVersionControl.py +0 -0
  27. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/apis/version_control/__init__.py +0 -0
  28. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/auth/__init__.py +0 -0
  29. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/auth/autoauth.py +0 -0
  30. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/auth/github.py +0 -0
  31. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/auth/google.py +0 -0
  32. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/auth/user.py +0 -0
  33. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/cli.py +0 -0
  34. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/__init__.py +0 -0
  35. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/base/__init__.py +0 -0
  36. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/base/common.py +0 -0
  37. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/base/datastructures.py +0 -0
  38. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/base/document.py +0 -0
  39. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/base/fields.py +0 -0
  40. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/base/metaclasses.py +0 -0
  41. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/base/utils.py +0 -0
  42. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/common.py +0 -0
  43. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/connection.py +0 -0
  44. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/context_managers.py +0 -0
  45. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/dereference.py +0 -0
  46. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/document.py +0 -0
  47. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/errors.py +0 -0
  48. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/fields.py +0 -0
  49. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/mongodb_support.py +0 -0
  50. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/pymongo_support.py +0 -0
  51. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/queryset/__init__.py +0 -0
  52. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/queryset/base.py +0 -0
  53. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/queryset/field_list.py +0 -0
  54. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/queryset/manager.py +0 -0
  55. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/queryset/queryset.py +0 -0
  56. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/queryset/transform.py +0 -0
  57. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/queryset/visitor.py +0 -0
  58. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/db/signals.py +0 -0
  59. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/logger.py +0 -0
  60. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/model/__init__.py +0 -0
  61. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/model/autoattr.py +0 -0
  62. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/model/automodel.py +0 -0
  63. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/storage/__init__.py +0 -0
  64. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/storage/imagestorage.py +0 -0
  65. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/storage/localstorage.py +0 -0
  66. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/tasks/__init__.py +0 -0
  67. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/tasks/autotask.py +0 -0
  68. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous/utils/markdown.py +0 -0
  69. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous_app.egg-info/dependency_links.txt +0 -0
  70. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous_app.egg-info/requires.txt +0 -0
  71. {autonomous_app-0.3.28 → autonomous_app-0.3.29}/src/autonomous_app.egg-info/top_level.txt +0 -0
--- autonomous_app-0.3.28/PKG-INFO
+++ autonomous_app-0.3.29/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.28
+Version: 0.3.29
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous
--- autonomous_app-0.3.28/src/autonomous/__init__.py
+++ autonomous_app-0.3.29/src/autonomous/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.3.28"
+__version__ = "0.3.29"
 
 from dotenv import load_dotenv
 
--- autonomous_app-0.3.28/src/autonomous/ai/baseagent.py
+++ autonomous_app-0.3.29/src/autonomous/ai/baseagent.py
@@ -3,24 +3,16 @@ from autonomous.model.autoattr import ReferenceAttr
 from autonomous.model.automodel import AutoModel
 
 from .models.gemini import GeminiAIModel
+from .models.local_model import LocalAIModel
 from .models.openai import OpenAIModel
 
 
-def clear_agents():
-    for agent in OpenAIModel.all():
-        log(f"Deleting {agent.name}")
-        agent.clear_agents()
-        agent.clear_files()
-        agent.delete()
-    return "Success"
-
-
 class BaseAgent(AutoModel):
     meta = {"abstract": True, "allow_inheritance": True, "strict": False}
 
-    client = ReferenceAttr(choices=[GeminiAIModel])
+    client = ReferenceAttr(choices=[LocalAIModel])
 
-    _ai_model = GeminiAIModel
+    _ai_model = LocalAIModel
 
     def delete(self):
         if self.client:
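
Note: the practical effect of the baseagent.py change is that agents now default to the local Ollama-backed model rather than Gemini, and the module-level clear_agents() cleanup helper for OpenAIModel is gone. A minimal sketch of the new wiring, assuming a concrete agent subclass (StoryAgent and ensure_client below are illustrative helpers, not part of the release):

# Illustrative sketch only; StoryAgent and ensure_client are not in the release.
from autonomous.ai.baseagent import BaseAgent
from autonomous.ai.models.local_model import LocalAIModel


class StoryAgent(BaseAgent):
    """Concrete agent; inherits client and _ai_model from BaseAgent."""


def ensure_client(agent: BaseAgent) -> LocalAIModel:
    # As of 0.3.29, BaseAgent._ai_model is LocalAIModel, so a missing
    # client reference resolves to the local Ollama-backed model.
    if not agent.client:
        agent.client = agent._ai_model()
    return agent.client
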
--- /dev/null
+++ autonomous_app-0.3.29/src/autonomous/ai/models/local_model.py
@@ -0,0 +1,272 @@
+import io
+import json
+import os
+import random
+import re
+import wave
+
+import requests
+from pydub import AudioSegment
+
+from autonomous import log
+from autonomous.model.autoattr import ListAttr, StringAttr
+from autonomous.model.automodel import AutoModel
+
+
+class LocalAIModel(AutoModel):
+    # Configuration
+    _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama_internal:11434/api")
+    _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai_internal:5005")
+
+    # Models to use in Ollama
+    _text_model = "mistral-nemo"
+    _json_model = "mistral-nemo"
+
+    messages = ListAttr(StringAttr(default=[]))
+    name = StringAttr(default="agent")
+    instructions = StringAttr(
+        default="You are highly skilled AI trained to assist with various tasks."
+    )
+
+    # Keep your voice list (mapped to random seeds/embeddings in the future)
+    VOICES = {
+        "Zephyr": ["female"],
+        "Puck": ["male"],
+        "Charon": ["male"],
+        "Kore": ["female"],
+        "Fenrir": ["non-binary"],
+        "Leda": ["female"],
+        "Orus": ["male"],
+        "Aoede": ["female"],
+        "Callirhoe": ["female"],
+        "Autonoe": ["female"],
+        "Enceladus": ["male"],
+        "Iapetus": ["male"],
+        "Umbriel": ["male"],
+        "Algieba": ["male"],
+        "Despina": ["female"],
+        "Erinome": ["female"],
+        "Algenib": ["male"],
+        "Rasalgethi": ["non-binary"],
+        "Laomedeia": ["female"],
+        "Achernar": ["female"],
+        "Alnilam": ["male"],
+        "Schedar": ["male"],
+        "Gacrux": ["female"],
+        "Pulcherrima": ["non-binary"],
+        "Achird": ["male"],
+        "Zubenelgenubi": ["male"],
+        "Vindemiatrix": ["female"],
+        "Sadachbia": ["male"],
+        "Sadaltager": ["male"],
+        "Sulafar": ["female"],
+    }
+
+    def _convert_tools_to_json_schema(self, user_function):
+        """
+        Ollama doesn't support 'tools' strictly yet.
+        We convert the tool definition into a system prompt instruction.
+        """
+        schema = {
+            "name": user_function.get("name"),
+            "parameters": user_function.get("parameters"),
+        }
+        return json.dumps(schema, indent=2)
+
+    def generate_json(self, message, function, additional_instructions="", **kwargs):
+        """
+        Mimics Gemini's tool use by forcing Ollama into JSON mode
+        and injecting the schema into the prompt.
+        """
+        schema_str = self._convert_tools_to_json_schema(function)
+
+        system_prompt = (
+            f"{self.instructions}. {additional_instructions}\n"
+            f"You must respond strictly with a valid JSON object matching this schema:\n"
+            f"{schema_str}\n"
+            f"Do not include markdown formatting or explanations."
+        )
+
+        payload = {
+            "model": self._json_model,
+            "prompt": message,
+            "system": system_prompt,
+            "format": "json",  # Force JSON mode
+            "stream": False,
+        }
+
+        try:
+            response = requests.post(f"{self._ollama_url}/generate", json=payload)
+            response.raise_for_status()
+            result_text = response.json().get("response", "{}")
+
+            # log(f"Raw Local JSON: {result_text}", _print=True)
+            return json.loads(result_text)
+
+        except Exception as e:
+            log(f"==== LocalAI JSON Error: {e} ====", _print=True)
+            return {}
+
+    def generate_text(self, message, additional_instructions="", **kwargs):
+        """
+        Standard text generation via Ollama.
+        """
+        payload = {
+            "model": self._text_model,
+            "prompt": message,
+            "system": f"{self.instructions}. {additional_instructions}",
+            "stream": False,
+        }
+
+        # Handle 'files' (Ollama supports images in base64, but not arbitrary files easily yet)
+        # If files are text, you should read them and append to prompt.
+        if file_list := kwargs.get("files"):
+            for file_dict in file_list:
+                fn = file_dict["name"]
+                fileobj = file_dict["file"]
+                if fn.lower().endswith((".txt", ".md", ".json", ".csv")):
+                    content = fileobj.read()
+                    if isinstance(content, bytes):
+                        content = content.decode("utf-8", errors="ignore")
+                    payload["prompt"] += f"\n\nContents of {fn}:\n{content}"
+
+        try:
+            response = requests.post(f"{self._ollama_url}/generate", json=payload)
+            response.raise_for_status()
+            return response.json().get("response", "")
+        except Exception as e:
+            log(f"==== LocalAI Text Error: {e} ====", _print=True)
+            return "Error generating text."
+
+    def summarize_text(self, text, primer="", **kwargs):
+        primer = primer or "Summarize the following text concisely."
+
+        # Simple chunking logic (similar to your original)
+        # Note: Mistral-Nemo has a large context window (128k), so chunking
+        # is less necessary than with older models, but we keep it for safety.
+        max_chars = 12000  # Roughly 3k tokens
+        chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
+
+        full_summary = ""
+        for chunk in chunks:
+            payload = {
+                "model": self._text_model,
+                "prompt": f"{primer}:\n\n{chunk}",
+                "stream": False,
+            }
+            try:
+                res = requests.post(f"{self._ollama_url}/generate", json=payload)
+                full_summary += res.json().get("response", "") + "\n"
+            except Exception as e:
+                log(f"Summary Error: {e}", _print=True)
+                break
+
+        return full_summary
+
+    def generate_audio_text(self, audio_file, prompt="", **kwargs):
+        """
+        Sends audio bytes to the Media AI container for Whisper transcription.
+        """
+        try:
+            # Prepare the file for upload
+            # audio_file is likely bytes, so we wrap in BytesIO if needed
+            if isinstance(audio_file, bytes):
+                f_obj = io.BytesIO(audio_file)
+            else:
+                f_obj = audio_file
+
+            files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
+
+            response = requests.post(f"{self._media_url}/transcribe", files=files)
+            response.raise_for_status()
+            return response.json().get("text", "")
+
+        except Exception as e:
+            log(f"STT Error: {e}", _print=True)
+            return ""
+
+    def generate_audio(self, prompt, voice=None, **kwargs):
+        """
+        Sends text to the Media AI container for TTS.
+        """
+        voice = voice or random.choice(list(self.VOICES.keys()))
+
+        try:
+            payload = {"text": prompt, "voice": voice}
+            response = requests.post(f"{self._media_url}/tts", json=payload)
+            response.raise_for_status()
+
+            # Response content is WAV bytes
+            wav_bytes = response.content
+
+            # Convert to MP3 to match your original interface (using pydub)
+            audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
+            mp3_buffer = io.BytesIO()
+            audio.export(mp3_buffer, format="mp3")
+            return mp3_buffer.getvalue()
+
+        except Exception as e:
+            log(f"TTS Error: {e}", _print=True)
+            return None
+
+    def generate_image(self, prompt, **kwargs):
+        """
+        Generates an image using Local AI.
+        If 'files' are provided, performs Image-to-Image generation using the first file as reference.
+        """
+        try:
+            # Prepare the multipart data
+            # We send the prompt as a form field
+            data = {"prompt": prompt}
+            files = {}
+
+            # Check if reference images were passed
+            if kwargs.get("files"):
+                # Take the first available file
+                for fn, f_bytes in kwargs.get("files").items():
+                    # If f_bytes is bytes, wrap in IO, else assume it's file-like
+                    if isinstance(f_bytes, bytes):
+                        file_obj = io.BytesIO(f_bytes)
+                    else:
+                        file_obj = f_bytes
+
+                    # Add to the request files
+                    # Key must be 'file' to match server.py logic
+                    files["file"] = (fn, file_obj, "image/png")
+                    break  # We only support 1 reference image for SD Img2Img
+
+            # Send Request
+            if files:
+                # Multipart/form-data request (Prompt + File)
+                response = requests.post(
+                    f"{self._media_url}/generate-image", data=data, files=files
+                )
+            else:
+                # Standard request (Prompt only) - server.py handles request.form vs json
+                # But our updated server expects form data for consistency
+                response = requests.post(f"{self._media_url}/generate-image", data=data)
+
+            response.raise_for_status()
+
+            # Returns WebP bytes directly
+            return response.content
+
+        except Exception as e:
+            log(f"Image Gen Error: {e}", _print=True)
+            return None
+
+    def list_voices(self, filters=[]):
+        # Same logic as before
+        if not filters:
+            return list(self.VOICES.keys())
+        voices = []
+        for voice, attribs in self.VOICES.items():
+            if any(f.lower() in attribs for f in filters):
+                voices.append(voice)
+        return voices
+
+    # Unused methods from original that don't apply to Local AI
+    def upload(self, file):
+        # Local models don't really have a "File Store" API like Gemini.
+        # We handle context by passing text directly in prompt.
+        pass
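
Note: everything in local_model.py funnels through two HTTP services: Ollama's /api/generate endpoint for text and JSON, and a separate media container for speech and images. A standalone sketch of the JSON-mode round trip the class depends on (the base URL and model name mirror the defaults above and are deployment-specific assumptions):

import json

import requests

# Assumes an Ollama server reachable at this base URL with the
# mistral-nemo model pulled; both values are deployment-specific.
OLLAMA_URL = "http://ollama_internal:11434/api"

payload = {
    "model": "mistral-nemo",
    "prompt": "List two rivers in France.",
    "system": (
        "Respond strictly with a valid JSON object matching this schema:\n"
        '{"name": "rivers", "parameters": {"rivers": ["list of strings"]}}'
    ),
    "format": "json",  # Ollama constrains the output to valid JSON
    "stream": False,   # return one complete response object
}

resp = requests.post(f"{OLLAMA_URL}/generate", json=payload, timeout=120)
resp.raise_for_status()
# Non-streaming responses carry the generated text in the "response" field.
result = json.loads(resp.json().get("response", "{}"))
print(result)
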
--- autonomous_app-0.3.28/src/autonomous_app.egg-info/PKG-INFO
+++ autonomous_app-0.3.29/src/autonomous_app.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.28
+Version: 0.3.29
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous
--- autonomous_app-0.3.28/src/autonomous_app.egg-info/SOURCES.txt
+++ autonomous_app-0.3.29/src/autonomous_app.egg-info/SOURCES.txt
@@ -15,7 +15,7 @@ src/autonomous/ai/models/__init__.py
 src/autonomous/ai/models/aws.py
 src/autonomous/ai/models/deepseek.py
 src/autonomous/ai/models/gemini.py
-src/autonomous/ai/models/local.py
+src/autonomous/ai/models/local_model.py
 src/autonomous/ai/models/openai.py
 src/autonomous/apis/version_control/GHCallbacks.py
 src/autonomous/apis/version_control/GHOrganization.py
--- autonomous_app-0.3.28/src/autonomous/ai/models/local.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import io
-import json
-import os
-import random
-import time
-from base64 import b64decode
-
-import openai
-from ollama import ChatResponse, chat
-
-from autonomous import log
-from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
-from autonomous.model.automodel import AutoModel
-
-
-class LocalAIModel(AutoModel):
-    _client = None
-    instructions = StringAttr(
-        default="You are highly skilled AI trained to assist with various tasks."
-    )
-    description = StringAttr(
-        default="A helpful AI assistant trained to assist with various tasks."
-    )
-
-    @property
-    def client(self):
-        if not self._client:
-            self._client = "deepseek-r1"  # OpenAI(api_key=os.environ.get("OPENAI_KEY"))
-        return self._client
-
-    def clear_agent(self):
-        pass
-
-    def clear_agents(self):
-        pass
-
-    # def _get_agent_id(self):
-    # pass
-
-    # def _add_function(self, user_function):
-    pass
-
-    def _format_messages(self, messages):
-        pass
-
-    def clear_files(self, file_id=None):
-        pass
-
-    def attach_file(self, file_contents, filename="dbdata.json"):
-        pass
-
-    def generate_json(self, messages, function, additional_instructions=""):
-        message = messages + additional_instructions
-        message += f"""
-        IMPORTANT: Respond in JSON FORMAT using the SCHEMA below. DO NOT add any text to the response outside of the supplied JSON schema:
-        {function}
-        """
-        response: ChatResponse = chat(
-            model=self.client,
-            messages=[
-                {
-                    "role": "user",
-                    "content": message,
-                },
-            ],
-        )
-        return response.message.content
-
-    def generate_text(self, messages, additional_instructions=""):
-        message = messages + additional_instructions
-        response: ChatResponse = chat(
-            model=self.client,
-            messages=[
-                {
-                    "role": "user",
-                    "content": message,
-                },
-            ],
-        )
-        return response.message.content
-
-    def generate_audio(self, prompt, **kwargs):
-        raise NotImplementedError
-
-    def generate_image(self, prompt, **kwargs):
-        raise NotImplementedError
-
-    def summarize_text(self, text, primer=""):
-        response: ChatResponse = chat(
-            model=self.client,
-            messages=[
-                {
-                    "role": "system",
-                    "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
-                },
-                {"role": "user", "content": text},
-            ],
-        )
-        return response.message.content
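
Note: for contrast, the deleted local.py drove Ollama through the ollama Python client's chat() call with a hard-coded deepseek-r1 model rather than raw HTTP. Roughly, assuming the ollama package is installed and the model is pulled:

# Sketch of the removed transport: the ollama client library wraps the
# same local server that 0.3.29 now calls directly with requests.
from ollama import chat

response = chat(
    model="deepseek-r1",  # the hard-coded default in the deleted file
    messages=[{"role": "user", "content": "Summarize: local-first AI."}],
)
print(response.message.content)

Swapping the client library for plain requests drops the openai and ollama dependencies and lets one HTTP pattern cover text generation, JSON mode, and the media sidecar endpoints.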