autonomous-app 0.3.27-py3-none-any.whl → 0.3.29-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
autonomous/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.3.27"
+ __version__ = "0.3.29"
 
  from dotenv import load_dotenv
 
autonomous/ai/baseagent.py CHANGED
@@ -3,24 +3,16 @@ from autonomous.model.autoattr import ReferenceAttr
  from autonomous.model.automodel import AutoModel
 
  from .models.gemini import GeminiAIModel
+ from .models.local_model import LocalAIModel
  from .models.openai import OpenAIModel
 
 
- def clear_agents():
-     for agent in OpenAIModel.all():
-         log(f"Deleting {agent.name}")
-         agent.clear_agents()
-         agent.clear_files()
-         agent.delete()
-     return "Success"
-
-
  class BaseAgent(AutoModel):
      meta = {"abstract": True, "allow_inheritance": True, "strict": False}
 
-     client = ReferenceAttr(choices=[GeminiAIModel])
+     client = ReferenceAttr(choices=[LocalAIModel])
 
-     _ai_model = GeminiAIModel
+     _ai_model = LocalAIModel
 
      def delete(self):
          if self.client:
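The functional change in this hunk is the default backend: `BaseAgent.client` and `_ai_model` now point at `LocalAIModel` instead of `GeminiAIModel`, and the module-level `clear_agents()` helper that iterated `OpenAIModel.all()` is gone. A hypothetical sketch of the lazy-client pattern this implies; `get_client` is an assumption, since it is called in `jsonagent.py` below but its body is not part of this diff:

```python
# Hypothetical sketch of BaseAgent-style client resolution; not the
# package's actual implementation.
class AgentSketch:
    _ai_model = None  # 0.3.29 sets this to LocalAIModel; 0.3.27 used GeminiAIModel

    def __init__(self):
        self.client = None  # a ReferenceAttr in the real model

    def get_client(self):
        # Instantiate and cache the configured backend on first use.
        if self.client is None:
            self.client = self._ai_model()
        return self.client
```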
autonomous/ai/jsonagent.py CHANGED
@@ -1,8 +1,9 @@
  import json
 
+ from autonomous.ai.baseagent import BaseAgent
  from autonomous.model.autoattr import ReferenceAttr, StringAttr
  from autonomous.model.automodel import AutoModel
- from autonomous.ai.baseagent import BaseAgent
+
  from .models.openai import OpenAIModel
 
 
@@ -24,3 +25,6 @@ class JSONAgent(BaseAgent):
          elif not isinstance(result, dict):
              raise ValueError(f"Invalid JSON response from AI model.\n\n{result}")
          return result
+
+     def upload(self, file):
+         return self.get_client().upload(file=file)
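Besides the import reorder, `JSONAgent` gains an `upload` passthrough so callers can hand a file to whichever backend the agent references. A hedged usage sketch; constructing `JSONAgent()` with no arguments is an assumption about the `AutoModel` constructor:

```python
from autonomous.ai.jsonagent import JSONAgent

agent = JSONAgent()
with open("dbdata.json", "rb") as f:
    # Delegates to LocalAIModel.upload (a no-op in 0.3.29) or
    # GeminiAIModel.upload, depending on the agent's client.
    agent.upload(f)
```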
autonomous/ai/models/gemini.py CHANGED
@@ -23,6 +23,16 @@ class GeminiAIModel(AutoModel):
      _json_model = "gemini-3-pro-preview"
      _stt_model = "gemini-3-pro-preview"
      _tts_model = "gemini-2.5-flash-preview-tts"
+
+     messages = ListAttr(StringAttr(default=[]))
+     name = StringAttr(default="agent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with various tasks."
+     )
+     description = StringAttr(
+         default="A helpful AI assistant trained to assist with various tasks."
+     )
+
      MAX_FILES = 14
      MAX_SUMMARY_TOKEN_LENGTH = 10000
      VOICES = {
@@ -58,15 +68,6 @@ class GeminiAIModel(AutoModel):
          "Sulafar": ["female"],
      }
 
-     messages = ListAttr(StringAttr(default=[]))
-     name = StringAttr(default="agent")
-     instructions = StringAttr(
-         default="You are highly skilled AI trained to assist with various tasks."
-     )
-     description = StringAttr(
-         default="A helpful AI assistant trained to assist with various tasks."
-     )
-
      @property
      def client(self):
          if not self._client:
@@ -114,8 +115,16 @@ class GeminiAIModel(AutoModel):
          existing_files = self.client.files.list()
          log(f"Existing files: {[f.display_name for f in existing_files]}", _print=True)
          for f in existing_files:
-             result = self.client.files.delete(name=f.name)
-             log(f"Deleting old version of {f.name}: {result}", _print=True)
+             # Delete old files (older than 15 minutes)
+             age_seconds = (
+                 (time.time() - f.update_time.timestamp())
+                 if f.update_time
+                 else (time.time() - f.create_time.timestamp())
+             )
+             log(age_seconds, _print=True)
+             if age_seconds > 900:
+                 result = self.client.files.delete(name=f.name)
+                 log(f"Deleting old version of {f.name}: {result}", _print=True)
          file_refs = []
          for file_dict in file_list:
              fn = file_dict["name"]
@@ -132,7 +141,10 @@ class GeminiAIModel(AutoModel):
              time.sleep(1)
              uploaded_file = self.client.get_file(uploaded_file.name)
          file_refs.append(uploaded_file)
-         return file_refs
+         return file_refs
+
+     def upload(self, file):
+         return self._add_files([file])
 
      def generate_json(self, message, function, additional_instructions="", **kwargs):
          # The API call must use the 'tools' parameter instead of 'response_json_schema'
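The rewritten cleanup loop no longer deletes every remote file: it computes an age from `update_time` (falling back to `create_time`) and only deletes files past the 900-second (15-minute) cutoff. A self-contained sketch of the same arithmetic with a stubbed file object, since the real loop needs a live Gemini client:

```python
import time
from datetime import datetime, timedelta, timezone


class FakeRemoteFile:
    """Stub exposing only the timestamp fields the cleanup loop reads."""

    def __init__(self, update_time=None, create_time=None):
        self.update_time = update_time
        self.create_time = create_time


f = FakeRemoteFile(update_time=datetime.now(timezone.utc) - timedelta(minutes=20))
age_seconds = (
    (time.time() - f.update_time.timestamp())
    if f.update_time
    else (time.time() - f.create_time.timestamp())
)
print(age_seconds > 900)  # True: a 20-minute-old file is eligible for deletion
```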
autonomous/ai/models/local_model.py ADDED
@@ -0,0 +1,272 @@
+ import io
+ import json
+ import os
+ import random
+ import re
+ import wave
+
+ import requests
+ from pydub import AudioSegment
+
+ from autonomous import log
+ from autonomous.model.autoattr import ListAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+
+ class LocalAIModel(AutoModel):
+     # Configuration
+     _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama_internal:11434/api")
+     _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai_internal:5005")
+
+     # Models to use in Ollama
+     _text_model = "mistral-nemo"
+     _json_model = "mistral-nemo"
+
+     messages = ListAttr(StringAttr(default=[]))
+     name = StringAttr(default="agent")
+     instructions = StringAttr(
+         default="You are highly skilled AI trained to assist with various tasks."
+     )
+
+     # Keep your voice list (mapped to random seeds/embeddings in the future)
+     VOICES = {
+         "Zephyr": ["female"],
+         "Puck": ["male"],
+         "Charon": ["male"],
+         "Kore": ["female"],
+         "Fenrir": ["non-binary"],
+         "Leda": ["female"],
+         "Orus": ["male"],
+         "Aoede": ["female"],
+         "Callirhoe": ["female"],
+         "Autonoe": ["female"],
+         "Enceladus": ["male"],
+         "Iapetus": ["male"],
+         "Umbriel": ["male"],
+         "Algieba": ["male"],
+         "Despina": ["female"],
+         "Erinome": ["female"],
+         "Algenib": ["male"],
+         "Rasalgethi": ["non-binary"],
+         "Laomedeia": ["female"],
+         "Achernar": ["female"],
+         "Alnilam": ["male"],
+         "Schedar": ["male"],
+         "Gacrux": ["female"],
+         "Pulcherrima": ["non-binary"],
+         "Achird": ["male"],
+         "Zubenelgenubi": ["male"],
+         "Vindemiatrix": ["female"],
+         "Sadachbia": ["male"],
+         "Sadaltager": ["male"],
+         "Sulafar": ["female"],
+     }
+
+     def _convert_tools_to_json_schema(self, user_function):
+         """
+         Ollama doesn't support 'tools' strictly yet.
+         We convert the tool definition into a system prompt instruction.
+         """
+         schema = {
+             "name": user_function.get("name"),
+             "parameters": user_function.get("parameters"),
+         }
+         return json.dumps(schema, indent=2)
+
+     def generate_json(self, message, function, additional_instructions="", **kwargs):
+         """
+         Mimics Gemini's tool use by forcing Ollama into JSON mode
+         and injecting the schema into the prompt.
+         """
+         schema_str = self._convert_tools_to_json_schema(function)
+
+         system_prompt = (
+             f"{self.instructions}. {additional_instructions}\n"
+             f"You must respond strictly with a valid JSON object matching this schema:\n"
+             f"{schema_str}\n"
+             f"Do not include markdown formatting or explanations."
+         )
+
+         payload = {
+             "model": self._json_model,
+             "prompt": message,
+             "system": system_prompt,
+             "format": "json",  # Force JSON mode
+             "stream": False,
+         }
+
+         try:
+             response = requests.post(f"{self._ollama_url}/generate", json=payload)
+             response.raise_for_status()
+             result_text = response.json().get("response", "{}")
+
+             # log(f"Raw Local JSON: {result_text}", _print=True)
+             return json.loads(result_text)
+
+         except Exception as e:
+             log(f"==== LocalAI JSON Error: {e} ====", _print=True)
+             return {}
+
+     def generate_text(self, message, additional_instructions="", **kwargs):
+         """
+         Standard text generation via Ollama.
+         """
+         payload = {
+             "model": self._text_model,
+             "prompt": message,
+             "system": f"{self.instructions}. {additional_instructions}",
+             "stream": False,
+         }
+
+         # Handle 'files' (Ollama supports images in base64, but not arbitrary files easily yet)
+         # If files are text, you should read them and append to prompt.
+         if file_list := kwargs.get("files"):
+             for file_dict in file_list:
+                 fn = file_dict["name"]
+                 fileobj = file_dict["file"]
+                 if fn.lower().endswith((".txt", ".md", ".json", ".csv")):
+                     content = fileobj.read()
+                     if isinstance(content, bytes):
+                         content = content.decode("utf-8", errors="ignore")
+                     payload["prompt"] += f"\n\nContents of {fn}:\n{content}"
+
+         try:
+             response = requests.post(f"{self._ollama_url}/generate", json=payload)
+             response.raise_for_status()
+             return response.json().get("response", "")
+         except Exception as e:
+             log(f"==== LocalAI Text Error: {e} ====", _print=True)
+             return "Error generating text."
+
+     def summarize_text(self, text, primer="", **kwargs):
+         primer = primer or "Summarize the following text concisely."
+
+         # Simple chunking logic (similar to your original)
+         # Note: Mistral-Nemo has a large context window (128k), so chunking
+         # is less necessary than with older models, but we keep it for safety.
+         max_chars = 12000  # Roughly 3k tokens
+         chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
+
+         full_summary = ""
+         for chunk in chunks:
+             payload = {
+                 "model": self._text_model,
+                 "prompt": f"{primer}:\n\n{chunk}",
+                 "stream": False,
+             }
+             try:
+                 res = requests.post(f"{self._ollama_url}/generate", json=payload)
+                 full_summary += res.json().get("response", "") + "\n"
+             except Exception as e:
+                 log(f"Summary Error: {e}", _print=True)
+                 break
+
+         return full_summary
+
+     def generate_audio_text(self, audio_file, prompt="", **kwargs):
+         """
+         Sends audio bytes to the Media AI container for Whisper transcription.
+         """
+         try:
+             # Prepare the file for upload
+             # audio_file is likely bytes, so we wrap in BytesIO if needed
+             if isinstance(audio_file, bytes):
+                 f_obj = io.BytesIO(audio_file)
+             else:
+                 f_obj = audio_file
+
+             files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
+
+             response = requests.post(f"{self._media_url}/transcribe", files=files)
+             response.raise_for_status()
+             return response.json().get("text", "")
+
+         except Exception as e:
+             log(f"STT Error: {e}", _print=True)
+             return ""
+
+     def generate_audio(self, prompt, voice=None, **kwargs):
+         """
+         Sends text to the Media AI container for TTS.
+         """
+         voice = voice or random.choice(list(self.VOICES.keys()))
+
+         try:
+             payload = {"text": prompt, "voice": voice}
+             response = requests.post(f"{self._media_url}/tts", json=payload)
+             response.raise_for_status()
+
+             # Response content is WAV bytes
+             wav_bytes = response.content
+
+             # Convert to MP3 to match your original interface (using pydub)
+             audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
+             mp3_buffer = io.BytesIO()
+             audio.export(mp3_buffer, format="mp3")
+             return mp3_buffer.getvalue()
+
+         except Exception as e:
+             log(f"TTS Error: {e}", _print=True)
+             return None
+
+     def generate_image(self, prompt, **kwargs):
+         """
+         Generates an image using Local AI.
+         If 'files' are provided, performs Image-to-Image generation using the first file as reference.
+         """
+         try:
+             # Prepare the multipart data
+             # We send the prompt as a form field
+             data = {"prompt": prompt}
+             files = {}
+
+             # Check if reference images were passed
+             if kwargs.get("files"):
+                 # Take the first available file
+                 for fn, f_bytes in kwargs.get("files").items():
+                     # If f_bytes is bytes, wrap in IO, else assume it's file-like
+                     if isinstance(f_bytes, bytes):
+                         file_obj = io.BytesIO(f_bytes)
+                     else:
+                         file_obj = f_bytes
+
+                     # Add to the request files
+                     # Key must be 'file' to match server.py logic
+                     files["file"] = (fn, file_obj, "image/png")
+                     break  # We only support 1 reference image for SD Img2Img
+
+             # Send Request
+             if files:
+                 # Multipart/form-data request (Prompt + File)
+                 response = requests.post(
+                     f"{self._media_url}/generate-image", data=data, files=files
+                 )
+             else:
+                 # Standard request (Prompt only) - server.py handles request.form vs json
+                 # But our updated server expects form data for consistency
+                 response = requests.post(f"{self._media_url}/generate-image", data=data)
+
+             response.raise_for_status()
+
+             # Returns WebP bytes directly
+             return response.content
+
+         except Exception as e:
+             log(f"Image Gen Error: {e}", _print=True)
+             return None
+
+     def list_voices(self, filters=[]):
+         # Same logic as before
+         if not filters:
+             return list(self.VOICES.keys())
+         voices = []
+         for voice, attribs in self.VOICES.items():
+             if any(f.lower() in attribs for f in filters):
+                 voices.append(voice)
+         return voices
+
+     # Unused methods from original that don't apply to Local AI
+     def upload(self, file):
+         # Local models don't really have a "File Store" API like Gemini.
+         # We handle context by passing text directly in prompt.
+         pass
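`LocalAIModel.generate_json` leans on Ollama's `/api/generate` endpoint with `"format": "json"` instead of native tool calling: the tool schema is serialized into the system prompt and the model is constrained to emit JSON. A minimal standalone sketch of that request; the URL is the compose-internal default from the class above, and the schema contents are illustrative:

```python
import json

import requests

schema = {"name": "colors", "parameters": {"colors": {"type": "array"}}}
payload = {
    "model": "mistral-nemo",
    "prompt": "List three colors.",
    "system": (
        "You must respond strictly with a valid JSON object matching this schema:\n"
        f"{json.dumps(schema, indent=2)}\n"
        "Do not include markdown formatting or explanations."
    ),
    "format": "json",  # ask Ollama to constrain decoding to valid JSON
    "stream": False,   # one complete response instead of a chunk stream
}

response = requests.post("http://ollama_internal:11434/api/generate", json=payload)
response.raise_for_status()
print(json.loads(response.json().get("response", "{}")))
```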
autonomous_app-0.3.27.dist-info/METADATA → autonomous_app-0.3.29.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: autonomous-app
- Version: 0.3.27
+ Version: 0.3.29
  Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
  Author-email: Steven A Moore <samoore@binghamton.edu>
  Project-URL: homepage, https://github.com/Sallenmoore/autonomous
autonomous_app-0.3.27.dist-info/RECORD → autonomous_app-0.3.29.dist-info/RECORD RENAMED
@@ -1,17 +1,17 @@
- autonomous/__init__.py,sha256=kn0Y6qhJcvWmWzx8A45mzDbvKLy0PoPhcEa4xeWifcU,95
+ autonomous/__init__.py,sha256=j-9rQTP_Ejh4XtrStDlXn9R3weOnMognFa40DRWU_V0,95
  autonomous/cli.py,sha256=z4AaGeWNW_uBLFAHng0J_lfS9v3fXemK1PeT85u4Eo4,42
  autonomous/logger.py,sha256=NQtgEaTWNAWfLSgqSP7ksXj1GpOuCgoUV711kSMm-WA,2022
  autonomous/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autonomous/ai/audioagent.py,sha256=SvPLzKgqUnrkcsR7y93aURSrStIrryuntQMPS1SzUXw,1033
- autonomous/ai/baseagent.py,sha256=HYCqC4HmK5afNMunmTkhRE8O0OaONl2GxXnISkdOM58,1094
+ autonomous/ai/baseagent.py,sha256=sJDIrCzhUp1OwkfyRqT0ZaB1fk5pV8K5L3jaTxzi9DI,940
  autonomous/ai/imageagent.py,sha256=bIOrgg_CM-rgfyLme7V9vPqP8WKVMIAVoB2E9lLtIRk,521
- autonomous/ai/jsonagent.py,sha256=ldfWHtKfLa2ypoM95U6PFETAE9R5B53s5oGzIzF7dQk,984
+ autonomous/ai/jsonagent.py,sha256=VQGhK0RFo0H_eVH9dAyf4_lp-RIpdgH988joLoKjm94,1065
  autonomous/ai/textagent.py,sha256=1yM1aMvws64PocvG_L-POMDKjxq2JDuGqgc3haUHybU,926
  autonomous/ai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autonomous/ai/models/aws.py,sha256=bGDjnGTm350zOqor9IsICzUkBUN2bubGI_ZssQuSXIw,12715
  autonomous/ai/models/deepseek.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
- autonomous/ai/models/gemini.py,sha256=jTCOEoCJd-sCB1oPjYuCitFuIheut3RvN4DpuGabx0c,13839
- autonomous/ai/models/local.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
+ autonomous/ai/models/gemini.py,sha256=jrTMbh8SAdzzz27elOhs82iwjyutYcy8fvTOSdW-GFQ,14247
+ autonomous/ai/models/local_model.py,sha256=GBXUelGUObo33BYaPVbCqI0asYaFLET2JNMFSXqxngw,9846
  autonomous/ai/models/openai.py,sha256=2-LttCm6woGklaLbs1H5LjlbfM-7leDwGmC9vksSqW4,13135
  autonomous/apis/version_control/GHCallbacks.py,sha256=AyiUlYfV5JePi11GVyqYyXoj5UTbPKzS-HRRI94rjJo,1069
  autonomous/apis/version_control/GHOrganization.py,sha256=mi2livdsGurKiifbvuLwiFbdDzL77IlEfhwEa-tG77I,1155
@@ -57,7 +57,7 @@ autonomous/storage/localstorage.py,sha256=FzrR6O9mMGAZt5dDgqzkeOQVfGRXCygR0kksz2
  autonomous/tasks/__init__.py,sha256=pn7iZ14MhcHUdzcLkfkd4-45wgPP0tXahAz_cFgb_Tg,32
  autonomous/tasks/autotask.py,sha256=aK5iapDhgcAic3F5ZYMAhNKJkOepj8yWwbMizKDzUwQ,4153
  autonomous/utils/markdown.py,sha256=tf8vlHARiQO1X_aGbqlYozzP_TbdiDRT9EEP6aFRQo0,2153
- autonomous_app-0.3.27.dist-info/METADATA,sha256=x9PPnKHTy5dMQks_32hax4zXjUk1dxAAmlbpcG-JpIQ,3015
- autonomous_app-0.3.27.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- autonomous_app-0.3.27.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
- autonomous_app-0.3.27.dist-info/RECORD,,
+ autonomous_app-0.3.29.dist-info/METADATA,sha256=VEPJShGjhIrEukUwiMTlIDiu7bbkEnrvoy2r03WBsQo,3015
+ autonomous_app-0.3.29.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+ autonomous_app-0.3.29.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
+ autonomous_app-0.3.29.dist-info/RECORD,,
autonomous_app-0.3.27.dist-info/WHEEL → autonomous_app-0.3.29.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (80.10.1)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
autonomous/ai/models/local.py DELETED
@@ -1,99 +0,0 @@
- import io
- import json
- import os
- import random
- import time
- from base64 import b64decode
-
- import openai
- from ollama import ChatResponse, chat
-
- from autonomous import log
- from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
- from autonomous.model.automodel import AutoModel
-
-
- class LocalAIModel(AutoModel):
-     _client = None
-     instructions = StringAttr(
-         default="You are highly skilled AI trained to assist with various tasks."
-     )
-     description = StringAttr(
-         default="A helpful AI assistant trained to assist with various tasks."
-     )
-
-     @property
-     def client(self):
-         if not self._client:
-             self._client = "deepseek-r1"  # OpenAI(api_key=os.environ.get("OPENAI_KEY"))
-         return self._client
-
-     def clear_agent(self):
-         pass
-
-     def clear_agents(self):
-         pass
-
-     # def _get_agent_id(self):
-     #     pass
-
-     # def _add_function(self, user_function):
-     pass
-
-     def _format_messages(self, messages):
-         pass
-
-     def clear_files(self, file_id=None):
-         pass
-
-     def attach_file(self, file_contents, filename="dbdata.json"):
-         pass
-
-     def generate_json(self, messages, function, additional_instructions=""):
-         message = messages + additional_instructions
-         message += f"""
-         IMPORTANT: Respond in JSON FORMAT using the SCHEMA below. DO NOT add any text to the response outside of the supplied JSON schema:
-         {function}
-         """
-         response: ChatResponse = chat(
-             model=self.client,
-             messages=[
-                 {
-                     "role": "user",
-                     "content": message,
-                 },
-             ],
-         )
-         return response.message.content
-
-     def generate_text(self, messages, additional_instructions=""):
-         message = messages + additional_instructions
-         response: ChatResponse = chat(
-             model=self.client,
-             messages=[
-                 {
-                     "role": "user",
-                     "content": message,
-                 },
-             ],
-         )
-         return response.message.content
-
-     def generate_audio(self, prompt, **kwargs):
-         raise NotImplementedError
-
-     def generate_image(self, prompt, **kwargs):
-         raise NotImplementedError
-
-     def summarize_text(self, text, primer=""):
-         response: ChatResponse = chat(
-             model=self.client,
-             messages=[
-                 {
-                     "role": "system",
-                     "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
-                 },
-                 {"role": "user", "content": text},
-             ],
-         )
-         return response.message.content
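For comparison, the deleted module talked to Ollama through the `ollama` Python client's `chat()` call (hardcoded to `deepseek-r1`) rather than raw HTTP, and raised `NotImplementedError` for audio and images. A sketch of that older call path, assuming the `ollama` package is installed and the model has been pulled:

```python
from ollama import chat

response = chat(
    model="deepseek-r1",
    messages=[{"role": "user", "content": "Summarize the plot of Hamlet."}],
)
# ChatResponse exposes the generated text at .message.content
print(response.message.content)
```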