autonomous-app 0.3.25__py3-none-any.whl → 0.3.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
autonomous/__init__.py CHANGED
@@ -1,4 +1,4 @@
-__version__ = "0.3.25"
+__version__ = "0.3.27"
 
 from dotenv import load_dotenv
 

autonomous/ai/jsonagent.py CHANGED
@@ -15,9 +15,9 @@ class JSONAgent(BaseAgent):
         default="A helpful AI assistant trained to assist with generating JSON formatted data."
     )
 
-    def generate(self, messages, function, additional_instructions=""):
+    def generate(self, messages, function, additional_instructions="", **kwargs):
         result = self.get_client().generate_json(
-            messages, function, additional_instructions
+            messages, function, additional_instructions, **kwargs
         )
         if isinstance(result, str):
             result = json.loads(result)
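
Note: the new **kwargs pass-through lets callers forward the uri/files options that GeminiAIModel.generate_json now accepts (see the gemini.py hunks below). A minimal usage sketch, assuming an agent instance is available; the schema shape and the file URI are placeholder assumptions, not part of the package:

    # Illustrative sketch only; schema shape and file URI are assumptions.
    from autonomous.ai.jsonagent import JSONAgent

    schema = {
        "name": "extract_summary",
        "description": "Return a short structured summary.",
        "parameters": {
            "type": "object",
            "properties": {"summary": {"type": "string"}},
            "required": ["summary"],
        },
    }

    agent = JSONAgent()
    result = agent.generate(
        messages="Summarize the attached notes as JSON.",
        function=schema,
        # Added in this version range: extra keyword arguments are forwarded
        # to generate_json, e.g. a previously uploaded Gemini Files API URI.
        uri="files/example-upload",
    )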

autonomous/ai/models/gemini.py CHANGED
@@ -1,10 +1,13 @@
 import io
 import os
 import random
+import re
+import time
 import wave
 
 from google import genai
 from google.genai import types
+from google.genai.types import Part
 from pydub import AudioSegment
 
 from autonomous import log
@@ -21,6 +24,7 @@ class GeminiAIModel(AutoModel):
     _stt_model = "gemini-3-pro-preview"
     _tts_model = "gemini-2.5-flash-preview-tts"
     MAX_FILES = 14
+    MAX_SUMMARY_TOKEN_LENGTH = 10000
     VOICES = {
         "Zephyr": ["female"],
         "Puck": ["male"],
@@ -106,13 +110,49 @@ class GeminiAIModel(AutoModel):
         buffer.seek(0)
         return buffer
 
-    def generate_json(self, message, function, additional_instructions=""):
+    def _add_files(self, file_list):
+        existing_files = self.client.files.list()
+        log(f"Existing files: {[f.display_name for f in existing_files]}", _print=True)
+        for f in existing_files:
+            result = self.client.files.delete(name=f.name)
+            log(f"Deleting old version of {f.name}: {result}", _print=True)
+        file_refs = []
+        for file_dict in file_list:
+            fn = file_dict["name"]
+            fileobj = file_dict["file"]
+            log(f"Uploading new {fn}...", _print=True)
+            uploaded_file = self.client.files.upload(
+                file=fileobj.name,
+                config={"mime_type": "application/json", "display_name": fn},
+            )
+            # 4. Wait for processing (Usually instant for JSON, critical for Video/PDF)
+
+            # This ensures the file is 'ACTIVE' before you use it in a prompt.
+            while uploaded_file.state.name == "PROCESSING":
+                time.sleep(1)
+                uploaded_file = self.client.get_file(uploaded_file.name)
+            file_refs.append(uploaded_file)
+        return file_refs
+
+    def generate_json(self, message, function, additional_instructions="", **kwargs):
         # The API call must use the 'tools' parameter instead of 'response_json_schema'
         function_definition = self._add_function(function)
 
+        contents = [message]
+        if uri := kwargs.get("uri"):
+            contents.append(
+                Part.from_uri(
+                    file_uri=uri,
+                    mime_type="application/json",
+                ),
+            )
+
+        if files := kwargs.get("files"):
+            contents += self._add_files(files)
+
         response = self.client.models.generate_content(
             model=self._json_model,
-            contents=message,
+            contents=contents,
             config=types.GenerateContentConfig(
                 system_instruction=f"{self.instructions}.{additional_instructions}",
                 tools=[types.Tool(function_declarations=[function_definition])],
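
Note: _add_files deletes any previously uploaded files, re-uploads each entry, and polls until the upload leaves the PROCESSING state. Each entry in the files list is a dict with a display "name" and a "file" object exposing a .name path, since the upload call passes file=fileobj.name. A hedged sketch of calling generate_json with the new keyword arguments; the model construction, schema dict, and file path are placeholder assumptions:

    from autonomous.ai.models.gemini import GeminiAIModel

    model = GeminiAIModel()
    schema = {"name": "describe_regions"}  # placeholder function declaration

    with open("world_export.json") as fp:
        result = model.generate_json(
            "Describe each region in the attached export.",
            schema,
            # Dicts shaped the way _add_files expects: a display name plus an
            # open file object whose .name attribute is the on-disk path.
            files=[{"name": "world_export.json", "file": fp}],
        )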
@@ -140,36 +180,61 @@ class GeminiAIModel(AutoModel):
             log(f"==== Failed to parse ToolCall response: {e} ====")
             return {}
 
-    def generate_text(self, message, additional_instructions=""):
+    def generate_text(self, message, additional_instructions="", **kwargs):
+        contents = [message]
+        if uri := kwargs.get("uri"):
+            contents.append(
+                Part.from_uri(
+                    file_uri=uri,
+                    mime_type="application/json",
+                ),
+            )
+
+        if files := kwargs.get("files"):
+            contents += self._add_files(files)
+
         response = self.client.models.generate_content(
             model=self._text_model,
             config=types.GenerateContentConfig(
                 system_instruction=f"{self.instructions}.{additional_instructions}",
             ),
-            contents=message,
+            contents=contents,
         )
 
         # log(results, _print=True)
         # log("=================== END REPORT ===================", _print=True)
         return response.text
 
-    def summarize_text(self, text, primer=""):
+    def summarize_text(self, text, primer="", **kwargs):
         primer = primer or self.instructions
-        response = self.client.models.generate_content(
-            model=self._summary_model,
-            config=types.GenerateContentConfig(
-                system_instruction=f"{primer}",
-            ),
-            contents=text,
-        )
-        log(response)
-        try:
-            result = response.candidates[0].content.parts[0].text
-        except Exception as e:
-            log(f"{type(e)}:{e}\n\n Unable to generate content ====")
-            return None
 
-        return result
+        updated_prompt_list = []
+        # Find all words in the prompt
+        words = re.findall(r"\w+", text)
+        # Split the words into chunks
+        for i in range(0, len(words), self.MAX_SUMMARY_TOKEN_LENGTH):
+            # Join a chunk of words and add to the list
+            updated_prompt_list.append(
+                " ".join(words[i : i + self.MAX_SUMMARY_TOKEN_LENGTH])
+            )
+
+        full_summary = ""
+        for p in updated_prompt_list:
+            response = self.client.models.generate_content(
+                model=self._summary_model,
+                config=types.GenerateContentConfig(
+                    system_instruction=f"{primer}",
+                ),
+                contents=text,
+            )
+            try:
+                summary = response.candidates[0].content.parts[0].text
+            except Exception as e:
+                log(f"{type(e)}:{e}\n\n Unable to generate content ====", _print=True)
+                break
+            else:
+                full_summary += summary + "\n"
+        return summary
 
     def generate_audio_text(
         self, audio_file, prompt="Transcribe this audio clip", **kwargs
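
Note: summarize_text now splits long input into word chunks of at most MAX_SUMMARY_TOKEN_LENGTH and summarizes each chunk in turn. A standalone sketch of just the chunking step; the constant mirrors the class attribute and the sample text is illustrative:

    import re

    MAX_SUMMARY_TOKEN_LENGTH = 10000  # mirrors GeminiAIModel.MAX_SUMMARY_TOKEN_LENGTH

    text = "word " * 25000  # illustrative long input
    words = re.findall(r"\w+", text)

    # Rebuild the prompt as chunks of at most MAX_SUMMARY_TOKEN_LENGTH words each.
    chunks = [
        " ".join(words[i : i + MAX_SUMMARY_TOKEN_LENGTH])
        for i in range(0, len(words), MAX_SUMMARY_TOKEN_LENGTH)
    ]
    print(len(chunks))  # 3 chunks for 25,000 words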
@@ -292,7 +357,7 @@ class GeminiAIModel(AutoModel):
                 ),
             ),
         )
-        log(response.promptFeedback, _print=True)
+        log(response, _print=True)
         log(response.candidates, _print=True)
         image_parts = [
             part.inline_data.data

autonomous/ai/textagent.py CHANGED
@@ -1,7 +1,8 @@
 from autonomous import log
+from autonomous.ai.baseagent import BaseAgent
 from autonomous.model.autoattr import ReferenceAttr, StringAttr
 from autonomous.model.automodel import AutoModel
-from autonomous.ai.baseagent import BaseAgent
+
 from .models.openai import OpenAIModel
 
 
@@ -14,8 +15,10 @@ class TextAgent(BaseAgent):
         default="A helpful AI assistant trained to assist with generating text according to the given requirements."
     )
 
-    def summarize_text(self, text, primer=""):
-        return self.get_client().summarize_text(text, primer)
+    def summarize_text(self, text, primer="", **kwargs):
+        return self.get_client().summarize_text(text, primer, **kwargs)
 
-    def generate(self, messages, additional_instructions=""):
-        return self.get_client().generate_text(messages, additional_instructions)
+    def generate(self, messages, additional_instructions="", **kwargs):
+        return self.get_client().generate_text(
+            messages, additional_instructions, **kwargs
+        )
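
Note: both TextAgent methods now forward extra keyword arguments to the underlying client, so the uri/files options shown above can also be passed from the agent layer when the configured client is the Gemini model. A brief illustrative sketch; the agent construction, report text, and URI are assumptions:

    from autonomous.ai.textagent import TextAgent

    agent = TextAgent()
    long_report = "Quarterly status update. " * 200  # placeholder input
    summary = agent.summarize_text(long_report, primer="Summarize for a project update.")
    answer = agent.generate(
        "List the open questions in the attached notes.",
        uri="files/example-upload",  # forwarded through generate_text to the client
    )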

autonomous_app-0.3.25.dist-info/METADATA → autonomous_app-0.3.27.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.25
+Version: 0.3.27
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous

autonomous_app-0.3.25.dist-info/RECORD → autonomous_app-0.3.27.dist-info/RECORD CHANGED
@@ -1,16 +1,16 @@
-autonomous/__init__.py,sha256=wxu0oJGOssLoVxBOrImCOWeAi8oSYBZm-k8pDAuEkzM,95
+autonomous/__init__.py,sha256=kn0Y6qhJcvWmWzx8A45mzDbvKLy0PoPhcEa4xeWifcU,95
 autonomous/cli.py,sha256=z4AaGeWNW_uBLFAHng0J_lfS9v3fXemK1PeT85u4Eo4,42
 autonomous/logger.py,sha256=NQtgEaTWNAWfLSgqSP7ksXj1GpOuCgoUV711kSMm-WA,2022
 autonomous/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autonomous/ai/audioagent.py,sha256=SvPLzKgqUnrkcsR7y93aURSrStIrryuntQMPS1SzUXw,1033
 autonomous/ai/baseagent.py,sha256=HYCqC4HmK5afNMunmTkhRE8O0OaONl2GxXnISkdOM58,1094
 autonomous/ai/imageagent.py,sha256=bIOrgg_CM-rgfyLme7V9vPqP8WKVMIAVoB2E9lLtIRk,521
-autonomous/ai/jsonagent.py,sha256=a_l4HyyVRj3FB6py_P1xdc4Bj9uNI1YmJrWQXAksIvs,964
-autonomous/ai/textagent.py,sha256=wI1-VC9zscKYyxYBg4pZ0ZyNJ5ZvKkLfWsIY1vJFChk,863
+autonomous/ai/jsonagent.py,sha256=ldfWHtKfLa2ypoM95U6PFETAE9R5B53s5oGzIzF7dQk,984
+autonomous/ai/textagent.py,sha256=1yM1aMvws64PocvG_L-POMDKjxq2JDuGqgc3haUHybU,926
 autonomous/ai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autonomous/ai/models/aws.py,sha256=bGDjnGTm350zOqor9IsICzUkBUN2bubGI_ZssQuSXIw,12715
 autonomous/ai/models/deepseek.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
-autonomous/ai/models/gemini.py,sha256=RLQjJyPGT-Alen2334v9Hf58u4K7PhhTfXb3Vo24xVU,11380
+autonomous/ai/models/gemini.py,sha256=jTCOEoCJd-sCB1oPjYuCitFuIheut3RvN4DpuGabx0c,13839
 autonomous/ai/models/local.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
 autonomous/ai/models/openai.py,sha256=2-LttCm6woGklaLbs1H5LjlbfM-7leDwGmC9vksSqW4,13135
 autonomous/apis/version_control/GHCallbacks.py,sha256=AyiUlYfV5JePi11GVyqYyXoj5UTbPKzS-HRRI94rjJo,1069
@@ -57,7 +57,7 @@ autonomous/storage/localstorage.py,sha256=FzrR6O9mMGAZt5dDgqzkeOQVfGRXCygR0kksz2
 autonomous/tasks/__init__.py,sha256=pn7iZ14MhcHUdzcLkfkd4-45wgPP0tXahAz_cFgb_Tg,32
 autonomous/tasks/autotask.py,sha256=aK5iapDhgcAic3F5ZYMAhNKJkOepj8yWwbMizKDzUwQ,4153
 autonomous/utils/markdown.py,sha256=tf8vlHARiQO1X_aGbqlYozzP_TbdiDRT9EEP6aFRQo0,2153
-autonomous_app-0.3.25.dist-info/METADATA,sha256=8GErlEFFJe-Ysm5k1xII-SocecbVNt2CmbkVQ1xeCh8,3015
-autonomous_app-0.3.25.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-autonomous_app-0.3.25.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
-autonomous_app-0.3.25.dist-info/RECORD,,
+autonomous_app-0.3.27.dist-info/METADATA,sha256=x9PPnKHTy5dMQks_32hax4zXjUk1dxAAmlbpcG-JpIQ,3015
+autonomous_app-0.3.27.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+autonomous_app-0.3.27.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
+autonomous_app-0.3.27.dist-info/RECORD,,