autonomous-app 0.3.48__py3-none-any.whl → 0.3.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
autonomous/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.3.48"
+ __version__ = "0.3.50"
 
  from dotenv import load_dotenv
 
autonomous/ai/audioagent.py CHANGED
@@ -8,7 +8,7 @@ from autonomous.model.autoattr import StringAttr
  class AudioAgent(BaseAgent):
      name = StringAttr(default="audioagent")
 
-     provider = StringAttr(default="gemini")
+     provider = StringAttr(default="local")
 
      instructions = StringAttr(
          default="You are highly skilled AI trained to assist with generating audio files."
autonomous/ai/models/local_model.py CHANGED
@@ -4,6 +4,7 @@ import os
  import random
 
  import requests
+ from PIL import Image
  from pydub import AudioSegment
 
  from autonomous import log
@@ -23,10 +24,36 @@ class LocalAIModel(AutoModel):
      _text_model = "llama3"
      _json_model = "llama3"
 
-     # ... VOICES dictionary ... (Keep existing voices)
      VOICES = {
          "Zephyr": ["female"],
-         # ... (keep all your voices) ...
+         "Puck": ["male"],
+         "Charon": ["male"],
+         "Kore": ["female"],
+         "Fenrir": ["non-binary"],
+         "Leda": ["female"],
+         "Orus": ["male"],
+         "Aoede": ["female"],
+         "Callirhoe": ["female"],
+         "Autonoe": ["female"],
+         "Enceladus": ["male"],
+         "Iapetus": ["male"],
+         "Umbriel": ["male"],
+         "Algieba": ["male"],
+         "Despina": ["female"],
+         "Erinome": ["female"],
+         "Algenib": ["male"],
+         "Rasalgethi": ["non-binary"],
+         "Laomedeia": ["female"],
+         "Achernar": ["female"],
+         "Alnilam": ["male"],
+         "Schedar": ["male"],
+         "Gacrux": ["female"],
+         "Pulcherrima": ["non-binary"],
+         "Achird": ["male"],
+         "Zubenelgenubi": ["male"],
+         "Vindemiatrix": ["female"],
+         "Sadachbia": ["male"],
+         "Sadaltager": ["male"],
          "Sulafar": ["female"],
      }
 
@@ -119,7 +146,7 @@ class LocalAIModel(AutoModel):
              },
          }
 
-         # log("==== LocalAI JSON Payload ====", payload, _print=True)
+         log("==== LocalAI JSON Payload ====", payload, _print=True)
 
          result_text = ""
          try:
@@ -139,7 +166,7 @@ class LocalAIModel(AutoModel):
              ):
                  params = result_dict.pop("parameters")
                  result_dict.update(params)
-
+                 log("==== LocalAI JSON Result ====", result_dict, _print=True)
                  return result_dict
 
          except Exception as e:
@@ -218,11 +245,21 @@ class LocalAIModel(AutoModel):
              files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
              response = requests.post(f"{self._media_url}/transcribe", files=files)
              response.raise_for_status()
+             log(f"Transcription response: {response.json()}", _print=True)
              return response.json().get("text", "")
          except Exception as e:
              log(f"STT Error: {e}", _print=True)
              return ""
 
+     def list_voices(self, filters=[]):
+         if not filters:
+             return list(self.VOICES.keys())
+         voices = []
+         for voice, attribs in self.VOICES.items():
+             if any(f.lower() in attribs for f in filters):
+                 voices.append(voice)
+         return voices
+
      def generate_audio(
          self,
          prompt,
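
Note: the new list_voices helper filters on the gender tags stored in VOICES. A minimal usage sketch, assuming a LocalAIModel instance can be constructed here (construction details are outside this diff):

    # Hypothetical usage; "model" stands in for however the package builds a LocalAIModel.
    model = LocalAIModel()

    every_voice = model.list_voices()              # all keys in VOICES
    female_only = model.list_voices(["female"])    # e.g. "Zephyr", "Kore", "Leda", ...
    enby_only = model.list_voices(["Non-Binary"])  # filters are lower-cased before matching
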
@@ -242,64 +279,60 @@ class LocalAIModel(AutoModel):
              log(f"TTS Error: {e}", _print=True)
              return None
 
-     # ... inside LocalAIModel class ...
-
      def _get_dimensions(self, aspect_ratio):
          """
-         Maps abstract aspect ratios to optimal SDXL resolutions.
-         SDXL performs best at ~1024x1024 total pixels.
+         Returns a tuple: ((base_w, base_h), (target_w, target_h))
+
+         1. base_*: The resolution sent to SDXL (approx 1024x1024).
+         2. target_*: The final resolution to resize/upscale to.
          """
-         resolutions = {
+         # Standard SDXL buckets (approx 1MP)
+         # We use these for the initial generation to ensure good composition.
+         sdxl_base = {
              "1:1": (1024, 1024),
-             "3:4": (896, 1152),
-             "4:3": (1152, 896),
-             "16:9": (1216, 832),
-             "2K": (2048, 1080),
-             "2KPortrait": (1080, 2048),
-             "Portrait": (1080, 2048),
-             "4K": (3840, 2160),
-             "Landscape": (3840, 2160),
-             "4KPortrait": (2160, 3840),
-             "9:16": (832, 1216),
-             "3:2": (1216, 832),
-             "2:3": (832, 1216),
+             "Portrait": (896, 1152),  # 3:4
+             "Landscape": (1216, 832),  # 3:2 or 16:9 approx
          }
-         # Default to 1:1 (1024x1024) if unknown
-         return resolutions.get(aspect_ratio, (1024, 1024))
+
+         # The Logic: Define the target, map it to the closest SDXL base
+         # Format: "Key": ((Base_W, Base_H), (Target_W, Target_H))
+         resolutions = {
+             # Standard
+             "1:1": ((832, 832), (1024, 1024)),
+             "3:4": ((832, 1152), (1664, 2304)),
+             "4:3": ((1152, 832), (2304, 1664)),
+             # High Res (The logic changes here)
+             "16:9": ((1216, 832), (2048, 1152)),
+             "9:16": ((832, 1216), (1152, 2048)),
+             # 2K Tier
+             "2K": ((1216, 832), (2048, 1152)),  # Base is 1216x832 -> Upscale to 2K
+             "2KPortrait": ((832, 1216), (1152, 2048)),
+             # 4K Tier (The generated image will be upscaled ~3x)
+             "4K": ((1216, 832), (3840, 2160)),
+             "4KPortrait": ((832, 1216), (2160, 3840)),
+         }
+
+         # Default to 1:1 if unknown
+         return resolutions.get(aspect_ratio, ((832, 832), (1024, 1024)))
 
      def generate_image(
          self, prompt, negative_prompt="", files=None, aspect_ratio="2KPortrait"
      ):
-         # # 1. CLIP Token Limit Fix (Auto-Summarize)
-         # if len(prompt) > 800:
-         #     log("⚠️ Prompt exceeds CLIP limit. rewriting...", _print=True)
-         #     summary_instruction = (
-         #         "Convert the description into a comma-separated Stable Diffusion prompt. "
-         #         "Keep visual elements and style. Under 50 words."
-         #     )
-         #     new_prompt = self.generate_text(
-         #         message=prompt, additional_instructions=summary_instruction, context={}
-         #     )
-         #     if new_prompt and len(new_prompt) > 10:
-         #         prompt = new_prompt
-
-         # 2. Resolution Calculation
-         width, height = self._get_dimensions(aspect_ratio)
-
-         # 3. Construct Payload
-         # We send both the abstract params (for logging/metadata)
-         # and the concrete pixels (for the engine).
+         # 1. Resolution Calculation
+         (base_w, base_h), (target_w, target_h) = self._get_dimensions(aspect_ratio)
+
+         # 2. Construct Base Generation Payload
+         # We tell the AI to generate the smaller, stable size first.
          data = {
              "prompt": prompt,
              "negative_prompt": negative_prompt,
              "aspect_ratio": aspect_ratio,
-             "width": width,  # <--- Calculated Pixel Width
-             "height": height,  # <--- Calculated Pixel Height
+             "width": base_w,
+             "height": base_h,
          }
 
          try:
-             # Handle Files (Corrected List Logic)
-             # requests.post expects a list of tuples for multiple files with same key
+             # Handle Input Files (for Img2Img)
              files_list = []
             if files and isinstance(files, dict):
                 for fn, f_bytes in files.items():
@@ -307,20 +340,45 @@ class LocalAIModel(AutoModel):
                          file_obj = io.BytesIO(f_bytes)
                      else:
                          file_obj = f_bytes
-                     # Appending to list instead of overwriting dict key
                      files_list.append(("files", (fn, file_obj, "image/png")))
 
-             # Send Request
+             # 3. Step 1: Generate Base Image
+             url = f"{self._media_url}/generate-image"
              if files_list:
-                 response = requests.post(
-                     f"{self._media_url}/generate-image", data=data, files=files_list
-                 )
+                 response = requests.post(url, data=data, files=files_list)
              else:
-                 response = requests.post(f"{self._media_url}/generate-image", data=data)
+                 response = requests.post(url, data=data)
 
              response.raise_for_status()
-             log("==== LocalAI Image Payload ====", data, _print=True)
-             return response.content
+             image_content = response.content
+
+             # 4. Step 2: Upscale (If necessary)
+             if (base_w, base_h) != (target_w, target_h):
+                 log(
+                     f"Requesting AI Upscale: {base_w}x{base_h} -> {target_w}x{target_h}...",
+                     _print=True,
+                 )
+
+                 # Prepare payload for the /upscale route
+                 upscale_data = {
+                     "prompt": prompt,  # Reuse prompt to guide texture generation
+                     "width": target_w,  # Explicitly tell server the target size
+                     "height": target_h,
+                 }
+
+                 # Send the image we just generated back to the server as a file
+                 upscale_files = {
+                     "file": ("generated.png", io.BytesIO(image_content), "image/png")
+                 }
+
+                 upscale_response = requests.post(
+                     f"{self._media_url}/upscale", data=upscale_data, files=upscale_files
+                 )
+                 upscale_response.raise_for_status()
+                 image_content = upscale_response.content
+
+             log("==== LocalAI Image Generation Complete ====", data, _print=True)
+             return image_content
 
          except Exception as e:
             log(f"Image Gen Error: {e}", _print=True)
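
Note: image generation is now a two-step flow: render at an SDXL-friendly base bucket, then POST the result to /upscale whenever the target resolution differs from the base. A caller-side sketch, assuming a LocalAIModel instance with a reachable _media_url (the output filename is illustrative):

    model = LocalAIModel()

    # "4K" maps to a 1216x832 base render that the media server upscales to 3840x2160.
    png_bytes = model.generate_image(
        prompt="a lighthouse at dusk, volumetric fog, cinematic lighting",
        negative_prompt="blurry, low quality, artifacts",
        aspect_ratio="4K",
    )
    if png_bytes:
        with open("lighthouse_4k.png", "wb") as f:
            f.write(png_bytes)
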
autonomous/ai/models/mock_model.py CHANGED
@@ -38,15 +38,7 @@ class MockAIModel(AutoModel):
          log(f"⚡ [MOCK] Generating JSON for prompt: {message[:50]}...", _print=True)
 
          # 1. Default Mock Object
-         mock_response = {
-             "name": "The Mockingbird Tavern",
-             "description": "A glitchy, holographic tavern that only exists in offline mode. The ale tastes like static.",
-             "backstory": "Created by a developer at 30,000 feet, this tavern serves as a placeholder for real content. It was built on the ruins of a NullReferenceException.",
-             "appearance": "Wireframe walls with textures that haven't loaded yet.",
-             "secrets": "If you look closely at the bartender, you can see he is just a looping IF statement.",
-             "tags": ["offline", "dev-mode", "test"],
-             "type": "Location",
-         }
+         mock_response = {}
 
          # 2. Heuristic: If the user provided a context with 'name', use it.
          # This makes the mock feel slightly responsive.
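
Note: with the canned tavern object removed, the mock falls back to the heuristic described in the comment above. A minimal sketch of that idea; the rest of the method body is not shown in this diff, so the names below are illustrative only:

    mock_response = {}
    context = {"name": "Aldric the Cartographer"}  # illustrative caller-supplied context
    if isinstance(context, dict) and context.get("name"):
        mock_response["name"] = context["name"]    # echo the caller's name back
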
autonomous/taskrunner/autotasks.py CHANGED
@@ -1,15 +1,18 @@
  import os
  from enum import Enum
+
  from redis import Redis
  from rq import Queue
  from rq.job import Job
 
+
  # 1. Define Priorities clearly
  class TaskPriority(Enum):
      HIGH = "high"
      DEFAULT = "default"
      LOW = "low"
 
+
  class AutoTask:
      def __init__(self, job):
          self.job = job
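
Note: the enum values double as the RQ queue names used later in this module ("high", "default", "low"). A small sketch of normalizing a caller-supplied priority to a queue name; resolve_queue_name is hypothetical and not part of this package:

    def resolve_queue_name(priority=TaskPriority.DEFAULT):
        # Accept either a TaskPriority member or its string value.
        if isinstance(priority, TaskPriority):
            return priority.value
        return TaskPriority(priority).value  # raises ValueError for unknown strings

    assert resolve_queue_name(TaskPriority.HIGH) == "high"
    assert resolve_queue_name("low") == "low"
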
@@ -28,9 +31,10 @@ class AutoTask:
              "id": self.id,
              "return_value": self.job.result,
              "status": self.status,
-             "error": self.job.exc_info
+             "error": self.job.exc_info,
          }
 
+
  class AutoTasks:
      _connection = None
      # We remove the single 'queue' class attribute because we now have multiple
@@ -76,12 +80,7 @@ class AutoTasks:
          q = self._get_queue(queue_name)
 
          # 4. Enqueue
-         job = q.enqueue(
-             func,
-             args=args,
-             kwargs=kwargs,
-             job_timeout=job_timeout
-         )
+         job = q.enqueue(func, args=args, kwargs=kwargs, job_timeout=job_timeout)
 
          return AutoTask(job)
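
Note: the reformatted call is a standard rq enqueue. A self-contained sketch of the same pattern, assuming a Redis server on its default port; send_report is a placeholder task function:

    from redis import Redis
    from rq import Queue

    def send_report(user_id, fmt="pdf"):
        return f"report for {user_id} as {fmt}"

    q = Queue("high", connection=Redis())
    job = q.enqueue(send_report, args=(42,), kwargs={"fmt": "csv"}, job_timeout=300)
    print(job.get_id())
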
@@ -90,4 +89,41 @@ class AutoTasks:
              job = Job.fetch(job_id, connection=AutoTasks._connection)
              return AutoTask(job)
          except Exception:
-             return None
+             return None
+
+     def get_tasks(self):
+
+         high_queue = Queue("high", connection=self._connection)
+         default_queue = Queue("default", connection=self._connection)
+         low_queue = Queue("low", connection=self._connection)
+
+         registries = {
+             "started": [
+                 high_queue.started_job_registry,
+                 default_queue.started_job_registry,
+                 low_queue.started_job_registry,
+             ],
+             "finished": [
+                 high_queue.finished_job_registry,
+                 default_queue.finished_job_registry,
+                 low_queue.finished_job_registry,
+             ],
+             "failed": [
+                 high_queue.failed_job_registry,
+                 default_queue.failed_job_registry,
+                 low_queue.failed_job_registry,
+             ],
+             "queued": [high_queue, default_queue, low_queue],
+         }
+
+         tasks = {}
+         for status, regs in registries.items():
+             all_job_ids = []
+             for reg in regs:
+                 all_job_ids.extend(reg.get_job_ids())
+             # Use a set to remove duplicate job_ids if a job is in multiple registries
+             unique_job_ids = sorted(list(set(all_job_ids)), reverse=True)
+             jobs = Job.fetch_many(unique_job_ids, connection=self._connection)
+             # Filter out None values in case a job expired between fetch and get
+             tasks[status] = [job for job in jobs if job]
+         return tasks
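
Note: get_tasks pools job ids from the started, finished, and failed registries of all three priority queues, plus the jobs still waiting in each queue. A minimal monitoring sketch, assuming the class-level Redis connection has already been configured (that setup is outside this diff):

    tasks = AutoTasks()

    snapshot = tasks.get_tasks()
    for status, jobs in snapshot.items():
        print(f"{status}: {len(jobs)} job(s)")
        for job in jobs:
            # rq.job.Job exposes id and func_name for basic reporting
            print(f"  {job.id} -> {job.func_name}")
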
{autonomous_app-0.3.48.dist-info → autonomous_app-0.3.50.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: autonomous-app
- Version: 0.3.48
+ Version: 0.3.50
  Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
  Author-email: Steven A Moore <samoore@binghamton.edu>
  Project-URL: homepage, https://github.com/Sallenmoore/autonomous
{autonomous_app-0.3.48.dist-info → autonomous_app-0.3.50.dist-info}/RECORD RENAMED
@@ -1,16 +1,16 @@
- autonomous/__init__.py,sha256=5NSHkTcQ1Im9Xo5O3r5nNTJr-fjzyglGsXdHD9FPF8Q,95
+ autonomous/__init__.py,sha256=Bfx9V4lwColWq8lzS8MH_02fwl8OseyZms6DSENLM_g,95
  autonomous/cli.py,sha256=z4AaGeWNW_uBLFAHng0J_lfS9v3fXemK1PeT85u4Eo4,42
  autonomous/logger.py,sha256=NQtgEaTWNAWfLSgqSP7ksXj1GpOuCgoUV711kSMm-WA,2022
  autonomous/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autonomous/ai/audioagent.py,sha256=aZ25eEdze8S060-a4S0k319tgyl2aDTUa8dJu07mXn0,1092
+ autonomous/ai/audioagent.py,sha256=6gMcmLrEuMdbjd9NnwzVmxeToFqXQ6ppAQCRWMcmg_Y,1091
  autonomous/ai/baseagent.py,sha256=icOPygr1NdH64u1ZYbwHHywYIY1ZtaLY9HtfNmUbx4k,4702
  autonomous/ai/imageagent.py,sha256=1RT7OYTnRUo3q5k5w83A3cOh3hXUlrx0jRkg0YJSgZ0,900
  autonomous/ai/jsonagent.py,sha256=NpF-bJXolTBeW9xVdeEfdNwzdlaaRMjFwo_s-d9ApLM,1166
  autonomous/ai/textagent.py,sha256=0y2Hvb9pup1OnsA51hGPcD8yllZOZtztDLQvCNYABaw,1043
  autonomous/ai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autonomous/ai/models/gemini.py,sha256=JtMw1RatDuTtDaVe5sp_t-nOdWbGvJzljWHhNh9o3yk,13107
- autonomous/ai/models/local_model.py,sha256=-2-b_W2tvjgMMI15u9Y83olOgbnyp847zR9GKIOrbYs,12064
- autonomous/ai/models/mock_model.py,sha256=fjIThcfOuZYj6mWEQZJg8fIwGABKRR3j5WVjh6hgnPY,3694
+ autonomous/ai/models/local_model.py,sha256=o2C7hrSrOyXgDqzYl1AX0cdoPCICfk_pAGm65OQDbtg,14197
+ autonomous/ai/models/mock_model.py,sha256=IT4ip821ZewDOFbHrAFJ3Ks_OUIN-Z0lEF6oGs5j4WU,3061
  autonomous/apis/version_control/GHCallbacks.py,sha256=AyiUlYfV5JePi11GVyqYyXoj5UTbPKzS-HRRI94rjJo,1069
  autonomous/apis/version_control/GHOrganization.py,sha256=mi2livdsGurKiifbvuLwiFbdDzL77IlEfhwEa-tG77I,1155
  autonomous/apis/version_control/GHRepo.py,sha256=hTFHMkxSbSlVELfh8S6mq6ijkIKPRQO-Q5775ZjRKD4,4622
@@ -53,10 +53,10 @@ autonomous/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
  autonomous/storage/imagestorage.py,sha256=SmBjBNBlP1ZEjxdOnGVzCHZhbEhMKTUQC2TbpWbejDE,6168
  autonomous/storage/localstorage.py,sha256=FzrR6O9mMGAZt5dDgqzkeOQVfGRXCygR0kksz2MPpwE,2286
  autonomous/taskrunner/__init__.py,sha256=ughX-QfWBas5W3aB2SiF887SWJ3Dzc2X43Yxtmpl43k,47
- autonomous/taskrunner/autotasks.py,sha256=2zRaqHYqfdlgC_BQm6B6D2svN1ukyWeJJHwweZFHVoo,2616
+ autonomous/taskrunner/autotasks.py,sha256=smrWEGBE9QLpLco4ydOFi_gVcFp1mVxfQioACL2FBA8,4029
  autonomous/taskrunner/task_router.py,sha256=W09HtRUuhwlnGxM5w4l6Hzw6mfS6L4ljWiMzD3ZVFeU,601
  autonomous/utils/markdown.py,sha256=tf8vlHARiQO1X_aGbqlYozzP_TbdiDRT9EEP6aFRQo0,2153
- autonomous_app-0.3.48.dist-info/METADATA,sha256=jfU8tlq0YqBXvWsTgyqSHF70JTqS3el1X7jG2BdPCJM,3024
- autonomous_app-0.3.48.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- autonomous_app-0.3.48.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
- autonomous_app-0.3.48.dist-info/RECORD,,
+ autonomous_app-0.3.50.dist-info/METADATA,sha256=g_6i1h0xP6Ee-izUtTmz2-nyAEPBI9pIVwSDGmiGG-s,3024
+ autonomous_app-0.3.50.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ autonomous_app-0.3.50.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
+ autonomous_app-0.3.50.dist-info/RECORD,,