autonomous_app-0.3.28-py3-none-any.whl → autonomous_app-0.3.30-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
autonomous/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.3.28"
+ __version__ = "0.3.30"

  from dotenv import load_dotenv

autonomous/ai/baseagent.py CHANGED
@@ -3,24 +3,16 @@ from autonomous.model.autoattr import ReferenceAttr
  from autonomous.model.automodel import AutoModel

  from .models.gemini import GeminiAIModel
+ from .models.local_model import LocalAIModel
  from .models.openai import OpenAIModel


- def clear_agents():
-     for agent in OpenAIModel.all():
-         log(f"Deleting {agent.name}")
-         agent.clear_agents()
-         agent.clear_files()
-         agent.delete()
-     return "Success"
-
-
  class BaseAgent(AutoModel):
      meta = {"abstract": True, "allow_inheritance": True, "strict": False}

-     client = ReferenceAttr(choices=[GeminiAIModel])
+     client = ReferenceAttr(choices=[LocalAIModel])

-     _ai_model = GeminiAIModel
+     _ai_model = LocalAIModel

      def delete(self):
          if self.client:
@@ -31,12 +23,11 @@ class BaseAgent(AutoModel):
          return self.get_client().id

      def get_client(self):
-         if self.client is None:
-             self.client = self._ai_model(
-                 name=self.name,
-                 instructions=self.instructions,
-                 description=self.description,
-             )
-             self.client.save()
-             self.save()
+         self.client = self._ai_model(
+             name=self.name,
+             instructions=self.instructions,
+             description=self.description,
+         )
+         self.client.save()
+         self.save()
          return self.client
autonomous/ai/models/local_model.py ADDED
@@ -0,0 +1,392 @@
+ import io
+ import json
+ import os
+ import random
+
+ import numpy as np
+ import pymongo
+ import redis
+ import requests
+ from bson.objectid import ObjectId
+ from pydub import AudioSegment
+
+ from autonomous import log
+ from autonomous.model.autoattr import ListAttr, StringAttr
+ from autonomous.model.automodel import AutoModel
+
+
+ class LocalAIModel(AutoModel):
+     messages = ListAttr(StringAttr(default=[]))
+     name = StringAttr(default="agent")
+     instructions = StringAttr(default="You are a helpful AI.")
+     description = StringAttr(default="A Local AI Model using Ollama and Media AI.")
+
+     # Config
+     _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama_internal:11434/api")
+     _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai_internal:5005")
+     _text_model = "mistral-nemo"
+     _json_model = "mistral-nemo"
+
+     # DB Connections
+     _mongo_client = pymongo.MongoClient("mongodb://db:27017/")
+     _mongo_db = os.getenv("DB_DB", "default")
+     _redis = redis.Redis(host="cachedb", port=6379, decode_responses=True)
+
+     VOICES = {
+         "Zephyr": ["female"],
+         "Puck": ["male"],
+         "Charon": ["male"],
+         "Kore": ["female"],
+         "Fenrir": ["non-binary"],
+         "Leda": ["female"],
+         "Orus": ["male"],
+         "Aoede": ["female"],
+         "Callirhoe": ["female"],
+         "Autonoe": ["female"],
+         "Enceladus": ["male"],
+         "Iapetus": ["male"],
+         "Umbriel": ["male"],
+         "Algieba": ["male"],
+         "Despina": ["female"],
+         "Erinome": ["female"],
+         "Algenib": ["male"],
+         "Rasalgethi": ["non-binary"],
+         "Laomedeia": ["female"],
+         "Achernar": ["female"],
+         "Alnilam": ["male"],
+         "Schedar": ["male"],
+         "Gacrux": ["female"],
+         "Pulcherrima": ["non-binary"],
+         "Achird": ["male"],
+         "Zubenelgenubi": ["male"],
+         "Vindemiatrix": ["female"],
+         "Sadachbia": ["male"],
+         "Sadaltager": ["male"],
+         "Sulafar": ["female"],
+     }
+
+     def _convert_tools_to_json_schema(self, user_function):
+         """
+         Ollama doesn't support 'tools' strictly yet.
+         We convert the tool definition into a system prompt instruction.
+         """
+         # If the user passes a raw dictionary (like a Gemini tool definition)
+         # we extract the relevant parts for the schema.
+         schema = {
+             "name": user_function.get("name"),
+             "description": user_function.get("description", ""),
+             "parameters": user_function.get("parameters", {}),
+         }
+         return json.dumps(schema, indent=2)
+
+     def get_embedding(self, text):
+         try:
+             res = requests.post(f"{self._media_url}/embeddings", json={"text": text})
+             res.raise_for_status()
+             return res.json()["embedding"]
+         except Exception as e:
+             log(f"Embedding Error: {e}", _print=True)
+             return []
+
+     def build_hybrid_context(self, prompt, focus_object_id=None):
+         """
+         Builds context based on RELATIONAL ASSOCIATIONS + SEMANTIC LORE.
+         """
+
+         # 1. Create a Cache Key based on what defines the "Scene"
+         # We assume 'focus_object_id' + rough prompt length captures the context enough
+         cache_key = f"ctx:{focus_object_id}:{len(prompt) // 50}"
+
+         # 2. Check Cache
+         cached_ctx = self._redis.get(cache_key)
+         if cached_ctx:
+             return cached_ctx
+
+         context_str = ""
+
+         # --- PART 1: MONGODB (Relational Associations) ---
+         # If we are focusing on a specific object, fetch it and its specific refs.
+         if focus_object_id:
+             try:
+                 # 1. Fetch the Main Object
+                 # Handle both string ID and ObjectId
+                 oid = (
+                     ObjectId(focus_object_id)
+                     if isinstance(focus_object_id, str)
+                     else focus_object_id
+                 )
+
+                 main_obj = self._mongo_db.objects.find_one({"_id": oid})
+
+                 if main_obj:
+                     # Start the context with the main object itself
+                     context_str += "### FOCUS OBJECT ###\n"
+                     context_str += prompt
+
+                     # 2. Extract References (Associations)
+                     # 1. Start with the main list
+                     ref_ids = main_obj.get("associations", []) or []
+
+                     # 2. Safely add single fields (if they exist)
+                     if world_id := main_obj.get("world"):
+                         ref_ids.append(world_id)
+
+                     # 3. Safely add lists (ensure they are lists)
+                     ref_ids.extend(main_obj.get("stories", []) or [])
+                     ref_ids.extend(main_obj.get("events", []) or [])
+
+                     if ref_ids:
+                         # Convert all to ObjectIds if they are strings
+                         valid_oids = []
+                         for rid in ref_ids:
+                             try:
+                                 valid_oids.append(
+                                     ObjectId(rid) if isinstance(rid, str) else rid
+                                 )
+                             except:
+                                 pass
+
+                         # 3. Fetch all associated objects in ONE query
+                         if valid_oids:
+                             associated_objs = self._mongo_db.objects.find(
+                                 {"_id": {"$in": valid_oids}}
+                             )
+
+                             context_str += "\n### ASSOCIATED REFERENCES ###\n"
+                             for obj in associated_objs:
+                                 log(f"Associated Obj: {obj}", _print=True)
+                                 context_str += f"- {obj}\n"
+
+                     context_str += "\n"
+             except Exception as e:
+                 log(f"Mongo Association Error: {e}", _print=True)
+
+         # --- PART 2: REDIS (Semantic Search) ---
+         # We keep this! It catches "Lore" or "Rules" that aren't explicitly linked in the DB.
+         # e.g., If the sword is "Elven", this finds "Elven History" even if not linked by ID.
+         if len(prompt) > 10:
+             vector = self.get_embedding(prompt)
+             if vector:
+                 try:
+                     q = "*=>[KNN 2 @vector $blob AS score]"  # Lowered to 2 to save tokens
+                     params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
+                     results = self._redis.ft("search_index").search(
+                         q, query_params=params
+                     )
+
+                     if results.docs:
+                         context_str += "### RELEVANT LORE ###\n"
+                         for doc in results.docs:
+                             context_str += f"- {doc.content}\n"
+                 except Exception as e:
+                     pass
+
+         # 3. Save to Cache (Expire in 60s)
+         # This prevents hammering the DB/Vector engine during a rapid conversation
+         self._redis.set(cache_key, context_str, ex=120)
+
+         return context_str
+
+     def generate_json(self, message, function, additional_instructions="", **kwargs):
+         """
+         Mimics Gemini's tool use by forcing Ollama into JSON mode
+         and injecting the schema into the prompt.
+         """
+         schema_str = self._convert_tools_to_json_schema(function)
+
+         focus_pk = kwargs.get("focus_object")
+
+         # Build Relational Context
+         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
+
+         # Construct System Prompt
+         full_system_prompt = (
+             f"{self.instructions}. {additional_instructions}\n"
+             f"You must respond strictly with a valid JSON object matching this schema:\n"
+             f"{schema_str}\n"
+             f"Do not include markdown formatting or explanations."
+             f"You must strictly adhere to the following context:\n"
+             f"{world_context}"
+         )
+
+         payload = {
+             "model": self._json_model,
+             "prompt": message,
+             "system": full_system_prompt,
+             "format": "json",  # Force JSON mode
+             "stream": False,
+             "keep_alive": "24h",
+         }
+
+         try:
+             response = requests.post(f"{self._ollama_url}/generate", json=payload)
+             response.raise_for_status()
+             result_text = response.json().get("response", "{}")
+
+             # log(f"Raw Local JSON: {result_text}", _print=True)
+             return json.loads(result_text)
+
+         except Exception as e:
+             log(f"==== LocalAI JSON Error: {e} ====", _print=True)
+             return {}
+
+     def generate_text(self, message, additional_instructions="", **kwargs):
+         """
+         Standard text generation via Ollama.
+         """
+         focus_pk = kwargs.get("focus_object")
+
+         # Build Relational Context
+         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
+
+         # Construct System Prompt
+         full_system_prompt = (
+             f"{self.instructions}. {additional_instructions}\n"
+             f"You must strictly adhere to the following context:\n"
+             f"{world_context}"
+         )
+
+         payload = {
+             "model": self._text_model,
+             "prompt": message,
+             "system": full_system_prompt,
+             "stream": False,
+             "keep_alive": "24h",
+         }
+
+         try:
+             response = requests.post(f"{self._ollama_url}/generate", json=payload)
+             response.raise_for_status()
+             return response.json().get("response", "")
+         except Exception as e:
+             log(f"==== LocalAI Text Error: {e} ====", _print=True)
+             return "Error generating text."
+
+     def summarize_text(self, text, primer="", **kwargs):
+         primer = primer or "Summarize the following text concisely."
+
+         # Simple chunking logic (similar to your original)
+         # Note: Mistral-Nemo has a large context window (128k), so chunking
+         # is less necessary than with older models, but we keep it for safety.
+         max_chars = 12000  # Roughly 3k tokens
+         chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
+
+         full_summary = ""
+         for chunk in chunks:
+             payload = {
+                 "model": self._text_model,
+                 "prompt": f"{primer}:\n\n{chunk}",
+                 "stream": False,
+                 "keep_alive": "24h",
+             }
+             try:
+                 res = requests.post(f"{self._ollama_url}/generate", json=payload)
+                 full_summary += res.json().get("response", "") + "\n"
+             except Exception as e:
+                 log(f"Summary Error: {e}", _print=True)
+                 break
+
+         return full_summary
+
+     def generate_audio_text(self, audio_file, prompt="", **kwargs):
+         """
+         Sends audio bytes to the Media AI container for Whisper transcription.
+         """
+         try:
+             # Prepare the file for upload
+             # audio_file is likely bytes, so we wrap in BytesIO if needed
+             if isinstance(audio_file, bytes):
+                 f_obj = io.BytesIO(audio_file)
+             else:
+                 f_obj = audio_file
+
+             files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
+
+             response = requests.post(f"{self._media_url}/transcribe", files=files)
+             response.raise_for_status()
+             return response.json().get("text", "")
+
+         except Exception as e:
+             log(f"STT Error: {e}", _print=True)
+             return ""
+
+     def generate_audio(self, prompt, voice=None, **kwargs):
+         """
+         Sends text to the Media AI container for TTS.
+         """
+         voice = voice or random.choice(list(self.VOICES.keys()))
+
+         try:
+             payload = {"text": prompt, "voice": voice}
+             response = requests.post(f"{self._media_url}/tts", json=payload)
+             response.raise_for_status()
+
+             # Response content is WAV bytes
+             wav_bytes = response.content
+
+             # Convert to MP3 to match your original interface (using pydub)
+             audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
+             mp3_buffer = io.BytesIO()
+             audio.export(mp3_buffer, format="mp3")
+             return mp3_buffer.getvalue()
+
+         except Exception as e:
+             log(f"TTS Error: {e}", _print=True)
+             return None
+
+     def generate_image(self, prompt, negative_prompt="", **kwargs):
+         """
+         Generates an image using Local AI.
+         If 'files' are provided, performs Image-to-Image generation using the first file as reference.
+         """
+         try:
+             # Prepare the multipart data
+             # We send the prompt as a form field
+             data = {"prompt": prompt, "negative_prompt": negative_prompt}
+             files = {}
+
+             # Check if reference images were passed
+             if kwargs.get("files"):
+                 # Take the first available file
+                 for fn, f_bytes in kwargs.get("files").items():
+                     # If f_bytes is bytes, wrap in IO, else assume it's file-like
+                     if isinstance(f_bytes, bytes):
+                         file_obj = io.BytesIO(f_bytes)
+                     else:
+                         file_obj = f_bytes
+
+                     # Add to the request files
+                     # Key must be 'file' to match server.py logic
+                     # TODO: Support multiple images if needed
+                     files["file"] = (fn, file_obj, "image/png")
+                     break  # We only support 1 reference image for SD Img2Img
+
+             # Send Request
+             if files:
+                 # Multipart/form-data request (Prompt + File)
+                 response = requests.post(
+                     f"{self._media_url}/generate-image", data=data, files=files
+                 )
+             else:
+                 # Standard request (Prompt only) - server.py handles request.form vs json
+                 # But our updated server expects form data for consistency
+                 response = requests.post(f"{self._media_url}/generate-image", data=data)
+
+             response.raise_for_status()
+
+             # Returns WebP bytes directly
+             return response.content
+
+         except Exception as e:
+             log(f"Image Gen Error: {e}", _print=True)
+             return None
+
+     def list_voices(self, filters=[]):
+         # Same logic as before
+         if not filters:
+             return list(self.VOICES.keys())
+         voices = []
+         for voice, attribs in self.VOICES.items():
+             if any(f.lower() in attribs for f in filters):
+                 voices.append(voice)
+         return voices
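The new LocalAIModel routes text and JSON generation through Ollama's /api/generate endpoint and delegates media work (embeddings, TTS, STT, images) to the companion Media AI service. A minimal usage sketch, assuming both containers are reachable at the defaults above; the schema dict and prompts are hypothetical:

    from autonomous.ai.models.local_model import LocalAIModel

    agent = LocalAIModel(name="librarian", instructions="You are a lore archivist.")

    # generate_json() serializes this dict via _convert_tools_to_json_schema(),
    # injects it into the system prompt, and sets format="json" in the payload.
    npc_schema = {
        "name": "create_npc",
        "description": "Generate a non-player character.",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "occupation": {"type": "string"},
            },
        },
    }

    npc = agent.generate_json("Invent a dockworker NPC.", function=npc_schema)
    print(npc.get("name"))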
autonomous/db/__init__.py CHANGED
@@ -5,6 +5,7 @@
  # `from autonomous.db import *` and then `connect('testdb')`.
  from autonomous.db import (
      connection,
+     db_sync,
      document,
      errors,
      fields,
autonomous/db/db_sync.py ADDED
@@ -0,0 +1,140 @@
+ import os
+ import time
+ import urllib.parse
+ import uuid
+ from datetime import datetime
+
+ import numpy as np
+ import pymongo
+ import redis
+ import requests
+
+ # CONFIGURATION
+ db_host = os.getenv("DB_HOST", "db")
+ db_port = os.getenv("DB_PORT", 27017)
+ password = urllib.parse.quote_plus(str(os.getenv("DB_PASSWORD")))
+ username = urllib.parse.quote_plus(str(os.getenv("DB_USERNAME")))
+ MEDIA_URL = "http://media_ai_internal:5005"
+ REDIS_HOST = os.getenv("REDIS_HOST", "cachedb")
+ MONGO_URI = f"mongodb://{username}:{password}@{db_host}:{db_port}/?authSource=admin"
+
+ # DB SETUP
+ r = redis.Redis(host=REDIS_HOST, port=6379, decode_responses=True)
+
+ mongo = pymongo.MongoClient(MONGO_URI)
+ db = mongo[os.getenv("DB_DB")]
+ # connect(host=f"mongodb://{username}:{password}@{host}:{port}/{dbname}?authSource=admin")
+
+
+ def get_vector(text):
+     """Helper to get embedding from your Media AI container"""
+     try:
+         resp = requests.post(f"{MEDIA_URL}/embeddings", json={"text": text}, timeout=30)
+         if resp.status_code == 200:
+             return resp.json()["embedding"]
+     except Exception as e:
+         print(f"Vector Gen Failed: {e}")
+     return None
+
+
+ def process_single_object_sync(object_id, collection_name, token):
+     """
+     THE WORKER FUNCTION (Runs in Background).
+     It is safe to sleep here because we are not in the web request.
+     """
+     str_id = str(object_id)
+     token_key = f"sync_token:{collection_name}:{str_id}"
+
+     # 1. THE DEBOUNCE WAIT (Happens in background)
+     print(f"Debouncing {str_id} for 5 seconds...")
+     time.sleep(5)
+
+     # 2. THE VERIFICATION
+     # Check if a newer save happened while we slept
+     current_active_token = r.get(token_key)
+
+     if current_active_token != token:
+         print(f"Skipping sync for {str_id}: Superseded by a newer save.")
+         return
+
+     # 3. THE EXECUTION (Embedding generation)
+     print(f"Processing Sync for: {str_id} in {collection_name}")
+
+     from bson.objectid import ObjectId
+
+     # FIX: Use dynamic collection access instead of db.objects
+     try:
+         # Tries to convert string ID to ObjectId.
+         # If your DB uses String IDs, remove the ObjectId() wrapper.
+         oid = ObjectId(object_id)
+         doc = db[collection_name].find_one({"_id": oid})
+     except Exception:
+         # Fallback if ID is not a valid ObjectId string
+         doc = db[collection_name].find_one({"_id": object_id})
+
+     if not doc:
+         print(f"Object {object_id} not found in collection '{collection_name}'")
+         # Optional: Remove from Redis index if it exists
+         r.delete(f"lore:{object_id}")
+         return
+
+     # 2. Construct Searchable Text
+     # (Existing logic...)
+     searchable_text = (
+         f"{doc.get('name', '')}: {doc.get('description', '')} {doc.get('history', '')}"
+     )
+
+     if len(searchable_text) < 10:
+         return
+
+     # 3. Generate Vector
+     vector = get_vector(searchable_text)
+
+     # 4. Save to Redis Index
+     if vector:
+         r.hset(
+             f"lore:{object_id}",
+             mapping={
+                 "mongo_id": str(object_id),
+                 "collection": collection_name,  # Useful for debugging
+                 "content": searchable_text,
+                 "vector": np.array(vector, dtype=np.float32).tobytes(),
+                 "last_synced": datetime.utcnow().isoformat(),
+             },
+         )
+         print(f"Successfully Indexed: {doc.get('name')}")
+
+
+ def request_indexing(object_id, collection_name):
+     """
+     THE TRIGGER FUNCTION (Runs in Main App).
+     MUST BE FAST. NO SLEEPING HERE.
+     """
+     # Import your Queue Wrapper
+     from autonomous.tasks.autotask import AutoTasks
+
+     # Initialize the Task Runner
+     task_runner = AutoTasks()
+
+     str_id = str(object_id)
+     token_key = f"sync_token:{collection_name}:{str_id}"
+
+     # 1. GENERATE NEW TOKEN
+     current_token = str(uuid.uuid4())
+
+     # 2. SAVE TOKEN TO REDIS (Instant)
+     r.set(token_key, current_token, ex=300)
+
+     # 3. ENQUEUE THE TASK (Instant)
+     # CRITICAL CHANGE: We use task_runner.task() instead of calling the function directly.
+     try:
+         task_runner.task(
+             process_single_object_sync,  # The function to run later
+             object_id=str_id,
+             collection_name=collection_name,
+             token=current_token,
+         )
+         return True
+     except Exception as e:
+         print(f"Sync Enqueue failed: {e}")
+         return False
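The token check in process_single_object_sync() is a last-write-wins debounce: every save overwrites the token in Redis, each enqueued worker sleeps through the debounce window, and only the worker holding the newest token proceeds. A self-contained sketch of the same pattern, with a plain dict standing in for Redis and a shortened window; all names here are illustrative:

    import time
    import uuid

    tokens = {}  # stands in for the Redis sync_token keys

    def request(key):
        tokens[key] = str(uuid.uuid4())  # the newest save always wins
        return tokens[key]

    def worker(key, token):
        time.sleep(0.01)  # debounce window (5 seconds in db_sync)
        if tokens.get(key) != token:  # superseded while we slept?
            return "skipped"
        return "processed"

    t1 = request("obj:1")
    t2 = request("obj:1")  # a second save supersedes the first
    assert worker("obj:1", t1) == "skipped"
    assert worker("obj:1", t2) == "processed"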
autonomous/model/automodel.py CHANGED
@@ -8,6 +8,7 @@ from autonomous import log
  from autonomous.db import Document, connect, signals
  from autonomous.db.errors import ValidationError
  from autonomous.db.fields import DateTimeField
+ from autonomous.db import db_sync

  host = os.getenv("DB_HOST", "db")
  port = os.getenv("DB_PORT", 27017)
@@ -240,6 +241,9 @@ class AutoModel(Document):
          """
          obj = super().save()
          self.pk = obj.pk
+
+         db_sync.request_indexing(self.pk, collection_name=self._get_collection_name())
+
          return self.pk

      @classmethod
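With this hook, every successful save() now schedules a background re-index keyed by the document's collection. A hedged sketch of the effect, assuming AutoModel subclasses accept field keyword arguments; the Artifact model is hypothetical:

    from autonomous.model.automodel import AutoModel
    from autonomous.model.autoattr import StringAttr

    class Artifact(AutoModel):  # hypothetical model
        name = StringAttr(default="")
        description = StringAttr(default="")

    sword = Artifact(name="Elven Blade", description="A sword of elven make.")
    sword.save()  # persists the doc, then request_indexing() enqueues the embedding sync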
autonomous/tasks/autotask.py CHANGED
@@ -1,12 +1,7 @@
- import importlib
  import os
- import subprocess
-
  from redis import Redis
- from rq import Queue, Worker
-
- from autonomous import log
-
+ from rq import Queue
+ from rq.job import Job

  class AutoTask:
      def __init__(self, job):
@@ -18,127 +13,68 @@ class AutoTask:

      @property
      def status(self):
-         status = self.job.get_status()
-         if status in ["running", "queued", "started"]:
-             return "running"
-         return status
-
-     @property
-     def running(self):
-         return self.status == "running"
-
-     @property
-     def finished(self):
-         return self.status == "finished"
-
-     @property
-     def failed(self):
-         return self.status == "failed"
+         return self.job.get_status()

      @property
      def result(self):
-         result = self.job.latest_result()
-         result_dict = {
+         # Simplified result fetching
+         return {
              "id": self.id,
-             "return_value": result.return_value if result else None,
+             "return_value": self.job.result,
              "status": self.status,
-             "error": result.exc_string
-             if result and result.type in [result.Type.FAILED, result.Type.STOPPED]
-             else None,
+             "error": self.job.exc_info
          }

-         return result_dict
-
-     @property
-     def return_value(self):
-         return self.result.get("return_value")
-
-
  class AutoTasks:
      _connection = None
      queue = None
-     workers = []
-     all_tasks = []
+
+     # Config stays the same
      config = {
-         "host": os.environ.get("REDIS_HOST"),
-         "port": os.environ.get("REDIS_PORT"),
+         "host": os.environ.get("REDIS_HOST", "cachedb"),
+         "port": os.environ.get("REDIS_PORT", 6379),
          "password": os.environ.get("REDIS_PASSWORD"),
         "username": os.environ.get("REDIS_USERNAME"),
          "db": os.environ.get("REDIS_DB", 0),
      }

-     def __init__(self, queue="default", num_workers=3):
+     def __init__(self, queue_name="default"):
          if not AutoTasks._connection:
              options = {}
-
-             if AutoTasks.config.get("username"):
-                 options["username"] = AutoTasks.config.get("username")
-             if AutoTasks.config.get("username"):
+             if AutoTasks.config.get("password"):
                  options["password"] = AutoTasks.config.get("password")
-             if AutoTasks.config.get("db"):
-                 options["db"] = AutoTasks.config.get("db")

+             # Create Redis Connection
              AutoTasks._connection = Redis(
                  host=AutoTasks.config.get("host"),
                  port=AutoTasks.config.get("port"),
+                 decode_responses=False,  # RQ requires bytes, not strings
                  **options,
              )
-             AutoTasks.queue = Queue(queue, connection=AutoTasks._connection)
+
+         # Initialize Queue
+         AutoTasks.queue = Queue(queue_name, connection=AutoTasks._connection)

      def task(self, func, *args, **kwargs):
          """
-         :param job: job function
-         :param args: job function args
-         :param kwargs: job function kwargs
-         args and kwargs: use these to explicitly pass arguments and keyword to the underlying job function.
-         _task_<option>:pass options to the task object
-         :return: job
+         Enqueues a job to Redis. Does NOT start a worker.
          """
+         job_timeout = kwargs.pop("_task_job_timeout", 3600)

+         # Enqueue the job
+         # func can be a string path or the function object itself
          job = AutoTasks.queue.enqueue(
              func,
-             job_timeout=kwargs.get("_task_job_timeout", 3600),
              args=args,
              kwargs=kwargs,
+             job_timeout=job_timeout
          )
-         self.create_worker(func)
-         new_task = AutoTask(job)
-         AutoTasks.all_tasks.append(new_task)
-         return new_task
-
-     def create_worker(self, func):
-         # Get the module containing the target_function
-         module = func.__module__
-
-         # Get the file path of the module
-         module_path = importlib.import_module(module).__file__

-         # Set the PYTHONPATH environment variable
-         pythonpath = os.path.dirname(module_path)
-         env = os.environ.copy()
-         env["PYTHONPATH"] = pythonpath
+         return AutoTask(job)

-         rq_user_pass = f"{self.config['username']}:{self.config['password']}"
-         rq_url = f"{self.config['host']}:{self.config['port']}"
-         rq_db = self.config["db"]
-         rq_worker_command = (
-             f"rq worker --url redis://{rq_user_pass}@{rq_url}/{rq_db} --burst"
-         )
-
-         worker = subprocess.Popen(rq_worker_command, shell=True, env=env)
-         self.workers.append(worker)
-         return worker
-
-     # get job given its id
      def get_task(self, job_id):
-         # breakpoint()
-         task = AutoTasks.queue.fetch_job(job_id)
-         return AutoTask(task)
-
-     # get job given its id
-     def get_tasks(self):
-         return [AutoTask(w) for w in Worker.all(queue=AutoTasks.queue)]
-
-     def clear(self):
-         AutoTasks.queue.empty()
-         AutoTasks.all_tasks = []
+         try:
+             job = Job.fetch(job_id, connection=AutoTasks._connection)
+             return AutoTask(job)
+         except Exception:
+             return None
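AutoTasks no longer spawns burst workers via subprocess; task() only enqueues, so jobs sit in Redis until an external RQ worker drains them. A producer-side sketch plus the standard RQ worker command, assuming the Redis defaults above; add() is illustrative and must live in a module the worker can import:

    from autonomous.tasks.autotask import AutoTasks

    def add(x, y):  # illustrative job function
        return x + y

    tasks = AutoTasks()
    task = tasks.task(add, x=2, y=3)  # enqueues and returns immediately
    print(task.id, task.status)  # e.g. "queued" until a worker picks it up

    # In a separate process or container (standard RQ CLI):
    #   rq worker default --url redis://cachedb:6379/0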
autonomous/tasks/task_router.py ADDED
@@ -0,0 +1,26 @@
+ import re
+ import tasks
+
+ class TaskRouterBase:
+     """
+     Maps URL paths to Task Functions.
+     Acts as the central registry for all background tasks.
+     """
+
+     # Format: (Regex Pattern, Function Object)
+
+
+     @classmethod
+     def resolve(cls, path):
+         """
+         Parses the path, finds the matching function, and extracts arguments.
+         Returns: (function_obj, kwargs_dict) or (None, None)
+         """
+         for pattern, func in cls.ROUTES:
+             match = re.match(pattern, path)
+             if match:
+                 return func, match.groupdict()
+         return None, None
+
+
+     ROUTES = []
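Subclasses are expected to populate ROUTES with (regex, function) pairs; resolve() returns the first matching function along with the pattern's named groups as kwargs. An illustrative sketch; the route and handler are hypothetical:

    from autonomous.tasks.task_router import TaskRouterBase

    def sync_object(object_id):  # hypothetical task function
        return f"syncing {object_id}"

    class MyRouter(TaskRouterBase):
        ROUTES = [
            (r"^/tasks/sync/(?P<object_id>\w+)$", sync_object),
        ]

    func, kwargs = MyRouter.resolve("/tasks/sync/abc123")
    if func:
        print(func(**kwargs))  # -> "syncing abc123"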
autonomous_app-0.3.28.dist-info/METADATA → autonomous_app-0.3.30.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: autonomous-app
- Version: 0.3.28
+ Version: 0.3.30
  Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
  Author-email: Steven A Moore <samoore@binghamton.edu>
  Project-URL: homepage, https://github.com/Sallenmoore/autonomous
@@ -24,8 +24,8 @@ Requires-Dist: gunicorn
  Requires-Dist: Authlib
  Requires-Dist: rq
  Requires-Dist: ollama
- Requires-Dist: openai>=1.42
  Requires-Dist: google-genai
+ Requires-Dist: sentence-transformers
  Requires-Dist: dateparser
  Requires-Dist: python-slugify
  Requires-Dist: pydub
autonomous_app-0.3.28.dist-info/RECORD → autonomous_app-0.3.30.dist-info/RECORD RENAMED
@@ -1,9 +1,9 @@
- autonomous/__init__.py,sha256=cf_i1xIW4gWRL34z5iRAHqNKq4XELsusys3f4hdY1sQ,95
+ autonomous/__init__.py,sha256=yFrrhxW5lT1IN2kECktEvBt4l8fh93BpQSg-lHky7BQ,95
  autonomous/cli.py,sha256=z4AaGeWNW_uBLFAHng0J_lfS9v3fXemK1PeT85u4Eo4,42
  autonomous/logger.py,sha256=NQtgEaTWNAWfLSgqSP7ksXj1GpOuCgoUV711kSMm-WA,2022
  autonomous/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autonomous/ai/audioagent.py,sha256=SvPLzKgqUnrkcsR7y93aURSrStIrryuntQMPS1SzUXw,1033
- autonomous/ai/baseagent.py,sha256=HYCqC4HmK5afNMunmTkhRE8O0OaONl2GxXnISkdOM58,1094
+ autonomous/ai/baseagent.py,sha256=d6OYOk8LGHlDPNcqDmEX2PpWeerl-bIBL73IZ_T78oU,880
  autonomous/ai/imageagent.py,sha256=bIOrgg_CM-rgfyLme7V9vPqP8WKVMIAVoB2E9lLtIRk,521
  autonomous/ai/jsonagent.py,sha256=VQGhK0RFo0H_eVH9dAyf4_lp-RIpdgH988joLoKjm94,1065
  autonomous/ai/textagent.py,sha256=1yM1aMvws64PocvG_L-POMDKjxq2JDuGqgc3haUHybU,926
@@ -11,7 +11,7 @@ autonomous/ai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
  autonomous/ai/models/aws.py,sha256=bGDjnGTm350zOqor9IsICzUkBUN2bubGI_ZssQuSXIw,12715
  autonomous/ai/models/deepseek.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
  autonomous/ai/models/gemini.py,sha256=jrTMbh8SAdzzz27elOhs82iwjyutYcy8fvTOSdW-GFQ,14247
- autonomous/ai/models/local.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
+ autonomous/ai/models/local_model.py,sha256=xBwj2f_gvQQhL5YuTHZiStPAunDiPfpjOj_SDPajMN0,14710
  autonomous/ai/models/openai.py,sha256=2-LttCm6woGklaLbs1H5LjlbfM-7leDwGmC9vksSqW4,13135
  autonomous/apis/version_control/GHCallbacks.py,sha256=AyiUlYfV5JePi11GVyqYyXoj5UTbPKzS-HRRI94rjJo,1069
  autonomous/apis/version_control/GHOrganization.py,sha256=mi2livdsGurKiifbvuLwiFbdDzL77IlEfhwEa-tG77I,1155
@@ -23,10 +23,11 @@ autonomous/auth/autoauth.py,sha256=OizuMhmFjNzmsUijIbjGcQ5FxzVeoy9-NMFsx_TDsOE,3
  autonomous/auth/github.py,sha256=dHf84bJdV9rXGcvRLzWCPW9CvuA-VEmqYi_QQFwd2kY,886
  autonomous/auth/google.py,sha256=cHmqbyNEPTKipc3WkYcD1XPOyqcWEFW0Ks4qJYmGvPw,1049
  autonomous/auth/user.py,sha256=1yDu04yNSURzBzok6C5Dn-_mv0fGefvjrxj9ikCktqY,2726
- autonomous/db/__init__.py,sha256=9frkXJrl_OUemUQteXCTPqC8ECyxjE91Gi2mgTq26Fw,1159
+ autonomous/db/__init__.py,sha256=jn6bmC6lM-Q9RxW_1WXmj6ogwQYd_5HDIDbAGLXlbu4,1172
  autonomous/db/common.py,sha256=BUN2x_XuQBRFcq54TGPx4yLMLJdgytdbIt07QWr4CSM,2551
  autonomous/db/connection.py,sha256=j_-eMre4ade9Y8GejJcMbQQiSEimL4j2vIQxaXViKxI,17754
  autonomous/db/context_managers.py,sha256=_nH2ajCL8Xy90AuB2rKaryR4iF8Q8ksU3Nei_mZj-DE,9918
+ autonomous/db/db_sync.py,sha256=wURiDhfI_RfYDDcdRtYLZbjmeK0hUn-9A4IrI0sfi4o,4363
  autonomous/db/dereference.py,sha256=EgbpPCXtDZqD_ZuY1Wd4o3ltRy8qEo3C5yRh5_c9fLE,12776
  autonomous/db/document.py,sha256=oZKdTaoqwv9fCHiv450rIxgINASQF3J9FzIsUOUXHhw,44428
  autonomous/db/errors.py,sha256=_QeCotid1kmr7_W0QyH6NUrwwYN9eced_yyyiop0Xlw,4108
@@ -48,16 +49,16 @@ autonomous/db/queryset/manager.py,sha256=fXu95TlGChdJWTRA4OnY_Ik25JzezJ2_qPqmH78
  autonomous/db/queryset/queryset.py,sha256=SRLYWAQUXgcfNzeMPiH5Mm4WZIemHTNQ24y2EIisNQU,5969
  autonomous/db/queryset/transform.py,sha256=IIZKf_io60zPTIwJ5KcPcrJOOOOjD2yQU7coYClL1Iw,19461
  autonomous/db/queryset/visitor.py,sha256=AN09lR6hWYUlKJC7G1sktvnWy5hrFnpoQhi58bOXbA4,5470
- autonomous/model/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  autonomous/model/autoattr.py,sha256=FUnQrw65CcZumYiTsQ7U6G6UDGqbeekka-cjz6Sfchc,2675
- autonomous/model/automodel.py,sha256=F9rlsna1QYg8mVb-5ErKx5fEXxvaogVxWeeaJQBOOjs,8166
+ autonomous/model/automodel.py,sha256=9QE9_m6oW7kgZc-eIzzRA9Ib-RkX4fpmHRjl7ns_pwg,8289
  autonomous/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autonomous/storage/imagestorage.py,sha256=SmBjBNBlP1ZEjxdOnGVzCHZhbEhMKTUQC2TbpWbejDE,6168
  autonomous/storage/localstorage.py,sha256=FzrR6O9mMGAZt5dDgqzkeOQVfGRXCygR0kksz2MPpwE,2286
  autonomous/tasks/__init__.py,sha256=pn7iZ14MhcHUdzcLkfkd4-45wgPP0tXahAz_cFgb_Tg,32
- autonomous/tasks/autotask.py,sha256=aK5iapDhgcAic3F5ZYMAhNKJkOepj8yWwbMizKDzUwQ,4153
+ autonomous/tasks/autotask.py,sha256=9Fi7juGEEq8OVEQYES7sEkU21bkBhtBAIQ-Js5fMXDc,2193
+ autonomous/tasks/task_router.py,sha256=-MrohYTwjj6oTvhRytod6JFCzojAH6dGDnbIwrb18mQ,614
  autonomous/utils/markdown.py,sha256=tf8vlHARiQO1X_aGbqlYozzP_TbdiDRT9EEP6aFRQo0,2153
- autonomous_app-0.3.28.dist-info/METADATA,sha256=HgUXCoGnskzg9FAnSZwfEGXB-5l1Rksw0XTMnGp6bBk,3015
- autonomous_app-0.3.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- autonomous_app-0.3.28.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
- autonomous_app-0.3.28.dist-info/RECORD,,
+ autonomous_app-0.3.30.dist-info/METADATA,sha256=lRnKwOxeMj3hzPPrUEuob3c3QbnSaUjPjqrkfFumGDg,3024
+ autonomous_app-0.3.30.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+ autonomous_app-0.3.30.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
+ autonomous_app-0.3.30.dist-info/RECORD,,
autonomous_app-0.3.28.dist-info/WHEEL → autonomous_app-0.3.30.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (80.10.1)
  Root-Is-Purelib: true
  Tag: py3-none-any

autonomous/ai/models/local.py DELETED
@@ -1,99 +0,0 @@
- import io
- import json
- import os
- import random
- import time
- from base64 import b64decode
-
- import openai
- from ollama import ChatResponse, chat
-
- from autonomous import log
- from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
- from autonomous.model.automodel import AutoModel
-
-
- class LocalAIModel(AutoModel):
-     _client = None
-     instructions = StringAttr(
-         default="You are highly skilled AI trained to assist with various tasks."
-     )
-     description = StringAttr(
-         default="A helpful AI assistant trained to assist with various tasks."
-     )
-
-     @property
-     def client(self):
-         if not self._client:
-             self._client = "deepseek-r1"  # OpenAI(api_key=os.environ.get("OPENAI_KEY"))
-         return self._client
-
-     def clear_agent(self):
-         pass
-
-     def clear_agents(self):
-         pass
-
-     # def _get_agent_id(self):
-     #     pass
-
-     # def _add_function(self, user_function):
-     pass
-
-     def _format_messages(self, messages):
-         pass
-
-     def clear_files(self, file_id=None):
-         pass
-
-     def attach_file(self, file_contents, filename="dbdata.json"):
-         pass
-
-     def generate_json(self, messages, function, additional_instructions=""):
-         message = messages + additional_instructions
-         message += f"""
-         IMPORTANT: Respond in JSON FORMAT using the SCHEMA below. DO NOT add any text to the response outside of the supplied JSON schema:
-         {function}
-         """
-         response: ChatResponse = chat(
-             model=self.client,
-             messages=[
-                 {
-                     "role": "user",
-                     "content": message,
-                 },
-             ],
-         )
-         return response.message.content
-
-     def generate_text(self, messages, additional_instructions=""):
-         message = messages + additional_instructions
-         response: ChatResponse = chat(
-             model=self.client,
-             messages=[
-                 {
-                     "role": "user",
-                     "content": message,
-                 },
-             ],
-         )
-         return response.message.content
-
-     def generate_audio(self, prompt, **kwargs):
-         raise NotImplementedError
-
-     def generate_image(self, prompt, **kwargs):
-         raise NotImplementedError
-
-     def summarize_text(self, text, primer=""):
-         response: ChatResponse = chat(
-             model=self.client,
-             messages=[
-                 {
-                     "role": "system",
-                     "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
-                 },
-                 {"role": "user", "content": text},
-             ],
-         )
-         return response.message.content
autonomous/model/__init__.py DELETED
@@ -1 +0,0 @@
-