autonomous-app 0.3.29-py3-none-any.whl → 0.3.31-py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
autonomous/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.3.29"
+ __version__ = "0.3.31"

  from dotenv import load_dotenv

autonomous/ai/baseagent.py CHANGED
@@ -23,12 +23,11 @@ class BaseAgent(AutoModel):
          return self.get_client().id

      def get_client(self):
-         if self.client is None:
-             self.client = self._ai_model(
-                 name=self.name,
-                 instructions=self.instructions,
-                 description=self.description,
-             )
-             self.client.save()
-             self.save()
+         self.client = self._ai_model(
+             name=self.name,
+             instructions=self.instructions,
+             description=self.description,
+         )
+         self.client.save()
+         self.save()
          return self.client
autonomous/ai/models/local_model.py CHANGED
@@ -2,10 +2,12 @@ import io
  import json
  import os
  import random
- import re
- import wave

+ import numpy as np
+ import pymongo
+ import redis
  import requests
+ from bson.objectid import ObjectId
  from pydub import AudioSegment

  from autonomous import log
@@ -14,21 +16,22 @@ from autonomous.model.automodel import AutoModel


  class LocalAIModel(AutoModel):
-     # Configuration
-     _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama_internal:11434/api")
-     _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai_internal:5005")
-
-     # Models to use in Ollama
-     _text_model = "mistral-nemo"
-     _json_model = "mistral-nemo"
-
      messages = ListAttr(StringAttr(default=[]))
      name = StringAttr(default="agent")
-     instructions = StringAttr(
-         default="You are highly skilled AI trained to assist with various tasks."
-     )
+     instructions = StringAttr(default="You are a helpful AI.")
+     description = StringAttr(default="A Local AI Model using Ollama and Media AI.")
+
+     # Config
+     _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama:11434/api")
+     _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai:5005")
+     _text_model = "llama3"
+     _json_model = "llama3"
+
+     # DB Connections
+     _mongo_client = pymongo.MongoClient("mongodb://db:27017/")
+     _mongo_db = os.getenv("DB_DB", "default")
+     _redis = redis.Redis(host="cachedb", port=6379, decode_responses=True)

-     # Keep your voice list (mapped to random seeds/embeddings in the future)
      VOICES = {
          "Zephyr": ["female"],
          "Puck": ["male"],
@@ -63,44 +66,118 @@ class LocalAIModel(AutoModel):
      }

      def _convert_tools_to_json_schema(self, user_function):
-         """
-         Ollama doesn't support 'tools' strictly yet.
-         We convert the tool definition into a system prompt instruction.
-         """
          schema = {
              "name": user_function.get("name"),
-             "parameters": user_function.get("parameters"),
+             "description": user_function.get("description", ""),
+             "parameters": user_function.get("parameters", {}),
          }
          return json.dumps(schema, indent=2)

+     def get_embedding(self, text):
+         try:
+             res = requests.post(f"{self._media_url}/embeddings", json={"text": text})
+             res.raise_for_status()
+             return res.json()["embedding"]
+         except Exception as e:
+             log(f"Embedding Error: {e}", _print=True)
+             return []
+
+     def build_hybrid_context(self, prompt, focus_object_id=None):
+         cache_key = f"ctx:{focus_object_id}:{len(prompt) // 50}"
+         cached_ctx = self._redis.get(cache_key)
+         if cached_ctx:
+             return cached_ctx
+
+         context_str = ""
+         # --- PART 1: MONGODB ---
+         if focus_object_id:
+             try:
+                 oid = (
+                     ObjectId(focus_object_id)
+                     if isinstance(focus_object_id, str)
+                     else focus_object_id
+                 )
+                 main_obj = self._mongo_db.objects.find_one({"_id": oid})
+
+                 if main_obj:
+                     context_str += "### FOCUS OBJECT ###\n" + prompt
+                     ref_ids = main_obj.get("associations", []) or []
+                     if world_id := main_obj.get("world"):
+                         ref_ids.append(world_id)
+                     ref_ids.extend(main_obj.get("stories", []) or [])
+                     ref_ids.extend(main_obj.get("events", []) or [])
+
+                     if ref_ids:
+                         valid_oids = [
+                             ObjectId(rid) if isinstance(rid, str) else rid
+                             for rid in ref_ids
+                         ]
+                         if valid_oids:
+                             associated_objs = self._mongo_db.objects.find(
+                                 {"_id": {"$in": valid_oids}}
+                             )
+                             context_str += "\n### ASSOCIATED REFERENCES ###\n"
+                             for obj in associated_objs:
+                                 context_str += f"- {obj}\n"
+                             context_str += "\n"
+             except Exception as e:
+                 log(f"Mongo Association Error: {e}", _print=True)
+
+         # --- PART 2: REDIS ---
+         if len(prompt) > 10:
+             vector = self.get_embedding(prompt)
+             if vector:
+                 try:
+                     q = "*=>[KNN 2 @vector $blob AS score]"
+                     params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
+                     results = self._redis.ft("search_index").search(
+                         q, query_params=params
+                     )
+                     if results.docs:
+                         context_str += "### RELEVANT LORE ###\n"
+                         for doc in results.docs:
+                             context_str += f"- {doc.content}\n"
+                 except Exception:
+                     pass
+
+         self._redis.set(cache_key, context_str, ex=120)
+         return context_str
+
      def generate_json(self, message, function, additional_instructions="", **kwargs):
          """
-         Mimics Gemini's tool use by forcing Ollama into JSON mode
-         and injecting the schema into the prompt.
+         UPDATED: Uses correct /api/chat payload structure (messages list)
          """
          schema_str = self._convert_tools_to_json_schema(function)
+         focus_pk = kwargs.get("focus_object")
+         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)

-         system_prompt = (
+         full_system_prompt = (
              f"{self.instructions}. {additional_instructions}\n"
              f"You must respond strictly with a valid JSON object matching this schema:\n"
              f"{schema_str}\n"
              f"Do not include markdown formatting or explanations."
+             f"You must strictly adhere to the following context:\n"
+             f"{world_context}"
          )

+         # FIX: Using 'messages' instead of 'prompt'/'system'
          payload = {
-             "model": self._json_model,
-             "prompt": message,
-             "system": system_prompt,
-             "format": "json",  # Force JSON mode
+             "model": "llama3",
+             "messages": [
+                 {"role": "system", "content": full_system_prompt},
+                 {"role": "user", "content": message},
+             ],
+             "format": "json",
              "stream": False,
+             "keep_alive": "24h",
          }

          try:
-             response = requests.post(f"{self._ollama_url}/generate", json=payload)
+             response = requests.post(f"{self._ollama_url}/chat", json=payload)
              response.raise_for_status()
-             result_text = response.json().get("response", "{}")

-             # log(f"Raw Local JSON: {result_text}", _print=True)
+             # FIX: Chat API returns 'message' -> 'content'
+             result_text = response.json().get("message", {}).get("content", "{}")
              return json.loads(result_text)

          except Exception as e:
@@ -109,54 +186,52 @@ class LocalAIModel(AutoModel):

      def generate_text(self, message, additional_instructions="", **kwargs):
          """
-         Standard text generation via Ollama.
+         UPDATED: Uses correct /api/chat payload structure
          """
+         focus_pk = kwargs.get("focus_object")
+         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
+
+         full_system_prompt = (
+             f"{self.instructions}. {additional_instructions}\n"
+             f"You must strictly adhere to the following context:\n"
+             f"{world_context}"
+         )
+
          payload = {
-             "model": self._text_model,
-             "prompt": message,
-             "system": f"{self.instructions}. {additional_instructions}",
+             "model": "llama3",
+             "messages": [
+                 {"role": "system", "content": full_system_prompt},
+                 {"role": "user", "content": message},
+             ],
              "stream": False,
+             "keep_alive": "24h",
          }

-         # Handle 'files' (Ollama supports images in base64, but not arbitrary files easily yet)
-         # If files are text, you should read them and append to prompt.
-         if file_list := kwargs.get("files"):
-             for file_dict in file_list:
-                 fn = file_dict["name"]
-                 fileobj = file_dict["file"]
-                 if fn.lower().endswith((".txt", ".md", ".json", ".csv")):
-                     content = fileobj.read()
-                     if isinstance(content, bytes):
-                         content = content.decode("utf-8", errors="ignore")
-                     payload["prompt"] += f"\n\nContents of {fn}:\n{content}"
-
          try:
-             response = requests.post(f"{self._ollama_url}/generate", json=payload)
+             response = requests.post(f"{self._ollama_url}/chat", json=payload)
              response.raise_for_status()
-             return response.json().get("response", "")
+             # FIX: Chat API returns 'message' -> 'content'
+             return response.json().get("message", {}).get("content", "")
          except Exception as e:
              log(f"==== LocalAI Text Error: {e} ====", _print=True)
              return "Error generating text."

      def summarize_text(self, text, primer="", **kwargs):
          primer = primer or "Summarize the following text concisely."
-
-         # Simple chunking logic (similar to your original)
-         # Note: Mistral-Nemo has a large context window (128k), so chunking
-         # is less necessary than with older models, but we keep it for safety.
-         max_chars = 12000  # Roughly 3k tokens
+         max_chars = 12000
          chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]

          full_summary = ""
          for chunk in chunks:
              payload = {
-                 "model": self._text_model,
-                 "prompt": f"{primer}:\n\n{chunk}",
+                 "model": "llama3",
+                 "messages": [{"role": "user", "content": f"{primer}:\n\n{chunk}"}],
                  "stream": False,
+                 "keep_alive": "24h",
              }
              try:
-                 res = requests.post(f"{self._ollama_url}/generate", json=payload)
-                 full_summary += res.json().get("response", "") + "\n"
+                 res = requests.post(f"{self._ollama_url}/chat", json=payload)
+                 full_summary += res.json().get("message", {}).get("content", "") + "\n"
              except Exception as e:
                  log(f"Summary Error: {e}", _print=True)
                  break
@@ -164,99 +239,59 @@ class LocalAIModel(AutoModel):
          return full_summary

      def generate_audio_text(self, audio_file, prompt="", **kwargs):
-         """
-         Sends audio bytes to the Media AI container for Whisper transcription.
-         """
          try:
-             # Prepare the file for upload
-             # audio_file is likely bytes, so we wrap in BytesIO if needed
              if isinstance(audio_file, bytes):
                  f_obj = io.BytesIO(audio_file)
              else:
                  f_obj = audio_file
-
              files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
-
              response = requests.post(f"{self._media_url}/transcribe", files=files)
              response.raise_for_status()
              return response.json().get("text", "")
-
          except Exception as e:
              log(f"STT Error: {e}", _print=True)
              return ""

      def generate_audio(self, prompt, voice=None, **kwargs):
-         """
-         Sends text to the Media AI container for TTS.
-         """
          voice = voice or random.choice(list(self.VOICES.keys()))
-
          try:
              payload = {"text": prompt, "voice": voice}
              response = requests.post(f"{self._media_url}/tts", json=payload)
              response.raise_for_status()
-
-             # Response content is WAV bytes
              wav_bytes = response.content
-
-             # Convert to MP3 to match your original interface (using pydub)
              audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
              mp3_buffer = io.BytesIO()
              audio.export(mp3_buffer, format="mp3")
              return mp3_buffer.getvalue()
-
          except Exception as e:
              log(f"TTS Error: {e}", _print=True)
              return None

-     def generate_image(self, prompt, **kwargs):
-         """
-         Generates an image using Local AI.
-         If 'files' are provided, performs Image-to-Image generation using the first file as reference.
-         """
+     def generate_image(self, prompt, negative_prompt="", **kwargs):
          try:
-             # Prepare the multipart data
-             # We send the prompt as a form field
-             data = {"prompt": prompt}
+             data = {"prompt": prompt, "negative_prompt": negative_prompt}
              files = {}
-
-             # Check if reference images were passed
              if kwargs.get("files"):
-                 # Take the first available file
                  for fn, f_bytes in kwargs.get("files").items():
-                     # If f_bytes is bytes, wrap in IO, else assume it's file-like
                      if isinstance(f_bytes, bytes):
                          file_obj = io.BytesIO(f_bytes)
                      else:
                          file_obj = f_bytes
-
-                     # Add to the request files
-                     # Key must be 'file' to match server.py logic
                      files["file"] = (fn, file_obj, "image/png")
-                     break  # We only support 1 reference image for SD Img2Img
-
-             # Send Request
+                     break
              if files:
-                 # Multipart/form-data request (Prompt + File)
                  response = requests.post(
                      f"{self._media_url}/generate-image", data=data, files=files
                  )
              else:
-                 # Standard request (Prompt only) - server.py handles request.form vs json
-                 # But our updated server expects form data for consistency
                  response = requests.post(f"{self._media_url}/generate-image", data=data)
-
              response.raise_for_status()
-
-             # Returns WebP bytes directly
              return response.content
-
          except Exception as e:
              log(f"Image Gen Error: {e}", _print=True)
              return None

      def list_voices(self, filters=[]):
-         # Same logic as before
          if not filters:
              return list(self.VOICES.keys())
          voices = []
@@ -264,9 +299,3 @@ class LocalAIModel(AutoModel):
              if any(f.lower() in attribs for f in filters):
                  voices.append(voice)
          return voices
-
-     # Unused methods from original that don't apply to Local AI
-     def upload(self, file):
-         # Local models don't really have a "File Store" API like Gemini.
-         # We handle context by passing text directly in prompt.
-         pass
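
For reference, the /api/generate → /api/chat switch above follows Ollama's chat endpoint contract: the request carries a messages list, and the reply text comes back nested under message.content. A minimal round-trip sketch (the localhost URL is illustrative; the class defaults point at the ollama container):

import requests

OLLAMA_URL = "http://localhost:11434/api"  # illustrative host; the class default is http://ollama:11434/api

payload = {
    "model": "llama3",
    "messages": [
        {"role": "system", "content": "You are a helpful AI."},
        {"role": "user", "content": "Reply with one word."},
    ],
    "stream": False,      # one JSON object instead of a chunked stream
    "keep_alive": "24h",  # keep the model resident in memory between requests
}

response = requests.post(f"{OLLAMA_URL}/chat", json=payload)
response.raise_for_status()
print(response.json()["message"]["content"])  # chat replies nest under "message"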
autonomous/db/__init__.py CHANGED
@@ -5,6 +5,7 @@
  # `from autonomous.db import *` and then `connect('testdb')`.
  from autonomous.db import (
      connection,
+     db_sync,
      document,
      errors,
      fields,
@@ -12,6 +13,7 @@ from autonomous.db import (
      signals,
  )
  from autonomous.db.connection import *  # noqa: F401
+ from autonomous.db.db_sync import *  # noqa: F401
  from autonomous.db.document import *  # noqa: F401
  from autonomous.db.errors import *  # noqa: F401
  from autonomous.db.fields import *  # noqa: F401
@@ -28,14 +30,11 @@ __all__ = (
  )


- VERSION = (0, 29, 0)
+ VERSION = (0, 30, 0)


  def get_version():
-     """Return the VERSION as a string.
-
-     For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.
-     """
+     """Return the VERSION as a string."""
      return ".".join(map(str, VERSION))


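For reference, get_version() joins the VERSION tuple with dots, so the bump above changes the reported string like so:

VERSION = (0, 30, 0)
print(".".join(map(str, VERSION)))  # -> "0.30.0" (previously "0.29.0")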
autonomous/db/db_sync.py ADDED
@@ -0,0 +1,140 @@
+ import os
+ import time
+ import urllib.parse
+ import uuid
+ from datetime import datetime
+
+ import numpy as np
+ import pymongo
+ import redis
+ import requests
+
+ # CONFIGURATION
+ db_host = os.getenv("DB_HOST", "db")
+ db_port = os.getenv("DB_PORT", 27017)
+ password = urllib.parse.quote_plus(str(os.getenv("DB_PASSWORD")))
+ username = urllib.parse.quote_plus(str(os.getenv("DB_USERNAME")))
+ MEDIA_URL = "http://media_ai_internal:5005"
+ REDIS_HOST = os.getenv("REDIS_HOST", "cachedb")
+ MONGO_URI = f"mongodb://{username}:{password}@{db_host}:{db_port}/?authSource=admin"
+
+ # DB SETUP
+ r = redis.Redis(host=REDIS_HOST, port=6379, decode_responses=True)
+
+ mongo = pymongo.MongoClient(MONGO_URI)
+ db = mongo[os.getenv("DB_DB")]
+ # connect(host=f"mongodb://{username}:{password}@{host}:{port}/{dbname}?authSource=admin")
+
+
+ def get_vector(text):
+     """Helper to get embedding from your Media AI container"""
+     try:
+         resp = requests.post(f"{MEDIA_URL}/embeddings", json={"text": text}, timeout=30)
+         if resp.status_code == 200:
+             return resp.json()["embedding"]
+     except Exception as e:
+         print(f"Vector Gen Failed: {e}")
+     return None
+
+
+ def process_single_object_sync(object_id, collection_name, token):
+     """
+     THE WORKER FUNCTION (Runs in Background).
+     It is safe to sleep here because we are not in the web request.
+     """
+     str_id = str(object_id)
+     token_key = f"sync_token:{collection_name}:{str_id}"
+
+     # 1. THE DEBOUNCE WAIT (Happens in background)
+     print(f"Debouncing {str_id} for 5 seconds...")
+     time.sleep(5)
+
+     # 2. THE VERIFICATION
+     # Check if a newer save happened while we slept
+     current_active_token = r.get(token_key)
+
+     if current_active_token != token:
+         print(f"Skipping sync for {str_id}: Superseded by a newer save.")
+         return
+
+     # 3. THE EXECUTION (Embedding generation)
+     print(f"Processing Sync for: {str_id} in {collection_name}")
+
+     from bson.objectid import ObjectId
+
+     # FIX: Use dynamic collection access instead of db.objects
+     try:
+         # Tries to convert string ID to ObjectId.
+         # If your DB uses String IDs, remove the ObjectId() wrapper.
+         oid = ObjectId(object_id)
+         doc = db[collection_name].find_one({"_id": oid})
+     except Exception:
+         # Fallback if ID is not a valid ObjectId string
+         doc = db[collection_name].find_one({"_id": object_id})
+
+     if not doc:
+         print(f"Object {object_id} not found in collection '{collection_name}'")
+         # Optional: Remove from Redis index if it exists
+         r.delete(f"lore:{object_id}")
+         return
+
+     # 2. Construct Searchable Text
+     # (Existing logic...)
+     searchable_text = (
+         f"{doc.get('name', '')}: {doc.get('description', '')} {doc.get('history', '')}"
+     )
+
+     if len(searchable_text) < 10:
+         return
+
+     # 3. Generate Vector
+     vector = get_vector(searchable_text)
+
+     # 4. Save to Redis Index
+     if vector:
+         r.hset(
+             f"lore:{object_id}",
+             mapping={
+                 "mongo_id": str(object_id),
+                 "collection": collection_name,  # Useful for debugging
+                 "content": searchable_text,
+                 "vector": np.array(vector, dtype=np.float32).tobytes(),
+                 "last_synced": datetime.utcnow().isoformat(),
+             },
+         )
+         print(f"Successfully Indexed: {doc.get('name')}")
+
+
+ def request_indexing(object_id, collection_name):
+     """
+     THE TRIGGER FUNCTION (Runs in Main App).
+     MUST BE FAST. NO SLEEPING HERE.
+     """
+     print("Requesting Indexing...")
+     # Import your Queue Wrapper
+     from autonomous.tasks.autotask import AutoTasks
+
+     # Initialize the Task Runner
+     task_runner = AutoTasks()
+
+     str_id = str(object_id)
+     token_key = f"sync_token:{collection_name}:{str_id}"
+
+     # 1. GENERATE NEW TOKEN
+     current_token = str(uuid.uuid4())
+
+     # 2. SAVE TOKEN TO REDIS (Instant)
+     r.set(token_key, current_token, ex=300)
+
+     # 3. ENQUEUE THE TASK (Instant)
+     try:
+         task_runner.task(
+             process_single_object_sync,  # The function to run later
+             object_id=str_id,
+             collection_name=collection_name,
+             token=current_token,
+         )
+         return True
+     except Exception as e:
+         print(f"Sync Enqueue failed: {e}")
+         return False
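
Note: the lore:* hashes written by process_single_object_sync and the KNN query in build_hybrid_context both presuppose a RediSearch vector index named search_index, which this diff never creates. A sketch of a compatible index definition using redis-py, where the 384 dimension is an assumption that must match whatever embedding model the media_ai /embeddings endpoint serves:

import redis
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType

r = redis.Redis(host="cachedb", port=6379)

# Index every hash whose key starts with "lore:", matching db_sync's key scheme
r.ft("search_index").create_index(
    fields=[
        TextField("content"),
        VectorField(
            "vector",
            "FLAT",  # brute-force search; HNSW is the usual choice at larger scale
            {
                "TYPE": "FLOAT32",       # matches np.float32 .tobytes() above
                "DIM": 384,              # assumed embedding size; must match the model
                "DISTANCE_METRIC": "COSINE",
            },
        ),
    ],
    definition=IndexDefinition(prefix=["lore:"], index_type=IndexType.HASH),
)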
autonomous/model/automodel.py CHANGED
@@ -8,6 +8,7 @@ from autonomous import log
  from autonomous.db import Document, connect, signals
  from autonomous.db.errors import ValidationError
  from autonomous.db.fields import DateTimeField
+ from autonomous.db import db_sync

  host = os.getenv("DB_HOST", "db")
  port = os.getenv("DB_PORT", 27017)
@@ -240,6 +241,9 @@ class AutoModel(Document):
          """
          obj = super().save()
          self.pk = obj.pk
+
+         db_sync.request_indexing(self.pk, collection_name=self._get_collection_name())
+
          return self.pk

      @classmethod
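
Taken together with the token scheme in db_sync above, this save() hook debounces indexing per object: every save enqueues a job, but only the job holding the newest token survives the 5-second verification window. A hypothetical trace, assuming a Character subclass of AutoModel:

from autonomous.model.automodel import AutoModel


class Character(AutoModel):  # hypothetical model for illustration
    pass


c = Character()
c.save()  # stores sync_token:<collection>:<pk> = T1, enqueues a job carrying T1
c.save()  # overwrites the token with T2, enqueues a second job carrying T2
# After the 5s debounce sleep, the T1 job sees the stored token is now T2 and exits;
# only the T2 job re-reads the document and rewrites its lore:<pk> entry in Redis.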
autonomous/tasks/autotask.py CHANGED
@@ -1,12 +1,7 @@
- import importlib
  import os
- import subprocess
-
  from redis import Redis
- from rq import Queue, Worker
-
- from autonomous import log
-
+ from rq import Queue
+ from rq.job import Job

  class AutoTask:
      def __init__(self, job):
@@ -18,127 +13,68 @@ class AutoTask:

      @property
      def status(self):
-         status = self.job.get_status()
-         if status in ["running", "queued", "started"]:
-             return "running"
-         return status
-
-     @property
-     def running(self):
-         return self.status == "running"
-
-     @property
-     def finished(self):
-         return self.status == "finished"
-
-     @property
-     def failed(self):
-         return self.status == "failed"
+         return self.job.get_status()

      @property
      def result(self):
-         result = self.job.latest_result()
-         result_dict = {
+         # Simplified result fetching
+         return {
              "id": self.id,
-             "return_value": result.return_value if result else None,
+             "return_value": self.job.result,
              "status": self.status,
-             "error": result.exc_string
-             if result and result.type in [result.Type.FAILED, result.Type.STOPPED]
-             else None,
+             "error": self.job.exc_info
          }

-         return result_dict
-
-     @property
-     def return_value(self):
-         return self.result.get("return_value")
-
-
  class AutoTasks:
      _connection = None
      queue = None
-     workers = []
-     all_tasks = []
+
+     # Config stays the same
      config = {
-         "host": os.environ.get("REDIS_HOST"),
-         "port": os.environ.get("REDIS_PORT"),
+         "host": os.environ.get("REDIS_HOST", "cachedb"),
+         "port": os.environ.get("REDIS_PORT", 6379),
          "password": os.environ.get("REDIS_PASSWORD"),
          "username": os.environ.get("REDIS_USERNAME"),
          "db": os.environ.get("REDIS_DB", 0),
      }

-     def __init__(self, queue="default", num_workers=3):
+     def __init__(self, queue_name="default"):
          if not AutoTasks._connection:
              options = {}
-
-             if AutoTasks.config.get("username"):
-                 options["username"] = AutoTasks.config.get("username")
-             if AutoTasks.config.get("username"):
+             if AutoTasks.config.get("password"):
                  options["password"] = AutoTasks.config.get("password")
-             if AutoTasks.config.get("db"):
-                 options["db"] = AutoTasks.config.get("db")

+             # Create Redis Connection
              AutoTasks._connection = Redis(
                  host=AutoTasks.config.get("host"),
                  port=AutoTasks.config.get("port"),
+                 decode_responses=False,  # RQ requires bytes, not strings
                  **options,
              )
-             AutoTasks.queue = Queue(queue, connection=AutoTasks._connection)
+
+             # Initialize Queue
+             AutoTasks.queue = Queue(queue_name, connection=AutoTasks._connection)

      def task(self, func, *args, **kwargs):
          """
-         :param job: job function
-         :param args: job function args
-         :param kwargs: job function kwargs
-         args and kwargs: use these to explicitly pass arguments and keyword to the underlying job function.
-         _task_<option>:pass options to the task object
-         :return: job
+         Enqueues a job to Redis. Does NOT start a worker.
          """
+         job_timeout = kwargs.pop("_task_job_timeout", 3600)

+         # Enqueue the job
+         # func can be a string path or the function object itself
          job = AutoTasks.queue.enqueue(
              func,
-             job_timeout=kwargs.get("_task_job_timeout", 3600),
              args=args,
              kwargs=kwargs,
+             job_timeout=job_timeout
          )
-         self.create_worker(func)
-         new_task = AutoTask(job)
-         AutoTasks.all_tasks.append(new_task)
-         return new_task
-
-     def create_worker(self, func):
-         # Get the module containing the target_function
-         module = func.__module__
-
-         # Get the file path of the module
-         module_path = importlib.import_module(module).__file__

-         # Set the PYTHONPATH environment variable
-         pythonpath = os.path.dirname(module_path)
-         env = os.environ.copy()
-         env["PYTHONPATH"] = pythonpath
+         return AutoTask(job)

-         rq_user_pass = f"{self.config['username']}:{self.config['password']}"
-         rq_url = f"{self.config['host']}:{self.config['port']}"
-         rq_db = self.config["db"]
-         rq_worker_command = (
-             f"rq worker --url redis://{rq_user_pass}@{rq_url}/{rq_db} --burst"
-         )
-
-         worker = subprocess.Popen(rq_worker_command, shell=True, env=env)
-         self.workers.append(worker)
-         return worker
-
-     # get job given its id
      def get_task(self, job_id):
-         # breakpoint()
-         task = AutoTasks.queue.fetch_job(job_id)
-         return AutoTask(task)
-
-     # get job given its id
-     def get_tasks(self):
-         return [AutoTask(w) for w in Worker.all(queue=AutoTasks.queue)]
-
-     def clear(self):
-         AutoTasks.queue.empty()
-         AutoTasks.all_tasks = []
+         try:
+             job = Job.fetch(job_id, connection=AutoTasks._connection)
+             return AutoTask(job)
+         except Exception:
+             return None
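
Note: with create_worker() removed, enqueued jobs now sit in Redis until a separately managed worker process drains the queue. A minimal worker sketch under the same config assumptions (typically run as its own container, or via the rq CLI shown in the comment):

import os

from redis import Redis
from rq import Queue, Worker

conn = Redis(
    host=os.environ.get("REDIS_HOST", "cachedb"),
    port=int(os.environ.get("REDIS_PORT", 6379)),
)
queue = Queue("default", connection=conn)

# Blocks and processes jobs as they arrive; roughly equivalent CLI:
#   rq worker default --url redis://cachedb:6379/0
Worker([queue], connection=conn).work()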
autonomous/tasks/task_router.py ADDED
@@ -0,0 +1,24 @@
+ import re
+
+
+ class TaskRouterBase:
+     """
+     Maps URL paths to Task Functions.
+     Acts as the central registry for all background tasks.
+     """
+
+     # Format: (Regex Pattern, Function Object)
+
+     @classmethod
+     def resolve(cls, path):
+         """
+         Parses the path, finds the matching function, and extracts arguments.
+         Returns: (function_obj, kwargs_dict) or (None, None)
+         """
+         for pattern, func in cls.ROUTES:
+             match = re.match(pattern, path)
+             if match:
+                 return func, match.groupdict()
+         return None, None
+
+     ROUTES = []
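
TaskRouterBase is a registry scaffold: a subclass fills ROUTES with (regex, function) pairs, and resolve() hands back the matched function plus the regex's named groups as a kwargs dict. A hypothetical subclass and lookup:

from autonomous.tasks.task_router import TaskRouterBase


def rebuild_index(collection_name, object_id):  # hypothetical task function
    print(f"rebuilding {collection_name}/{object_id}")


class AppTaskRouter(TaskRouterBase):
    ROUTES = [
        # Named groups become the kwargs dict returned by resolve()
        (r"^/tasks/rebuild/(?P<collection_name>\w+)/(?P<object_id>\w+)$", rebuild_index),
    ]


func, kwargs = AppTaskRouter.resolve("/tasks/rebuild/objects/66b2f0a1")
if func:
    func(**kwargs)  # -> rebuilding objects/66b2f0a1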
autonomous_app-0.3.29.dist-info/METADATA → autonomous_app-0.3.31.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: autonomous-app
- Version: 0.3.29
+ Version: 0.3.31
  Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
  Author-email: Steven A Moore <samoore@binghamton.edu>
  Project-URL: homepage, https://github.com/Sallenmoore/autonomous
@@ -24,8 +24,8 @@ Requires-Dist: gunicorn
  Requires-Dist: Authlib
  Requires-Dist: rq
  Requires-Dist: ollama
- Requires-Dist: openai>=1.42
  Requires-Dist: google-genai
+ Requires-Dist: sentence-transformers
  Requires-Dist: dateparser
  Requires-Dist: python-slugify
  Requires-Dist: pydub
autonomous_app-0.3.29.dist-info/RECORD → autonomous_app-0.3.31.dist-info/RECORD RENAMED
@@ -1,9 +1,9 @@
- autonomous/__init__.py,sha256=j-9rQTP_Ejh4XtrStDlXn9R3weOnMognFa40DRWU_V0,95
+ autonomous/__init__.py,sha256=IYpCcBa6pM-33askQSaMEQIruv9EItiurnkTkM6Q-Uc,95
  autonomous/cli.py,sha256=z4AaGeWNW_uBLFAHng0J_lfS9v3fXemK1PeT85u4Eo4,42
  autonomous/logger.py,sha256=NQtgEaTWNAWfLSgqSP7ksXj1GpOuCgoUV711kSMm-WA,2022
  autonomous/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autonomous/ai/audioagent.py,sha256=SvPLzKgqUnrkcsR7y93aURSrStIrryuntQMPS1SzUXw,1033
- autonomous/ai/baseagent.py,sha256=sJDIrCzhUp1OwkfyRqT0ZaB1fk5pV8K5L3jaTxzi9DI,940
+ autonomous/ai/baseagent.py,sha256=d6OYOk8LGHlDPNcqDmEX2PpWeerl-bIBL73IZ_T78oU,880
  autonomous/ai/imageagent.py,sha256=bIOrgg_CM-rgfyLme7V9vPqP8WKVMIAVoB2E9lLtIRk,521
  autonomous/ai/jsonagent.py,sha256=VQGhK0RFo0H_eVH9dAyf4_lp-RIpdgH988joLoKjm94,1065
  autonomous/ai/textagent.py,sha256=1yM1aMvws64PocvG_L-POMDKjxq2JDuGqgc3haUHybU,926
@@ -11,7 +11,7 @@ autonomous/ai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
  autonomous/ai/models/aws.py,sha256=bGDjnGTm350zOqor9IsICzUkBUN2bubGI_ZssQuSXIw,12715
  autonomous/ai/models/deepseek.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
  autonomous/ai/models/gemini.py,sha256=jrTMbh8SAdzzz27elOhs82iwjyutYcy8fvTOSdW-GFQ,14247
- autonomous/ai/models/local_model.py,sha256=GBXUelGUObo33BYaPVbCqI0asYaFLET2JNMFSXqxngw,9846
+ autonomous/ai/models/local_model.py,sha256=jWLTHBLb-6GtrMeDNctUTucX7HFZfla5NsfGfFsNPJ0,11297
  autonomous/ai/models/openai.py,sha256=2-LttCm6woGklaLbs1H5LjlbfM-7leDwGmC9vksSqW4,13135
  autonomous/apis/version_control/GHCallbacks.py,sha256=AyiUlYfV5JePi11GVyqYyXoj5UTbPKzS-HRRI94rjJo,1069
  autonomous/apis/version_control/GHOrganization.py,sha256=mi2livdsGurKiifbvuLwiFbdDzL77IlEfhwEa-tG77I,1155
@@ -23,10 +23,11 @@ autonomous/auth/autoauth.py,sha256=OizuMhmFjNzmsUijIbjGcQ5FxzVeoy9-NMFsx_TDsOE,3
  autonomous/auth/github.py,sha256=dHf84bJdV9rXGcvRLzWCPW9CvuA-VEmqYi_QQFwd2kY,886
  autonomous/auth/google.py,sha256=cHmqbyNEPTKipc3WkYcD1XPOyqcWEFW0Ks4qJYmGvPw,1049
  autonomous/auth/user.py,sha256=1yDu04yNSURzBzok6C5Dn-_mv0fGefvjrxj9ikCktqY,2726
- autonomous/db/__init__.py,sha256=9frkXJrl_OUemUQteXCTPqC8ECyxjE91Gi2mgTq26Fw,1159
+ autonomous/db/__init__.py,sha256=2mNynmYV0I_J3-W4Aw1cojAQrHf4aHZT1Ow9xUdmM18,1154
  autonomous/db/common.py,sha256=BUN2x_XuQBRFcq54TGPx4yLMLJdgytdbIt07QWr4CSM,2551
  autonomous/db/connection.py,sha256=j_-eMre4ade9Y8GejJcMbQQiSEimL4j2vIQxaXViKxI,17754
  autonomous/db/context_managers.py,sha256=_nH2ajCL8Xy90AuB2rKaryR4iF8Q8ksU3Nei_mZj-DE,9918
+ autonomous/db/db_sync.py,sha256=dTJ8QO1-_OJkNJL-Bb8lHQkWpdOHyDRIVB4w3WW5lCU,4308
  autonomous/db/dereference.py,sha256=EgbpPCXtDZqD_ZuY1Wd4o3ltRy8qEo3C5yRh5_c9fLE,12776
  autonomous/db/document.py,sha256=oZKdTaoqwv9fCHiv450rIxgINASQF3J9FzIsUOUXHhw,44428
  autonomous/db/errors.py,sha256=_QeCotid1kmr7_W0QyH6NUrwwYN9eced_yyyiop0Xlw,4108
@@ -48,16 +49,16 @@ autonomous/db/queryset/manager.py,sha256=fXu95TlGChdJWTRA4OnY_Ik25JzezJ2_qPqmH78
  autonomous/db/queryset/queryset.py,sha256=SRLYWAQUXgcfNzeMPiH5Mm4WZIemHTNQ24y2EIisNQU,5969
  autonomous/db/queryset/transform.py,sha256=IIZKf_io60zPTIwJ5KcPcrJOOOOjD2yQU7coYClL1Iw,19461
  autonomous/db/queryset/visitor.py,sha256=AN09lR6hWYUlKJC7G1sktvnWy5hrFnpoQhi58bOXbA4,5470
- autonomous/model/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  autonomous/model/autoattr.py,sha256=FUnQrw65CcZumYiTsQ7U6G6UDGqbeekka-cjz6Sfchc,2675
- autonomous/model/automodel.py,sha256=F9rlsna1QYg8mVb-5ErKx5fEXxvaogVxWeeaJQBOOjs,8166
+ autonomous/model/automodel.py,sha256=9QE9_m6oW7kgZc-eIzzRA9Ib-RkX4fpmHRjl7ns_pwg,8289
  autonomous/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autonomous/storage/imagestorage.py,sha256=SmBjBNBlP1ZEjxdOnGVzCHZhbEhMKTUQC2TbpWbejDE,6168
  autonomous/storage/localstorage.py,sha256=FzrR6O9mMGAZt5dDgqzkeOQVfGRXCygR0kksz2MPpwE,2286
  autonomous/tasks/__init__.py,sha256=pn7iZ14MhcHUdzcLkfkd4-45wgPP0tXahAz_cFgb_Tg,32
- autonomous/tasks/autotask.py,sha256=aK5iapDhgcAic3F5ZYMAhNKJkOepj8yWwbMizKDzUwQ,4153
+ autonomous/tasks/autotask.py,sha256=9Fi7juGEEq8OVEQYES7sEkU21bkBhtBAIQ-Js5fMXDc,2193
+ autonomous/tasks/task_router.py,sha256=W09HtRUuhwlnGxM5w4l6Hzw6mfS6L4ljWiMzD3ZVFeU,601
  autonomous/utils/markdown.py,sha256=tf8vlHARiQO1X_aGbqlYozzP_TbdiDRT9EEP6aFRQo0,2153
- autonomous_app-0.3.29.dist-info/METADATA,sha256=VEPJShGjhIrEukUwiMTlIDiu7bbkEnrvoy2r03WBsQo,3015
- autonomous_app-0.3.29.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
- autonomous_app-0.3.29.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
- autonomous_app-0.3.29.dist-info/RECORD,,
+ autonomous_app-0.3.31.dist-info/METADATA,sha256=Nz_e0D8StzIr6uAFxzXlazvrva2rFu8GzV2WTayCRDE,3024
+ autonomous_app-0.3.31.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+ autonomous_app-0.3.31.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
+ autonomous_app-0.3.31.dist-info/RECORD,,
autonomous/model/__init__.py DELETED
@@ -1 +0,0 @@
-