autonomous-app 0.3.30-py3-none-any.whl → 0.3.32-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
autonomous/__init__.py CHANGED
@@ -1,4 +1,4 @@
-__version__ = "0.3.30"
+__version__ = "0.3.32"
 
 from dotenv import load_dotenv
 
autonomous/ai/models/local_model.py CHANGED
@@ -22,10 +22,10 @@ class LocalAIModel(AutoModel):
     description = StringAttr(default="A Local AI Model using Ollama and Media AI.")
 
     # Config
-    _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama_internal:11434/api")
-    _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai_internal:5005")
-    _text_model = "mistral-nemo"
-    _json_model = "mistral-nemo"
+    _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama:11434/api")
+    _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai:5005")
+    _text_model = "llama3"
+    _json_model = "llama3"
 
     # DB Connections
     _mongo_client = pymongo.MongoClient("mongodb://db:27017/")
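Note on the config hunk above: both base URLs are still read through `os.environ.get`, so the new Docker service names (`ollama`, `media_ai`) are only defaults. A minimal sketch for deployments that keep the old `*_internal` container names (the values shown are the pre-0.3.32 defaults, not a new API):

```python
# Pin the old endpoints via environment variables. These must be set before
# LocalAIModel is imported, because the defaults are captured when the class
# body is evaluated by os.environ.get(...).
import os

os.environ["OLLAMA_API_BASE"] = "http://ollama_internal:11434/api"
os.environ["MEDIA_API_BASE"] = "http://media_ai_internal:5005"
```

Separately, the swap from mistral-nemo to llama3 means the Ollama instance needs that model available (e.g. `ollama pull llama3`) before these calls can succeed.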
@@ -66,12 +66,6 @@ class LocalAIModel(AutoModel):
         }
 
     def _convert_tools_to_json_schema(self, user_function):
-        """
-        Ollama doesn't support 'tools' strictly yet.
-        We convert the tool definition into a system prompt instruction.
-        """
-        # If the user passes a raw dictionary (like a Gemini tool definition)
-        # we extract the relevant parts for the schema.
         schema = {
             "name": user_function.get("name"),
             "description": user_function.get("description", ""),
@@ -89,117 +83,74 @@ class LocalAIModel(AutoModel):
         return []
 
     def build_hybrid_context(self, prompt, focus_object_id=None):
-        """
-        Builds context based on RELATIONAL ASSOCIATIONS + SEMANTIC LORE.
-        """
-
-        # 1. Create a Cache Key based on what defines the "Scene"
-        # We assume 'focus_object_id' + rough prompt length captures the context enough
         cache_key = f"ctx:{focus_object_id}:{len(prompt) // 50}"
-
-        # 2. Check Cache
         cached_ctx = self._redis.get(cache_key)
         if cached_ctx:
             return cached_ctx
 
         context_str = ""
-
-        # --- PART 1: MONGODB (Relational Associations) ---
-        # If we are focusing on a specific object, fetch it and its specific refs.
+        # --- PART 1: MONGODB ---
         if focus_object_id:
             try:
-                # 1. Fetch the Main Object
-                # Handle both string ID and ObjectId
                 oid = (
                     ObjectId(focus_object_id)
                     if isinstance(focus_object_id, str)
                     else focus_object_id
                 )
-
                 main_obj = self._mongo_db.objects.find_one({"_id": oid})
 
                 if main_obj:
-                    # Start the context with the main object itself
-                    context_str += "### FOCUS OBJECT ###\n"
-                    context_str += prompt
-
-                    # 2. Extract References (Associations)
-                    # 1. Start with the main list
+                    context_str += "### FOCUS OBJECT ###\n" + prompt
                     ref_ids = main_obj.get("associations", []) or []
-
-                    # 2. Safely add single fields (if they exist)
                     if world_id := main_obj.get("world"):
                         ref_ids.append(world_id)
-
-                    # 3. Safely add lists (ensure they are lists)
                     ref_ids.extend(main_obj.get("stories", []) or [])
                     ref_ids.extend(main_obj.get("events", []) or [])
 
                     if ref_ids:
-                        # Convert all to ObjectIds if they are strings
-                        valid_oids = []
-                        for rid in ref_ids:
-                            try:
-                                valid_oids.append(
-                                    ObjectId(rid) if isinstance(rid, str) else rid
-                                )
-                            except:
-                                pass
-
-                        # 3. Fetch all associated objects in ONE query
+                        valid_oids = [
+                            ObjectId(rid) if isinstance(rid, str) else rid
+                            for rid in ref_ids
+                        ]
                         if valid_oids:
                             associated_objs = self._mongo_db.objects.find(
                                 {"_id": {"$in": valid_oids}}
                             )
-
                             context_str += "\n### ASSOCIATED REFERENCES ###\n"
                             for obj in associated_objs:
-                                log(f"Associated Obj: {obj}", _print=True)
                                 context_str += f"- {obj}\n"
-
                     context_str += "\n"
             except Exception as e:
                 log(f"Mongo Association Error: {e}", _print=True)
 
-        # --- PART 2: REDIS (Semantic Search) ---
-        # We keep this! It catches "Lore" or "Rules" that aren't explicitly linked in the DB.
-        # e.g., If the sword is "Elven", this finds "Elven History" even if not linked by ID.
+        # --- PART 2: REDIS ---
         if len(prompt) > 10:
             vector = self.get_embedding(prompt)
             if vector:
                 try:
-                    q = "*=>[KNN 2 @vector $blob AS score]"  # Lowered to 2 to save tokens
+                    q = "*=>[KNN 2 @vector $blob AS score]"
                     params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
                     results = self._redis.ft("search_index").search(
                         q, query_params=params
                     )
-
                     if results.docs:
                         context_str += "### RELEVANT LORE ###\n"
                         for doc in results.docs:
                             context_str += f"- {doc.content}\n"
-                except Exception as e:
+                except Exception:
                     pass
 
-        # 3. Save to Cache (Expire in 60s)
-        # This prevents hammering the DB/Vector engine during a rapid conversation
         self._redis.set(cache_key, context_str, ex=120)
-
        return context_str
 
     def generate_json(self, message, function, additional_instructions="", **kwargs):
         """
-        Mimics Gemini's tool use by forcing Ollama into JSON mode
-        and injecting the schema into the prompt.
+        UPDATED: Uses correct /api/chat payload structure (messages list)
         """
         schema_str = self._convert_tools_to_json_schema(function)
-
         focus_pk = kwargs.get("focus_object")
-
-        # Build Relational Context
         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
 
-        # Construct System Prompt
         full_system_prompt = (
             f"{self.instructions}. {additional_instructions}\n"
             f"You must respond strictly with a valid JSON object matching this schema:\n"
@@ -209,21 +160,24 @@ class LocalAIModel(AutoModel):
             f"{world_context}"
         )
 
+        # FIX: Using 'messages' instead of 'prompt'/'system'
         payload = {
-            "model": self._json_model,
-            "prompt": message,
-            "system": full_system_prompt,
-            "format": "json",  # Force JSON mode
+            "model": "llama3",
+            "messages": [
+                {"role": "system", "content": full_system_prompt},
+                {"role": "user", "content": message},
+            ],
+            "format": "json",
             "stream": False,
             "keep_alive": "24h",
         }
 
         try:
-            response = requests.post(f"{self._ollama_url}/generate", json=payload)
+            response = requests.post(f"{self._ollama_url}/chat", json=payload)
             response.raise_for_status()
-            result_text = response.json().get("response", "{}")
 
-            # log(f"Raw Local JSON: {result_text}", _print=True)
+            # FIX: Chat API returns 'message' -> 'content'
+            result_text = response.json().get("message", {}).get("content", "{}")
             return json.loads(result_text)
 
         except Exception as e:
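For reference, the payload change above matches Ollama's documented `/api/chat` contract: the request carries a `messages` list instead of the `prompt`/`system` pair used by `/api/generate`, and the reply nests the text under `message.content` rather than a top-level `response` key. A standalone sketch of the exchange (assumes an Ollama instance reachable at the new default URL with llama3 pulled):

```python
import json

import requests

OLLAMA_API_BASE = "http://ollama:11434/api"  # new default from the config hunk

payload = {
    "model": "llama3",
    "messages": [
        {"role": "system", "content": 'Reply only with JSON like {"answer": <number>}.'},
        {"role": "user", "content": "What is 2 + 2?"},
    ],
    "format": "json",  # constrains the model to emit valid JSON
    "stream": False,   # one complete response instead of a chunk stream
}

resp = requests.post(f"{OLLAMA_API_BASE}/chat", json=payload)
resp.raise_for_status()
result_text = resp.json().get("message", {}).get("content", "{}")
print(json.loads(result_text))  # e.g. {"answer": 4}
```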
@@ -232,14 +186,11 @@ class LocalAIModel(AutoModel):
 
     def generate_text(self, message, additional_instructions="", **kwargs):
         """
-        Standard text generation via Ollama.
+        UPDATED: Uses correct /api/chat payload structure
         """
         focus_pk = kwargs.get("focus_object")
-
-        # Build Relational Context
         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
 
-        # Construct System Prompt
         full_system_prompt = (
             f"{self.instructions}. {additional_instructions}\n"
             f"You must strictly adhere to the following context:\n"
@@ -247,41 +198,40 @@ class LocalAIModel(AutoModel):
         )
 
         payload = {
-            "model": self._text_model,
-            "prompt": message,
-            "system": full_system_prompt,
+            "model": "llama3",
+            "messages": [
+                {"role": "system", "content": full_system_prompt},
+                {"role": "user", "content": message},
+            ],
             "stream": False,
             "keep_alive": "24h",
         }
 
         try:
-            response = requests.post(f"{self._ollama_url}/generate", json=payload)
+            response = requests.post(f"{self._ollama_url}/chat", json=payload)
             response.raise_for_status()
-            return response.json().get("response", "")
+            # FIX: Chat API returns 'message' -> 'content'
+            return response.json().get("message", {}).get("content", "")
         except Exception as e:
             log(f"==== LocalAI Text Error: {e} ====", _print=True)
             return "Error generating text."
 
     def summarize_text(self, text, primer="", **kwargs):
         primer = primer or "Summarize the following text concisely."
-
-        # Simple chunking logic (similar to your original)
-        # Note: Mistral-Nemo has a large context window (128k), so chunking
-        # is less necessary than with older models, but we keep it for safety.
-        max_chars = 12000  # Roughly 3k tokens
+        max_chars = 12000
         chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
 
         full_summary = ""
         for chunk in chunks:
             payload = {
-                "model": self._text_model,
-                "prompt": f"{primer}:\n\n{chunk}",
+                "model": "llama3",
+                "messages": [{"role": "user", "content": f"{primer}:\n\n{chunk}"}],
                 "stream": False,
                 "keep_alive": "24h",
             }
             try:
-                res = requests.post(f"{self._ollama_url}/generate", json=payload)
-                full_summary += res.json().get("response", "") + "\n"
+                res = requests.post(f"{self._ollama_url}/chat", json=payload)
+                full_summary += res.json().get("message", {}).get("content", "") + "\n"
             except Exception as e:
                 log(f"Summary Error: {e}", _print=True)
                 break
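The trimmed comment above dropped the sizing rationale, which still holds as a rule of thumb: 12,000 characters is roughly 3,000 tokens under the common ~4-characters-per-token heuristic. The slicing splits on fixed character offsets, as a quick check shows:

```python
# Fixed-offset chunking: a 30,000-character document yields 12k + 12k + 6k
# chunks; at ~4 chars per token, each full chunk is about 3,000 tokens.
text = "x" * 30_000
max_chars = 12_000
chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
assert [len(c) for c in chunks] == [12_000, 12_000, 6_000]
```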
@@ -289,100 +239,59 @@ class LocalAIModel(AutoModel):
         return full_summary
 
     def generate_audio_text(self, audio_file, prompt="", **kwargs):
-        """
-        Sends audio bytes to the Media AI container for Whisper transcription.
-        """
         try:
-            # Prepare the file for upload
-            # audio_file is likely bytes, so we wrap in BytesIO if needed
             if isinstance(audio_file, bytes):
                 f_obj = io.BytesIO(audio_file)
             else:
                 f_obj = audio_file
-
             files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
-
             response = requests.post(f"{self._media_url}/transcribe", files=files)
             response.raise_for_status()
             return response.json().get("text", "")
-
         except Exception as e:
             log(f"STT Error: {e}", _print=True)
             return ""
 
     def generate_audio(self, prompt, voice=None, **kwargs):
-        """
-        Sends text to the Media AI container for TTS.
-        """
         voice = voice or random.choice(list(self.VOICES.keys()))
-
         try:
             payload = {"text": prompt, "voice": voice}
             response = requests.post(f"{self._media_url}/tts", json=payload)
             response.raise_for_status()
-
-            # Response content is WAV bytes
             wav_bytes = response.content
-
-            # Convert to MP3 to match your original interface (using pydub)
             audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
             mp3_buffer = io.BytesIO()
             audio.export(mp3_buffer, format="mp3")
             return mp3_buffer.getvalue()
-
         except Exception as e:
             log(f"TTS Error: {e}", _print=True)
             return None
 
     def generate_image(self, prompt, negative_prompt="", **kwargs):
-        """
-        Generates an image using Local AI.
-        If 'files' are provided, performs Image-to-Image generation using the first file as reference.
-        """
         try:
-            # Prepare the multipart data
-            # We send the prompt as a form field
             data = {"prompt": prompt, "negative_prompt": negative_prompt}
             files = {}
-
-            # Check if reference images were passed
             if kwargs.get("files"):
-                # Take the first available file
                 for fn, f_bytes in kwargs.get("files").items():
-                    # If f_bytes is bytes, wrap in IO, else assume it's file-like
                     if isinstance(f_bytes, bytes):
                         file_obj = io.BytesIO(f_bytes)
                     else:
                         file_obj = f_bytes
-
-                    # Add to the request files
-                    # Key must be 'file' to match server.py logic
-                    # TODO: Support multiple images if needed
                     files["file"] = (fn, file_obj, "image/png")
-                    break  # We only support 1 reference image for SD Img2Img
-
-            # Send Request
+                    break
             if files:
-                # Multipart/form-data request (Prompt + File)
                 response = requests.post(
                     f"{self._media_url}/generate-image", data=data, files=files
                 )
             else:
-                # Standard request (Prompt only) - server.py handles request.form vs json
-                # But our updated server expects form data for consistency
                 response = requests.post(f"{self._media_url}/generate-image", data=data)
-
             response.raise_for_status()
-
-            # Returns WebP bytes directly
             return response.content
-
         except Exception as e:
             log(f"Image Gen Error: {e}", _print=True)
             return None
 
     def list_voices(self, filters=[]):
-        # Same logic as before
         if not filters:
             return list(self.VOICES.keys())
         voices = []
autonomous/db/__init__.py CHANGED
@@ -13,6 +13,7 @@ from autonomous.db import (
     signals,
 )
 from autonomous.db.connection import *  # noqa: F401
+from autonomous.db.db_sync import *  # noqa: F401
 from autonomous.db.document import *  # noqa: F401
 from autonomous.db.errors import *  # noqa: F401
 from autonomous.db.fields import *  # noqa: F401
@@ -29,14 +30,11 @@ __all__ = (
 )
 
 
-VERSION = (0, 29, 0)
+VERSION = (0, 30, 0)
 
 
 def get_version():
-    """Return the VERSION as a string.
-
-    For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.
-    """
+    """Return the VERSION as a string."""
     return ".".join(map(str, VERSION))
 
 
autonomous/db/db_sync.py CHANGED
@@ -46,8 +46,8 @@ def process_single_object_sync(object_id, collection_name, token):
     token_key = f"sync_token:{collection_name}:{str_id}"
 
     # 1. THE DEBOUNCE WAIT (Happens in background)
-    print(f"Debouncing {str_id} for 5 seconds...")
-    time.sleep(5)
+    print(f"Debouncing {str_id} for 2 seconds...")
+    time.sleep(2)
 
     # 2. THE VERIFICATION
     # Check if a newer save happened while we slept
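The shortened sleep tightens the debounce window from 5s to 2s but leaves the mechanism intact: every save stamps a fresh token under `sync_token:<collection>:<id>`, and the delayed worker only proceeds if its own token is still the latest. A self-contained sketch of that handshake (token generation via `uuid` is an assumption; the package's actual scheme isn't shown in these hunks):

```python
import time
import uuid

import redis

r = redis.Redis(host="cachedb", port=6379)  # hostname borrowed from AutoTasks.config

token_key = "sync_token:objects:abc123"  # hypothetical collection/id pair

# Trigger side (request_indexing): stamp a token and return immediately.
my_token = str(uuid.uuid4())
r.set(token_key, my_token, ex=300)

# Worker side (process_single_object_sync): debounce, then verify.
time.sleep(2)
latest = r.get(token_key)
if latest is not None and latest.decode() != my_token:
    print("Superseded by a newer save; skipping this sync.")
else:
    print("Token still current; proceed with indexing.")
```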
@@ -110,6 +110,7 @@ def request_indexing(object_id, collection_name):
     THE TRIGGER FUNCTION (Runs in Main App).
     MUST BE FAST. NO SLEEPING HERE.
     """
+    print("Requesting Indexing...")
     # Import your Queue Wrapper
     from autonomous.tasks.autotask import AutoTasks
 
@@ -126,13 +127,13 @@ def request_indexing(object_id, collection_name):
     r.set(token_key, current_token, ex=300)
 
     # 3. ENQUEUE THE TASK (Instant)
-    # CRITICAL CHANGE: We use task_runner.task() instead of calling the function directly.
     try:
         task_runner.task(
             process_single_object_sync,  # The function to run later
             object_id=str_id,
             collection_name=collection_name,
             token=current_token,
+            priority=TaskPriority.LOW
         )
         return True
     except Exception as e:
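One caveat on the new `priority=TaskPriority.LOW` argument: for that name to resolve inside `request_indexing`, db_sync.py must import the enum alongside `AutoTasks`, and the hunk above doesn't show such an import. A sketch of the presumably intended form of the existing deferred import:

```python
# Inside request_indexing(), mirroring the deferred import already shown;
# TaskPriority is defined in autonomous/tasks/autotask.py (next file below).
from autonomous.tasks.autotask import AutoTasks, TaskPriority
```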
autonomous/tasks/autotask.py CHANGED
@@ -1,8 +1,15 @@
 import os
+from enum import Enum
 from redis import Redis
 from rq import Queue
 from rq.job import Job
 
+# 1. Define Priorities clearly
+class TaskPriority(Enum):
+    HIGH = "high"
+    DEFAULT = "default"
+    LOW = "low"
+
 class AutoTask:
     def __init__(self, job):
         self.job = job
@@ -17,7 +24,6 @@ class AutoTask:
 
     @property
     def result(self):
-        # Simplified result fetching
         return {
             "id": self.id,
             "return_value": self.job.result,
@@ -27,9 +33,8 @@ class AutoTasks:
 
 class AutoTasks:
     _connection = None
-    queue = None
+    # We remove the single 'queue' class attribute because we now have multiple
 
-    # Config stays the same
     config = {
         "host": os.environ.get("REDIS_HOST", "cachedb"),
         "port": os.environ.get("REDIS_PORT", 6379),
@@ -38,32 +43,40 @@ class AutoTasks:
         "db": os.environ.get("REDIS_DB", 0),
     }
 
-    def __init__(self, queue_name="default"):
+    def __init__(self):
+        # Establish connection once (Singleton pattern logic)
         if not AutoTasks._connection:
             options = {}
             if AutoTasks.config.get("password"):
                 options["password"] = AutoTasks.config.get("password")
 
-            # Create Redis Connection
             AutoTasks._connection = Redis(
                 host=AutoTasks.config.get("host"),
                 port=AutoTasks.config.get("port"),
-                decode_responses=False,  # RQ requires bytes, not strings
+                decode_responses=False,
                 **options,
             )
 
-        # Initialize Queue
-        AutoTasks.queue = Queue(queue_name, connection=AutoTasks._connection)
+    def _get_queue(self, priority_name):
+        """Helper to get or create the queue object for a specific priority"""
+        return Queue(priority_name, connection=AutoTasks._connection)
 
     def task(self, func, *args, **kwargs):
         """
-        Enqueues a job to Redis. Does NOT start a worker.
+        Enqueues a job.
+        kwarg 'priority' determines the queue (default: 'default').
         """
         job_timeout = kwargs.pop("_task_job_timeout", 3600)
 
-        # Enqueue the job
-        # func can be a string path or the function object itself
-        job = AutoTasks.queue.enqueue(
+        # 2. Extract Priority (support Enum or string)
+        priority = kwargs.pop("priority", TaskPriority.DEFAULT)
+        queue_name = priority.value if isinstance(priority, TaskPriority) else priority
+
+        # 3. Get the specific queue
+        q = self._get_queue(queue_name)
+
+        # 4. Enqueue
+        job = q.enqueue(
             func,
             args=args,
             kwargs=kwargs,
@@ -77,4 +90,4 @@ class AutoTasks:
             job = Job.fetch(job_id, connection=AutoTasks._connection)
             return AutoTask(job)
         except Exception:
-            return None
+            return None
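A note on semantics: `TaskPriority` selects which RQ queue a job lands in; the actual prioritization comes from worker start order, since an RQ worker drains its queues strictly in the order they are listed on the command line (`rq worker high default low`). A usage sketch against the new API (the job function is hypothetical and must live in a module the worker can import):

```python
from autonomous.tasks.autotask import AutoTasks, TaskPriority

def reindex_report(address):
    # Hypothetical job; RQ workers import this by dotted path at run time.
    print(f"reindexing for {address}")

tasks = AutoTasks()
tasks.task(reindex_report, "admin@example.com", priority=TaskPriority.HIGH)
tasks.task(reindex_report, "audit@example.com")                    # -> 'default' queue
tasks.task(reindex_report, "metrics@example.com", priority="low")  # plain strings also accepted
```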
autonomous/tasks/task_router.py CHANGED
@@ -1,5 +1,5 @@
 import re
-import tasks
+
 
 class TaskRouterBase:
     """
@@ -9,7 +9,6 @@ class TaskRouterBase:
 
     # Format: (Regex Pattern, Function Object)
 
-
     @classmethod
     def resolve(cls, path):
         """
@@ -22,5 +21,4 @@ class TaskRouterBase:
                 return func, match.groupdict()
         return None, None
 
-
-ROUTES = []
+    ROUTES = []
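For context on `TaskRouterBase` (whose full `resolve` body isn't shown in this diff): subclasses declare `ROUTES` as `(compiled regex, function)` pairs, and `resolve` appears to return the first matching function together with the pattern's named groups. A hypothetical subclass illustrating that contract:

```python
import re

from autonomous.tasks.task_router import TaskRouterBase

def sync_object(object_id):
    # Hypothetical handler; receives the pattern's named groups as kwargs.
    print(f"syncing {object_id}")

class SyncRouter(TaskRouterBase):
    ROUTES = [
        (re.compile(r"^sync/(?P<object_id>\w+)$"), sync_object),
    ]

func, params = SyncRouter.resolve("sync/abc123")
if func:
    func(**params)  # -> syncing abc123
```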
autonomous_app-0.3.30.dist-info/METADATA → autonomous_app-0.3.32.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.30
+Version: 0.3.32
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous
autonomous_app-0.3.30.dist-info/RECORD → autonomous_app-0.3.32.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
-autonomous/__init__.py,sha256=yFrrhxW5lT1IN2kECktEvBt4l8fh93BpQSg-lHky7BQ,95
+autonomous/__init__.py,sha256=o0OBgjtEx3l3_EWwzavqnwIF8TyTqfqpMo9MyaB2SOY,95
 autonomous/cli.py,sha256=z4AaGeWNW_uBLFAHng0J_lfS9v3fXemK1PeT85u4Eo4,42
 autonomous/logger.py,sha256=NQtgEaTWNAWfLSgqSP7ksXj1GpOuCgoUV711kSMm-WA,2022
 autonomous/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -11,7 +11,7 @@ autonomous/ai/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 autonomous/ai/models/aws.py,sha256=bGDjnGTm350zOqor9IsICzUkBUN2bubGI_ZssQuSXIw,12715
 autonomous/ai/models/deepseek.py,sha256=fkoi-hJp60yFlZ9Cb9PdUrmNSErYltQ5ezkUI75llXc,2734
 autonomous/ai/models/gemini.py,sha256=jrTMbh8SAdzzz27elOhs82iwjyutYcy8fvTOSdW-GFQ,14247
-autonomous/ai/models/local_model.py,sha256=xBwj2f_gvQQhL5YuTHZiStPAunDiPfpjOj_SDPajMN0,14710
+autonomous/ai/models/local_model.py,sha256=jWLTHBLb-6GtrMeDNctUTucX7HFZfla5NsfGfFsNPJ0,11297
 autonomous/ai/models/openai.py,sha256=2-LttCm6woGklaLbs1H5LjlbfM-7leDwGmC9vksSqW4,13135
 autonomous/apis/version_control/GHCallbacks.py,sha256=AyiUlYfV5JePi11GVyqYyXoj5UTbPKzS-HRRI94rjJo,1069
 autonomous/apis/version_control/GHOrganization.py,sha256=mi2livdsGurKiifbvuLwiFbdDzL77IlEfhwEa-tG77I,1155
@@ -23,11 +23,11 @@ autonomous/auth/autoauth.py,sha256=OizuMhmFjNzmsUijIbjGcQ5FxzVeoy9-NMFsx_TDsOE,3
 autonomous/auth/github.py,sha256=dHf84bJdV9rXGcvRLzWCPW9CvuA-VEmqYi_QQFwd2kY,886
 autonomous/auth/google.py,sha256=cHmqbyNEPTKipc3WkYcD1XPOyqcWEFW0Ks4qJYmGvPw,1049
 autonomous/auth/user.py,sha256=1yDu04yNSURzBzok6C5Dn-_mv0fGefvjrxj9ikCktqY,2726
-autonomous/db/__init__.py,sha256=jn6bmC6lM-Q9RxW_1WXmj6ogwQYd_5HDIDbAGLXlbu4,1172
+autonomous/db/__init__.py,sha256=2mNynmYV0I_J3-W4Aw1cojAQrHf4aHZT1Ow9xUdmM18,1154
 autonomous/db/common.py,sha256=BUN2x_XuQBRFcq54TGPx4yLMLJdgytdbIt07QWr4CSM,2551
 autonomous/db/connection.py,sha256=j_-eMre4ade9Y8GejJcMbQQiSEimL4j2vIQxaXViKxI,17754
 autonomous/db/context_managers.py,sha256=_nH2ajCL8Xy90AuB2rKaryR4iF8Q8ksU3Nei_mZj-DE,9918
-autonomous/db/db_sync.py,sha256=wURiDhfI_RfYDDcdRtYLZbjmeK0hUn-9A4IrI0sfi4o,4363
+autonomous/db/db_sync.py,sha256=o_7Fw-6eL-Wco-ktSFneq2CRQNuOneO1f4YzgOa4Wcs,4346
 autonomous/db/dereference.py,sha256=EgbpPCXtDZqD_ZuY1Wd4o3ltRy8qEo3C5yRh5_c9fLE,12776
 autonomous/db/document.py,sha256=oZKdTaoqwv9fCHiv450rIxgINASQF3J9FzIsUOUXHhw,44428
 autonomous/db/errors.py,sha256=_QeCotid1kmr7_W0QyH6NUrwwYN9eced_yyyiop0Xlw,4108
@@ -55,10 +55,10 @@ autonomous/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 autonomous/storage/imagestorage.py,sha256=SmBjBNBlP1ZEjxdOnGVzCHZhbEhMKTUQC2TbpWbejDE,6168
 autonomous/storage/localstorage.py,sha256=FzrR6O9mMGAZt5dDgqzkeOQVfGRXCygR0kksz2MPpwE,2286
 autonomous/tasks/__init__.py,sha256=pn7iZ14MhcHUdzcLkfkd4-45wgPP0tXahAz_cFgb_Tg,32
-autonomous/tasks/autotask.py,sha256=9Fi7juGEEq8OVEQYES7sEkU21bkBhtBAIQ-Js5fMXDc,2193
-autonomous/tasks/task_router.py,sha256=-MrohYTwjj6oTvhRytod6JFCzojAH6dGDnbIwrb18mQ,614
+autonomous/tasks/autotask.py,sha256=2zRaqHYqfdlgC_BQm6B6D2svN1ukyWeJJHwweZFHVoo,2616
+autonomous/tasks/task_router.py,sha256=W09HtRUuhwlnGxM5w4l6Hzw6mfS6L4ljWiMzD3ZVFeU,601
 autonomous/utils/markdown.py,sha256=tf8vlHARiQO1X_aGbqlYozzP_TbdiDRT9EEP6aFRQo0,2153
-autonomous_app-0.3.30.dist-info/METADATA,sha256=lRnKwOxeMj3hzPPrUEuob3c3QbnSaUjPjqrkfFumGDg,3024
-autonomous_app-0.3.30.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
-autonomous_app-0.3.30.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
-autonomous_app-0.3.30.dist-info/RECORD,,
+autonomous_app-0.3.32.dist-info/METADATA,sha256=d3w42OsH0w0u6PpAY8ADR1SJSeLsee0f-UbbfieVWfE,3024
+autonomous_app-0.3.32.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+autonomous_app-0.3.32.dist-info/top_level.txt,sha256=ZyxWWDdbvZekF3UFunxl4BQsVDb_FOW3eTn0vun_jb4,11
+autonomous_app-0.3.32.dist-info/RECORD,,