autonomous-app 0.3.29__tar.gz → 0.3.30__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/PKG-INFO +2 -2
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/requirements.txt +1 -1
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/__init__.py +1 -1
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/baseagent.py +7 -8
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/models/local_model.py +155 -35
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/__init__.py +1 -0
- autonomous_app-0.3.30/src/autonomous/db/db_sync.py +140 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/model/automodel.py +4 -0
- autonomous_app-0.3.30/src/autonomous/tasks/autotask.py +80 -0
- autonomous_app-0.3.30/src/autonomous/tasks/task_router.py +26 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/PKG-INFO +2 -2
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/SOURCES.txt +2 -1
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/requires.txt +1 -1
- autonomous_app-0.3.29/src/autonomous/model/__init__.py +0 -1
- autonomous_app-0.3.29/src/autonomous/tasks/autotask.py +0 -144
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/README.md +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/pyproject.toml +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/setup.cfg +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/setup.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/audioagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/imageagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/jsonagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/models/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/models/aws.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/models/deepseek.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/models/gemini.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/models/openai.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/textagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHCallbacks.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHOrganization.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHRepo.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHVersionControl.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/auth/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/auth/autoauth.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/auth/github.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/auth/google.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/auth/user.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/cli.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/base/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/base/common.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/base/datastructures.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/base/document.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/base/fields.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/base/metaclasses.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/base/utils.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/common.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/connection.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/context_managers.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/dereference.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/document.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/errors.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/fields.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/mongodb_support.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/pymongo_support.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/queryset/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/queryset/base.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/queryset/field_list.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/queryset/manager.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/queryset/queryset.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/queryset/transform.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/queryset/visitor.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/db/signals.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/logger.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/model/autoattr.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/storage/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/storage/imagestorage.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/storage/localstorage.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/tasks/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/utils/markdown.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/dependency_links.txt +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/top_level.txt +0 -0
{autonomous_app-0.3.29 → autonomous_app-0.3.30}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.29
+Version: 0.3.30
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous
@@ -24,8 +24,8 @@ Requires-Dist: gunicorn
 Requires-Dist: Authlib
 Requires-Dist: rq
 Requires-Dist: ollama
-Requires-Dist: openai>=1.42
 Requires-Dist: google-genai
+Requires-Dist: sentence-transformers
 Requires-Dist: dateparser
 Requires-Dist: python-slugify
 Requires-Dist: pydub
{autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/baseagent.py

@@ -23,12 +23,11 @@ class BaseAgent(AutoModel):
         return self.get_client().id

     def get_client(self):
-
-
-
-
-
-
-
-        self.save()
+        self.client = self._ai_model(
+            name=self.name,
+            instructions=self.instructions,
+            description=self.description,
+        )
+        self.client.save()
+        self.save()
         return self.client
{autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/ai/models/local_model.py

@@ -2,10 +2,12 @@ import io
 import json
 import os
 import random
-import re
-import wave

+import numpy as np
+import pymongo
+import redis
 import requests
+from bson.objectid import ObjectId
 from pydub import AudioSegment

 from autonomous import log
@@ -14,21 +16,22 @@ from autonomous.model.automodel import AutoModel


 class LocalAIModel(AutoModel):
-
+    messages = ListAttr(StringAttr(default=[]))
+    name = StringAttr(default="agent")
+    instructions = StringAttr(default="You are a helpful AI.")
+    description = StringAttr(default="A Local AI Model using Ollama and Media AI.")
+
+    # Config
     _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama_internal:11434/api")
     _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai_internal:5005")
-
-    # Models to use in Ollama
     _text_model = "mistral-nemo"
     _json_model = "mistral-nemo"

-
-
-
-
-    )
+    # DB Connections
+    _mongo_client = pymongo.MongoClient("mongodb://db:27017/")
+    _mongo_db = os.getenv("DB_DB", "default")
+    _redis = redis.Redis(host="cachedb", port=6379, decode_responses=True)

-    # Keep your voice list (mapped to random seeds/embeddings in the future)
     VOICES = {
         "Zephyr": ["female"],
         "Puck": ["male"],
@@ -67,12 +70,123 @@ class LocalAIModel(AutoModel):
         Ollama doesn't support 'tools' strictly yet.
         We convert the tool definition into a system prompt instruction.
         """
+        # If the user passes a raw dictionary (like a Gemini tool definition)
+        # we extract the relevant parts for the schema.
         schema = {
             "name": user_function.get("name"),
-            "
+            "description": user_function.get("description", ""),
+            "parameters": user_function.get("parameters", {}),
         }
         return json.dumps(schema, indent=2)

+    def get_embedding(self, text):
+        try:
+            res = requests.post(f"{self._media_url}/embeddings", json={"text": text})
+            res.raise_for_status()
+            return res.json()["embedding"]
+        except Exception as e:
+            log(f"Embedding Error: {e}", _print=True)
+            return []
+
+    def build_hybrid_context(self, prompt, focus_object_id=None):
+        """
+        Builds context based on RELATIONAL ASSOCIATIONS + SEMANTIC LORE.
+        """
+
+        # 1. Create a Cache Key based on what defines the "Scene"
+        # We assume 'focus_object_id' + rough prompt length captures the context enough
+        cache_key = f"ctx:{focus_object_id}:{len(prompt) // 50}"
+
+        # 2. Check Cache
+        cached_ctx = self._redis.get(cache_key)
+        if cached_ctx:
+            return cached_ctx
+
+        context_str = ""
+
+        # --- PART 1: MONGODB (Relational Associations) ---
+        # If we are focusing on a specific object, fetch it and its specific refs.
+        if focus_object_id:
+            try:
+                # 1. Fetch the Main Object
+                # Handle both string ID and ObjectId
+                oid = (
+                    ObjectId(focus_object_id)
+                    if isinstance(focus_object_id, str)
+                    else focus_object_id
+                )
+
+                main_obj = self._mongo_db.objects.find_one({"_id": oid})
+
+                if main_obj:
+                    # Start the context with the main object itself
+                    context_str += "### FOCUS OBJECT ###\n"
+                    context_str += prompt
+
+                    # 2. Extract References (Associations)
+                    # 1. Start with the main list
+                    ref_ids = main_obj.get("associations", []) or []
+
+                    # 2. Safely add single fields (if they exist)
+                    if world_id := main_obj.get("world"):
+                        ref_ids.append(world_id)
+
+                    # 3. Safely add lists (ensure they are lists)
+                    ref_ids.extend(main_obj.get("stories", []) or [])
+                    ref_ids.extend(main_obj.get("events", []) or [])
+
+                    if ref_ids:
+                        # Convert all to ObjectIds if they are strings
+                        valid_oids = []
+                        for rid in ref_ids:
+                            try:
+                                valid_oids.append(
+                                    ObjectId(rid) if isinstance(rid, str) else rid
+                                )
+                            except:
+                                pass
+
+                        # 3. Fetch all associated objects in ONE query
+                        if valid_oids:
+                            associated_objs = self._mongo_db.objects.find(
+                                {"_id": {"$in": valid_oids}}
+                            )
+
+                            context_str += "\n### ASSOCIATED REFERENCES ###\n"
+                            for obj in associated_objs:
+                                log(f"Associated Obj: {obj}", _print=True)
+                                context_str += f"- {obj}\n"
+
+                    context_str += "\n"
+            except Exception as e:
+                log(f"Mongo Association Error: {e}", _print=True)
+
+        # --- PART 2: REDIS (Semantic Search) ---
+        # We keep this! It catches "Lore" or "Rules" that aren't explicitly linked in the DB.
+        # e.g., If the sword is "Elven", this finds "Elven History" even if not linked by ID.
+        if len(prompt) > 10:
+            vector = self.get_embedding(prompt)
+            if vector:
+                try:
+                    q = "*=>[KNN 2 @vector $blob AS score]" # Lowered to 2 to save tokens
+                    params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
+                    results = self._redis.ft("search_index").search(
+                        q, query_params=params
+                    )
+
+                    if results.docs:
+                        context_str += "### RELEVANT LORE ###\n"
+                        for doc in results.docs:
+                            context_str += f"- {doc.content}\n"
+                except Exception as e:
+                    pass
+
+        # 3. Save to Cache (Expire in 60s)
+        # This prevents hammering the DB/Vector engine during a rapid conversation
+        self._redis.set(cache_key, context_str, ex=120)
+
+        return context_str
+
     def generate_json(self, message, function, additional_instructions="", **kwargs):
         """
         Mimics Gemini's tool use by forcing Ollama into JSON mode
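Note: the KNN query in build_hybrid_context assumes a RediSearch index named "search_index" already exists over the "lore:*" hashes that db_sync.py (below) writes. A minimal bootstrap sketch, not part of the package; the embedding dimension and index layout are assumptions inferred from the hash fields used in this diff:

import redis
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType

r = redis.Redis(host="cachedb", port=6379)

# One-time index bootstrap over the "lore:*" hashes written by db_sync.py.
# DIM must match the embedding size returned by the media AI container (384 is an assumption).
r.ft("search_index").create_index(
    fields=[
        TextField("content"),
        VectorField(
            "vector",
            "FLAT",
            {"TYPE": "FLOAT32", "DIM": 384, "DISTANCE_METRIC": "COSINE"},
        ),
    ],
    definition=IndexDefinition(prefix=["lore:"], index_type=IndexType.HASH),
)
# The parameterized KNN syntax used above ("$blob") generally requires query dialect 2
# on current RediSearch / Redis Stack releases.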
@@ -80,19 +194,28 @@ class LocalAIModel(AutoModel):
         """
         schema_str = self._convert_tools_to_json_schema(function)

-
+        focus_pk = kwargs.get("focus_object")
+
+        # Build Relational Context
+        world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
+
+        # Construct System Prompt
+        full_system_prompt = (
             f"{self.instructions}. {additional_instructions}\n"
             f"You must respond strictly with a valid JSON object matching this schema:\n"
             f"{schema_str}\n"
             f"Do not include markdown formatting or explanations."
+            f"You must strictly adhere to the following context:\n"
+            f"{world_context}"
         )

         payload = {
             "model": self._json_model,
             "prompt": message,
-            "system":
+            "system": full_system_prompt,
             "format": "json", # Force JSON mode
             "stream": False,
+            "keep_alive": "24h",
         }

         try:
@@ -111,25 +234,26 @@ class LocalAIModel(AutoModel):
         """
         Standard text generation via Ollama.
         """
+        focus_pk = kwargs.get("focus_object")
+
+        # Build Relational Context
+        world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
+
+        # Construct System Prompt
+        full_system_prompt = (
+            f"{self.instructions}. {additional_instructions}\n"
+            f"You must strictly adhere to the following context:\n"
+            f"{world_context}"
+        )
+
         payload = {
             "model": self._text_model,
             "prompt": message,
-            "system":
+            "system": full_system_prompt,
             "stream": False,
+            "keep_alive": "24h",
         }

-        # Handle 'files' (Ollama supports images in base64, but not arbitrary files easily yet)
-        # If files are text, you should read them and append to prompt.
-        if file_list := kwargs.get("files"):
-            for file_dict in file_list:
-                fn = file_dict["name"]
-                fileobj = file_dict["file"]
-                if fn.lower().endswith((".txt", ".md", ".json", ".csv")):
-                    content = fileobj.read()
-                    if isinstance(content, bytes):
-                        content = content.decode("utf-8", errors="ignore")
-                    payload["prompt"] += f"\n\nContents of {fn}:\n{content}"
-
         try:
             response = requests.post(f"{self._ollama_url}/generate", json=payload)
             response.raise_for_status()
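Note: for orientation, a hedged usage sketch of the new context-aware generation path. The constructor arguments, the additional_instructions keyword, and the focus object below are illustrative assumptions, not API documented by this diff; "scene" stands for any saved AutoModel instance whose MongoDB document carries associations.

model = LocalAIModel(name="narrator", instructions="You are a helpful AI.")
model.save()

reply = model.generate_text(
    "Describe the current scene in two sentences.",
    additional_instructions="Stay in character.",
    focus_object=str(scene.pk),  # hypothetical object; its document is fetched for relational context
)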
@@ -153,6 +277,7 @@ class LocalAIModel(AutoModel):
             "model": self._text_model,
             "prompt": f"{primer}:\n\n{chunk}",
             "stream": False,
+            "keep_alive": "24h",
         }
         try:
             res = requests.post(f"{self._ollama_url}/generate", json=payload)
@@ -209,7 +334,7 @@ class LocalAIModel(AutoModel):
             log(f"TTS Error: {e}", _print=True)
             return None

-    def generate_image(self, prompt, **kwargs):
+    def generate_image(self, prompt, negative_prompt="", **kwargs):
         """
         Generates an image using Local AI.
         If 'files' are provided, performs Image-to-Image generation using the first file as reference.
@@ -217,7 +342,7 @@ class LocalAIModel(AutoModel):
         try:
             # Prepare the multipart data
             # We send the prompt as a form field
-            data = {"prompt": prompt}
+            data = {"prompt": prompt, "negative_prompt": negative_prompt}
             files = {}

             # Check if reference images were passed
@@ -232,6 +357,7 @@ class LocalAIModel(AutoModel):

                     # Add to the request files
                     # Key must be 'file' to match server.py logic
+                    # TODO: Support multiple images if needed
                     files["file"] = (fn, file_obj, "image/png")
                     break # We only support 1 reference image for SD Img2Img

@@ -264,9 +390,3 @@ class LocalAIModel(AutoModel):
             if any(f.lower() in attribs for f in filters):
                 voices.append(voice)
         return voices
-
-    # Unused methods from original that don't apply to Local AI
-    def upload(self, file):
-        # Local models don't really have a "File Store" API like Gemini.
-        # We handle context by passing text directly in prompt.
-        pass
autonomous_app-0.3.30/src/autonomous/db/db_sync.py (new file)

@@ -0,0 +1,140 @@
+import os
+import time
+import urllib.parse
+import uuid
+from datetime import datetime
+
+import numpy as np
+import pymongo
+import redis
+import requests
+
+# CONFIGURATION
+db_host = os.getenv("DB_HOST", "db")
+db_port = os.getenv("DB_PORT", 27017)
+password = urllib.parse.quote_plus(str(os.getenv("DB_PASSWORD")))
+username = urllib.parse.quote_plus(str(os.getenv("DB_USERNAME")))
+MEDIA_URL = "http://media_ai_internal:5005"
+REDIS_HOST = os.getenv("REDIS_HOST", "cachedb")
+MONGO_URI = f"mongodb://{username}:{password}@{db_host}:{db_port}/?authSource=admin"
+
+# DB SETUP
+r = redis.Redis(host=REDIS_HOST, port=6379, decode_responses=True)
+
+mongo = pymongo.MongoClient(MONGO_URI)
+db = mongo[os.getenv("DB_DB")]
+# connect(host=f"mongodb://{username}:{password}@{host}:{port}/{dbname}?authSource=admin")
+
+
+def get_vector(text):
+    """Helper to get embedding from your Media AI container"""
+    try:
+        resp = requests.post(f"{MEDIA_URL}/embeddings", json={"text": text}, timeout=30)
+        if resp.status_code == 200:
+            return resp.json()["embedding"]
+    except Exception as e:
+        print(f"Vector Gen Failed: {e}")
+    return None
+
+
+def process_single_object_sync(object_id, collection_name, token):
+    """
+    THE WORKER FUNCTION (Runs in Background).
+    It is safe to sleep here because we are not in the web request.
+    """
+    str_id = str(object_id)
+    token_key = f"sync_token:{collection_name}:{str_id}"
+
+    # 1. THE DEBOUNCE WAIT (Happens in background)
+    print(f"Debouncing {str_id} for 5 seconds...")
+    time.sleep(5)
+
+    # 2. THE VERIFICATION
+    # Check if a newer save happened while we slept
+    current_active_token = r.get(token_key)
+
+    if current_active_token != token:
+        print(f"Skipping sync for {str_id}: Superseded by a newer save.")
+        return
+
+    # 3. THE EXECUTION (Embedding generation)
+    print(f"Processing Sync for: {str_id} in {collection_name}")
+
+    from bson.objectid import ObjectId
+
+    # FIX: Use dynamic collection access instead of db.objects
+    try:
+        # Tries to convert string ID to ObjectId.
+        # If your DB uses String IDs, remove the ObjectId() wrapper.
+        oid = ObjectId(object_id)
+        doc = db[collection_name].find_one({"_id": oid})
+    except Exception:
+        # Fallback if ID is not a valid ObjectId string
+        doc = db[collection_name].find_one({"_id": object_id})
+
+    if not doc:
+        print(f"Object {object_id} not found in collection '{collection_name}'")
+        # Optional: Remove from Redis index if it exists
+        r.delete(f"lore:{object_id}")
+        return
+
+    # 2. Construct Searchable Text
+    # (Existing logic...)
+    searchable_text = (
+        f"{doc.get('name', '')}: {doc.get('description', '')} {doc.get('history', '')}"
+    )
+
+    if len(searchable_text) < 10:
+        return
+
+    # 3. Generate Vector
+    vector = get_vector(searchable_text)
+
+    # 4. Save to Redis Index
+    if vector:
+        r.hset(
+            f"lore:{object_id}",
+            mapping={
+                "mongo_id": str(object_id),
+                "collection": collection_name, # Useful for debugging
+                "content": searchable_text,
+                "vector": np.array(vector, dtype=np.float32).tobytes(),
+                "last_synced": datetime.utcnow().isoformat(),
+            },
+        )
+        print(f"Successfully Indexed: {doc.get('name')}")
+
+
+def request_indexing(object_id, collection_name):
+    """
+    THE TRIGGER FUNCTION (Runs in Main App).
+    MUST BE FAST. NO SLEEPING HERE.
+    """
+    # Import your Queue Wrapper
+    from autonomous.tasks.autotask import AutoTasks
+
+    # Initialize the Task Runner
+    task_runner = AutoTasks()
+
+    str_id = str(object_id)
+    token_key = f"sync_token:{collection_name}:{str_id}"
+
+    # 1. GENERATE NEW TOKEN
+    current_token = str(uuid.uuid4())
+
+    # 2. SAVE TOKEN TO REDIS (Instant)
+    r.set(token_key, current_token, ex=300)
+
+    # 3. ENQUEUE THE TASK (Instant)
+    # CRITICAL CHANGE: We use task_runner.task() instead of calling the function directly.
+    try:
+        task_runner.task(
+            process_single_object_sync, # The function to run later
+            object_id=str_id,
+            collection_name=collection_name,
+            token=current_token,
+        )
+        return True
+    except Exception as e:
+        print(f"Sync Enqueue failed: {e}")
+        return False
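Note: request_indexing only enqueues the debounced job; unlike the removed AutoTasks.create_worker (see the deleted autotask.py at the end of this diff), nothing in 0.3.30 starts a worker process. A sketch of running one against the same Redis, with the host and queue name taken from the defaults above; the deployment details are assumptions:

from redis import Redis
from rq import Queue, Worker

conn = Redis(host="cachedb", port=6379)  # RQ needs raw bytes, so leave decode_responses at its False default
Worker([Queue("default", connection=conn)], connection=conn).work()

# Shell equivalent: rq worker default --url redis://cachedb:6379/0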
{autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous/model/automodel.py

@@ -8,6 +8,7 @@ from autonomous import log
 from autonomous.db import Document, connect, signals
 from autonomous.db.errors import ValidationError
 from autonomous.db.fields import DateTimeField
+from autonomous.db import db_sync

 host = os.getenv("DB_HOST", "db")
 port = os.getenv("DB_PORT", 27017)
@@ -240,6 +241,9 @@ class AutoModel(Document):
         """
         obj = super().save()
         self.pk = obj.pk
+
+        db_sync.request_indexing(self.pk, collection_name=self._get_collection_name())
+
         return self.pk

     @classmethod
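Note: in practice this means every AutoModel.save() now schedules a background embedding sync. A hypothetical model to illustrate the flow; the class, its fields, and the autoattr import path are assumptions for illustration only:

from autonomous.model.automodel import AutoModel
from autonomous.model.autoattr import StringAttr  # import path assumed from the package layout

class Character(AutoModel):  # hypothetical model
    name = StringAttr(default="")
    description = StringAttr(default="")

c = Character(name="Zephyr", description="A wandering bard.")
c.save()  # persists the document, then db_sync.request_indexing() enqueues a debounced sync task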
autonomous_app-0.3.30/src/autonomous/tasks/autotask.py (new file)

@@ -0,0 +1,80 @@
+import os
+from redis import Redis
+from rq import Queue
+from rq.job import Job
+
+class AutoTask:
+    def __init__(self, job):
+        self.job = job
+
+    @property
+    def id(self):
+        return self.job.id
+
+    @property
+    def status(self):
+        return self.job.get_status()
+
+    @property
+    def result(self):
+        # Simplified result fetching
+        return {
+            "id": self.id,
+            "return_value": self.job.result,
+            "status": self.status,
+            "error": self.job.exc_info
+        }
+
+class AutoTasks:
+    _connection = None
+    queue = None
+
+    # Config stays the same
+    config = {
+        "host": os.environ.get("REDIS_HOST", "cachedb"),
+        "port": os.environ.get("REDIS_PORT", 6379),
+        "password": os.environ.get("REDIS_PASSWORD"),
+        "username": os.environ.get("REDIS_USERNAME"),
+        "db": os.environ.get("REDIS_DB", 0),
+    }
+
+    def __init__(self, queue_name="default"):
+        if not AutoTasks._connection:
+            options = {}
+            if AutoTasks.config.get("password"):
+                options["password"] = AutoTasks.config.get("password")
+
+            # Create Redis Connection
+            AutoTasks._connection = Redis(
+                host=AutoTasks.config.get("host"),
+                port=AutoTasks.config.get("port"),
+                decode_responses=False, # RQ requires bytes, not strings
+                **options,
+            )
+
+        # Initialize Queue
+        AutoTasks.queue = Queue(queue_name, connection=AutoTasks._connection)
+
+    def task(self, func, *args, **kwargs):
+        """
+        Enqueues a job to Redis. Does NOT start a worker.
+        """
+        job_timeout = kwargs.pop("_task_job_timeout", 3600)
+
+        # Enqueue the job
+        # func can be a string path or the function object itself
+        job = AutoTasks.queue.enqueue(
+            func,
+            args=args,
+            kwargs=kwargs,
+            job_timeout=job_timeout
+        )
+
+        return AutoTask(job)
+
+    def get_task(self, job_id):
+        try:
+            job = Job.fetch(job_id, connection=AutoTasks._connection)
+            return AutoTask(job)
+        except Exception:
+            return None
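Note: a hedged usage sketch of the slimmed-down AutoTasks wrapper; the example function is hypothetical and must live in a module the worker process can import, and results only become available once a separate rq worker has run the job.

from autonomous.tasks.autotask import AutoTasks

def add(x, y):  # hypothetical job function
    return x + y

runner = AutoTasks()           # connects to Redis ("cachedb", "default" queue by default)
task = runner.task(add, 2, 3)  # enqueue only; a separate `rq worker` process executes it
print(task.id, task.status)    # e.g. 'queued'

# Later, from any process sharing the same Redis:
print(AutoTasks().get_task(task.id).result)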
autonomous_app-0.3.30/src/autonomous/tasks/task_router.py (new file)

@@ -0,0 +1,26 @@
+import re
+import tasks
+
+class TaskRouterBase:
+    """
+    Maps URL paths to Task Functions.
+    Acts as the central registry for all background tasks.
+    """
+
+    # Format: (Regex Pattern, Function Object)
+
+
+    @classmethod
+    def resolve(cls, path):
+        """
+        Parses the path, finds the matching function, and extracts arguments.
+        Returns: (function_obj, kwargs_dict) or (None, None)
+        """
+        for pattern, func in cls.ROUTES:
+            match = re.match(pattern, path)
+            if match:
+                return func, match.groupdict()
+        return None, None
+
+
+    ROUTES = []
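Note: TaskRouterBase ships with an empty ROUTES list; subclasses are expected to populate it with (pattern, function) pairs whose named regex groups become keyword arguments. A hypothetical subclass to illustrate resolve(); the route pattern and the wiring to db_sync and AutoTasks are assumptions, not part of this release:

from autonomous.db import db_sync
from autonomous.tasks.autotask import AutoTasks
from autonomous.tasks.task_router import TaskRouterBase

class TaskRouter(TaskRouterBase):  # hypothetical subclass
    ROUTES = [
        (
            r"^/tasks/sync/(?P<collection_name>\w+)/(?P<object_id>\w+)$",
            db_sync.process_single_object_sync,
        ),
    ]

func, kwargs = TaskRouter.resolve("/tasks/sync/objects/66b2f0a1c9e4")
if func:
    AutoTasks().task(func, token="manual", **kwargs)  # enqueue with the extracted kwargs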
{autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.29
+Version: 0.3.30
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous
@@ -24,8 +24,8 @@ Requires-Dist: gunicorn
 Requires-Dist: Authlib
 Requires-Dist: rq
 Requires-Dist: ollama
-Requires-Dist: openai>=1.42
 Requires-Dist: google-genai
+Requires-Dist: sentence-transformers
 Requires-Dist: dateparser
 Requires-Dist: python-slugify
 Requires-Dist: pydub
{autonomous_app-0.3.29 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/SOURCES.txt

@@ -31,6 +31,7 @@ src/autonomous/db/__init__.py
 src/autonomous/db/common.py
 src/autonomous/db/connection.py
 src/autonomous/db/context_managers.py
+src/autonomous/db/db_sync.py
 src/autonomous/db/dereference.py
 src/autonomous/db/document.py
 src/autonomous/db/errors.py
@@ -52,7 +53,6 @@ src/autonomous/db/queryset/manager.py
 src/autonomous/db/queryset/queryset.py
 src/autonomous/db/queryset/transform.py
 src/autonomous/db/queryset/visitor.py
-src/autonomous/model/__init__.py
 src/autonomous/model/autoattr.py
 src/autonomous/model/automodel.py
 src/autonomous/storage/__init__.py
@@ -60,6 +60,7 @@ src/autonomous/storage/imagestorage.py
 src/autonomous/storage/localstorage.py
 src/autonomous/tasks/__init__.py
 src/autonomous/tasks/autotask.py
+src/autonomous/tasks/task_router.py
 src/autonomous/utils/markdown.py
 src/autonomous_app.egg-info/PKG-INFO
 src/autonomous_app.egg-info/SOURCES.txt
autonomous_app-0.3.29/src/autonomous/model/__init__.py (deleted)

@@ -1 +0,0 @@
-
autonomous_app-0.3.29/src/autonomous/tasks/autotask.py (deleted)

@@ -1,144 +0,0 @@
-import importlib
-import os
-import subprocess
-
-from redis import Redis
-from rq import Queue, Worker
-
-from autonomous import log
-
-
-class AutoTask:
-    def __init__(self, job):
-        self.job = job
-
-    @property
-    def id(self):
-        return self.job.id
-
-    @property
-    def status(self):
-        status = self.job.get_status()
-        if status in ["running", "queued", "started"]:
-            return "running"
-        return status
-
-    @property
-    def running(self):
-        return self.status == "running"
-
-    @property
-    def finished(self):
-        return self.status == "finished"
-
-    @property
-    def failed(self):
-        return self.status == "failed"
-
-    @property
-    def result(self):
-        result = self.job.latest_result()
-        result_dict = {
-            "id": self.id,
-            "return_value": result.return_value if result else None,
-            "status": self.status,
-            "error": result.exc_string
-            if result and result.type in [result.Type.FAILED, result.Type.STOPPED]
-            else None,
-        }
-
-        return result_dict
-
-    @property
-    def return_value(self):
-        return self.result.get("return_value")
-
-
-class AutoTasks:
-    _connection = None
-    queue = None
-    workers = []
-    all_tasks = []
-    config = {
-        "host": os.environ.get("REDIS_HOST"),
-        "port": os.environ.get("REDIS_PORT"),
-        "password": os.environ.get("REDIS_PASSWORD"),
-        "username": os.environ.get("REDIS_USERNAME"),
-        "db": os.environ.get("REDIS_DB", 0),
-    }
-
-    def __init__(self, queue="default", num_workers=3):
-        if not AutoTasks._connection:
-            options = {}
-
-            if AutoTasks.config.get("username"):
-                options["username"] = AutoTasks.config.get("username")
-            if AutoTasks.config.get("username"):
-                options["password"] = AutoTasks.config.get("password")
-            if AutoTasks.config.get("db"):
-                options["db"] = AutoTasks.config.get("db")
-
-            AutoTasks._connection = Redis(
-                host=AutoTasks.config.get("host"),
-                port=AutoTasks.config.get("port"),
-                **options,
-            )
-            AutoTasks.queue = Queue(queue, connection=AutoTasks._connection)
-
-    def task(self, func, *args, **kwargs):
-        """
-        :param job: job function
-        :param args: job function args
-        :param kwargs: job function kwargs
-        args and kwargs: use these to explicitly pass arguments and keyword to the underlying job function.
-        _task_<option>:pass options to the task object
-        :return: job
-        """
-
-        job = AutoTasks.queue.enqueue(
-            func,
-            job_timeout=kwargs.get("_task_job_timeout", 3600),
-            args=args,
-            kwargs=kwargs,
-        )
-        self.create_worker(func)
-        new_task = AutoTask(job)
-        AutoTasks.all_tasks.append(new_task)
-        return new_task
-
-    def create_worker(self, func):
-        # Get the module containing the target_function
-        module = func.__module__
-
-        # Get the file path of the module
-        module_path = importlib.import_module(module).__file__
-
-        # Set the PYTHONPATH environment variable
-        pythonpath = os.path.dirname(module_path)
-        env = os.environ.copy()
-        env["PYTHONPATH"] = pythonpath
-
-        rq_user_pass = f"{self.config['username']}:{self.config['password']}"
-        rq_url = f"{self.config['host']}:{self.config['port']}"
-        rq_db = self.config["db"]
-        rq_worker_command = (
-            f"rq worker --url redis://{rq_user_pass}@{rq_url}/{rq_db} --burst"
-        )
-
-        worker = subprocess.Popen(rq_worker_command, shell=True, env=env)
-        self.workers.append(worker)
-        return worker
-
-    # get job given its id
-    def get_task(self, job_id):
-        # breakpoint()
-        task = AutoTasks.queue.fetch_job(job_id)
-        return AutoTask(task)
-
-    # get job given its id
-    def get_tasks(self):
-        return [AutoTask(w) for w in Worker.all(queue=AutoTasks.queue)]
-
-    def clear(self):
-        AutoTasks.queue.empty()
-        AutoTasks.all_tasks = []