autonomous-app 0.3.30__tar.gz → 0.3.31__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/PKG-INFO +1 -1
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/__init__.py +1 -1
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/models/local_model.py +39 -130
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/__init__.py +3 -5
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/db_sync.py +1 -1
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/tasks/task_router.py +2 -4
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/PKG-INFO +1 -1
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/README.md +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/pyproject.toml +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/requirements.txt +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/setup.cfg +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/setup.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/audioagent.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/baseagent.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/imageagent.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/jsonagent.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/models/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/models/aws.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/models/deepseek.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/models/gemini.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/models/openai.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/textagent.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHCallbacks.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHOrganization.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHRepo.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHVersionControl.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/auth/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/auth/autoauth.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/auth/github.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/auth/google.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/auth/user.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/cli.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/base/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/base/common.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/base/datastructures.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/base/document.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/base/fields.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/base/metaclasses.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/base/utils.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/common.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/connection.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/context_managers.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/dereference.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/document.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/errors.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/fields.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/mongodb_support.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/pymongo_support.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/queryset/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/queryset/base.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/queryset/field_list.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/queryset/manager.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/queryset/queryset.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/queryset/transform.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/queryset/visitor.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/signals.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/logger.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/model/autoattr.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/model/automodel.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/storage/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/storage/imagestorage.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/storage/localstorage.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/tasks/__init__.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/tasks/autotask.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/utils/markdown.py +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/SOURCES.txt +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/dependency_links.txt +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/requires.txt +0 -0
- {autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/top_level.txt +0 -0
{autonomous_app-0.3.30 → autonomous_app-0.3.31}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.30
+Version: 0.3.31
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous
```
{autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/ai/models/local_model.py

```diff
@@ -22,10 +22,10 @@ class LocalAIModel(AutoModel):
     description = StringAttr(default="A Local AI Model using Ollama and Media AI.")
 
     # Config
-    _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://
-    _media_url = os.environ.get("MEDIA_API_BASE", "http://
-    _text_model = "
-    _json_model = "
+    _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama:11434/api")
+    _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai:5005")
+    _text_model = "llama3"
+    _json_model = "llama3"
 
     # DB Connections
     _mongo_client = pymongo.MongoClient("mongodb://db:27017/")
```
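The new defaults point at Docker Compose service names (`ollama`, `media_ai`), so deployments on other topologies override them through the environment rather than by editing code. The pattern in isolation, as a minimal sketch:

```python
import os

# Defaults assume sibling containers addressable by Compose service name;
# e.g. export OLLAMA_API_BASE=http://localhost:11434/api outside Compose.
OLLAMA_API_BASE = os.environ.get("OLLAMA_API_BASE", "http://ollama:11434/api")
MEDIA_API_BASE = os.environ.get("MEDIA_API_BASE", "http://media_ai:5005")
```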
```diff
@@ -66,12 +66,6 @@ class LocalAIModel(AutoModel):
     }
 
     def _convert_tools_to_json_schema(self, user_function):
-        """
-        Ollama doesn't support 'tools' strictly yet.
-        We convert the tool definition into a system prompt instruction.
-        """
-        # If the user passes a raw dictionary (like a Gemini tool definition)
-        # we extract the relevant parts for the schema.
         schema = {
             "name": user_function.get("name"),
             "description": user_function.get("description", ""),
```
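Per the removed docstring, Ollama did not support strict `tools`, so the method flattens a tool definition into a plain JSON schema that is later injected into the system prompt. A sketch of the idea; only `name` and `description` appear in this diff, so the `parameters` field here is an assumption:

```python
import json

def tool_to_schema_prompt(user_function: dict) -> str:
    # Keep only the fields the model needs to emit valid JSON;
    # "parameters" is assumed, since the rest of the method is not shown.
    schema = {
        "name": user_function.get("name"),
        "description": user_function.get("description", ""),
        "parameters": user_function.get("parameters", {}),
    }
    return json.dumps(schema, indent=2)

print(tool_to_schema_prompt({"name": "lookup", "description": "Find a record"}))
```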
```diff
@@ -89,117 +83,74 @@ class LocalAIModel(AutoModel):
         return []
 
     def build_hybrid_context(self, prompt, focus_object_id=None):
-        """
-        Builds context based on RELATIONAL ASSOCIATIONS + SEMANTIC LORE.
-        """
-
-        # 1. Create a Cache Key based on what defines the "Scene"
-        # We assume 'focus_object_id' + rough prompt length captures the context enough
         cache_key = f"ctx:{focus_object_id}:{len(prompt) // 50}"
-
-        # 2. Check Cache
         cached_ctx = self._redis.get(cache_key)
         if cached_ctx:
             return cached_ctx
 
         context_str = ""
-
-        # --- PART 1: MONGODB (Relational Associations) ---
-        # If we are focusing on a specific object, fetch it and its specific refs.
+        # --- PART 1: MONGODB ---
         if focus_object_id:
             try:
-                # 1. Fetch the Main Object
-                # Handle both string ID and ObjectId
                 oid = (
                     ObjectId(focus_object_id)
                     if isinstance(focus_object_id, str)
                     else focus_object_id
                 )
-
                 main_obj = self._mongo_db.objects.find_one({"_id": oid})
 
                 if main_obj:
-
-                    context_str += "### FOCUS OBJECT ###\n"
-                    context_str += prompt
-
-                    # 2. Extract References (Associations)
-                    # 1. Start with the main list
+                    context_str += "### FOCUS OBJECT ###\n" + prompt
                     ref_ids = main_obj.get("associations", []) or []
-
-                    # 2. Safely add single fields (if they exist)
                     if world_id := main_obj.get("world"):
                         ref_ids.append(world_id)
-
-                    # 3. Safely add lists (ensure they are lists)
                     ref_ids.extend(main_obj.get("stories", []) or [])
                     ref_ids.extend(main_obj.get("events", []) or [])
 
                     if ref_ids:
-
-
-
-
-                                valid_oids.append(
-                                    ObjectId(rid) if isinstance(rid, str) else rid
-                                )
-                            except:
-                                pass
-
-                        # 3. Fetch all associated objects in ONE query
+                        valid_oids = [
+                            ObjectId(rid) if isinstance(rid, str) else rid
+                            for rid in ref_ids
+                        ]
                         if valid_oids:
                             associated_objs = self._mongo_db.objects.find(
                                 {"_id": {"$in": valid_oids}}
                             )
-
                             context_str += "\n### ASSOCIATED REFERENCES ###\n"
                             for obj in associated_objs:
-                                log(f"Associated Obj: {obj}", _print=True)
                                 context_str += f"- {obj}\n"
-
                             context_str += "\n"
             except Exception as e:
                 log(f"Mongo Association Error: {e}", _print=True)
 
-        # --- PART 2: REDIS
-        # We keep this! It catches "Lore" or "Rules" that aren't explicitly linked in the DB.
-        # e.g., If the sword is "Elven", this finds "Elven History" even if not linked by ID.
+        # --- PART 2: REDIS ---
         if len(prompt) > 10:
             vector = self.get_embedding(prompt)
             if vector:
                 try:
-                    q = "*=>[KNN 2 @vector $blob AS score]"
+                    q = "*=>[KNN 2 @vector $blob AS score]"
                     params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
                     results = self._redis.ft("search_index").search(
                         q, query_params=params
                     )
-
                     if results.docs:
                         context_str += "### RELEVANT LORE ###\n"
                         for doc in results.docs:
                             context_str += f"- {doc.content}\n"
-                except Exception
+                except Exception:
                     pass
 
-        # 3. Save to Cache (Expire in 60s)
-        # This prevents hammering the DB/Vector engine during a rapid conversation
         self._redis.set(cache_key, context_str, ex=120)
-
         return context_str
 
     def generate_json(self, message, function, additional_instructions="", **kwargs):
         """
-
-        and injecting the schema into the prompt.
+        UPDATED: Uses correct /api/chat payload structure (messages list)
         """
         schema_str = self._convert_tools_to_json_schema(function)
-
         focus_pk = kwargs.get("focus_object")
-
-        # Build Relational Context
         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
 
-        # Construct System Prompt
         full_system_prompt = (
             f"{self.instructions}. {additional_instructions}\n"
             f"You must respond strictly with a valid JSON object matching this schema:\n"
```
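The rewritten association block converts every reference id up front and resolves them with a single `$in` query, instead of the old per-id append loop. A standalone sketch of that batching pattern with pymongo; the URI mirrors the diff's client, but the database name is an assumption:

```python
from bson import ObjectId
from pymongo import MongoClient

client = MongoClient("mongodb://db:27017/")  # URI mirrors the diff's client
objects = client["autonomous"].objects       # database name is an assumption

def fetch_associated(ref_ids):
    """Resolve all referenced documents in one round trip."""
    # Normalize string ids to ObjectId. Note this raises on a malformed id,
    # whereas the removed loop swallowed them with a bare except.
    valid_oids = [ObjectId(r) if isinstance(r, str) else r for r in ref_ids]
    if not valid_oids:
        return []
    return list(objects.find({"_id": {"$in": valid_oids}}))
```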
```diff
@@ -209,21 +160,24 @@ class LocalAIModel(AutoModel):
             f"{world_context}"
         )
 
+        # FIX: Using 'messages' instead of 'prompt'/'system'
         payload = {
-            "model":
-            "
-
-
+            "model": "llama3",
+            "messages": [
+                {"role": "system", "content": full_system_prompt},
+                {"role": "user", "content": message},
+            ],
+            "format": "json",
             "stream": False,
             "keep_alive": "24h",
         }
 
         try:
-            response = requests.post(f"{self._ollama_url}/
+            response = requests.post(f"{self._ollama_url}/chat", json=payload)
             response.raise_for_status()
-            result_text = response.json().get("response", "{}")
 
-            #
+            # FIX: Chat API returns 'message' -> 'content'
+            result_text = response.json().get("message", {}).get("content", "{}")
             return json.loads(result_text)
 
         except Exception as e:
```
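This hunk is the core of the release: the class moves from Ollama's legacy `/api/generate` response shape (a flat `response` key, visible in the removed line) to `/api/chat`, which takes a `messages` list and nests the reply under `message.content`, with `"format": "json"` constraining the output to valid JSON. A minimal sketch of the new call, assuming an Ollama server is reachable at the diff's default URL and the `llama3` model has been pulled:

```python
import json
import requests

OLLAMA_API_BASE = "http://ollama:11434/api"  # matches the diff's default

payload = {
    "model": "llama3",
    "messages": [
        {"role": "system", "content": 'Reply with a JSON object: {"answer": ...}'},
        {"role": "user", "content": "What is 2 + 2?"},
    ],
    "format": "json",  # asks Ollama to constrain output to valid JSON
    "stream": False,   # return one complete response instead of chunks
}

resp = requests.post(f"{OLLAMA_API_BASE}/chat", json=payload)
resp.raise_for_status()
# /api/chat nests the text under message.content; /api/generate used "response"
print(json.loads(resp.json()["message"]["content"]))
```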
```diff
@@ -232,14 +186,11 @@ class LocalAIModel(AutoModel):
 
     def generate_text(self, message, additional_instructions="", **kwargs):
         """
-
+        UPDATED: Uses correct /api/chat payload structure
         """
         focus_pk = kwargs.get("focus_object")
-
-        # Build Relational Context
         world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
 
-        # Construct System Prompt
         full_system_prompt = (
             f"{self.instructions}. {additional_instructions}\n"
             f"You must strictly adhere to the following context:\n"
@@ -247,41 +198,40 @@ class LocalAIModel(AutoModel):
         )
 
         payload = {
-            "model":
-            "
-
+            "model": "llama3",
+            "messages": [
+                {"role": "system", "content": full_system_prompt},
+                {"role": "user", "content": message},
+            ],
             "stream": False,
             "keep_alive": "24h",
         }
 
         try:
-            response = requests.post(f"{self._ollama_url}/
+            response = requests.post(f"{self._ollama_url}/chat", json=payload)
             response.raise_for_status()
-
+            # FIX: Chat API returns 'message' -> 'content'
+            return response.json().get("message", {}).get("content", "")
         except Exception as e:
             log(f"==== LocalAI Text Error: {e} ====", _print=True)
             return "Error generating text."
 
     def summarize_text(self, text, primer="", **kwargs):
         primer = primer or "Summarize the following text concisely."
-
-        # Simple chunking logic (similar to your original)
-        # Note: Mistral-Nemo has a large context window (128k), so chunking
-        # is less necessary than with older models, but we keep it for safety.
-        max_chars = 12000  # Roughly 3k tokens
+        max_chars = 12000
         chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
 
         full_summary = ""
         for chunk in chunks:
             payload = {
-                "model":
-                "
+                "model": "llama3",
+                "messages": [{"role": "user", "content": f"{primer}:\n\n{chunk}"}],
                 "stream": False,
                 "keep_alive": "24h",
             }
             try:
-                res = requests.post(f"{self._ollama_url}/
-                full_summary += res.json().get("
+                res = requests.post(f"{self._ollama_url}/chat", json=payload)
+                full_summary += res.json().get("message", {}).get("content", "") + "\n"
             except Exception as e:
                 log(f"Summary Error: {e}", _print=True)
                 break
```
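`summarize_text` keeps its chunking step: the input is sliced into fixed 12000-character windows (roughly a 3k-token budget, per the removed comment) and each window is summarized with its own `/api/chat` call. The slicing on its own, as a sketch:

```python
def chunk_text(text: str, max_chars: int = 12000):
    # Fixed-width character windows; the last chunk may be shorter.
    return [text[i : i + max_chars] for i in range(0, len(text), max_chars)]

print([len(c) for c in chunk_text("x" * 25000)])  # -> [12000, 12000, 1000]
```

Because each window is summarized independently, detail that spans a chunk boundary can be lost; a common refinement is a second pass that summarizes the concatenated chunk summaries.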
```diff
@@ -289,100 +239,59 @@ class LocalAIModel(AutoModel):
         return full_summary
 
     def generate_audio_text(self, audio_file, prompt="", **kwargs):
-        """
-        Sends audio bytes to the Media AI container for Whisper transcription.
-        """
         try:
-            # Prepare the file for upload
-            # audio_file is likely bytes, so we wrap in BytesIO if needed
             if isinstance(audio_file, bytes):
                 f_obj = io.BytesIO(audio_file)
             else:
                 f_obj = audio_file
-
             files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
-
             response = requests.post(f"{self._media_url}/transcribe", files=files)
             response.raise_for_status()
             return response.json().get("text", "")
-
         except Exception as e:
             log(f"STT Error: {e}", _print=True)
             return ""
 
     def generate_audio(self, prompt, voice=None, **kwargs):
-        """
-        Sends text to the Media AI container for TTS.
-        """
         voice = voice or random.choice(list(self.VOICES.keys()))
-
         try:
             payload = {"text": prompt, "voice": voice}
             response = requests.post(f"{self._media_url}/tts", json=payload)
             response.raise_for_status()
-
-            # Response content is WAV bytes
             wav_bytes = response.content
-
-            # Convert to MP3 to match your original interface (using pydub)
             audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
             mp3_buffer = io.BytesIO()
             audio.export(mp3_buffer, format="mp3")
             return mp3_buffer.getvalue()
-
         except Exception as e:
             log(f"TTS Error: {e}", _print=True)
             return None
 
     def generate_image(self, prompt, negative_prompt="", **kwargs):
-        """
-        Generates an image using Local AI.
-        If 'files' are provided, performs Image-to-Image generation using the first file as reference.
-        """
         try:
-            # Prepare the multipart data
-            # We send the prompt as a form field
             data = {"prompt": prompt, "negative_prompt": negative_prompt}
             files = {}
-
-            # Check if reference images were passed
             if kwargs.get("files"):
-                # Take the first available file
                 for fn, f_bytes in kwargs.get("files").items():
-                    # If f_bytes is bytes, wrap in IO, else assume it's file-like
                     if isinstance(f_bytes, bytes):
                         file_obj = io.BytesIO(f_bytes)
                     else:
                         file_obj = f_bytes
-
-                    # Add to the request files
-                    # Key must be 'file' to match server.py logic
-                    # TODO: Support multiple images if needed
                     files["file"] = (fn, file_obj, "image/png")
-                    break
-
-            # Send Request
+                    break
             if files:
-                # Multipart/form-data request (Prompt + File)
                 response = requests.post(
                     f"{self._media_url}/generate-image", data=data, files=files
                 )
             else:
-                # Standard request (Prompt only) - server.py handles request.form vs json
-                # But our updated server expects form data for consistency
                 response = requests.post(f"{self._media_url}/generate-image", data=data)
-
             response.raise_for_status()
-
-            # Returns WebP bytes directly
             return response.content
-
         except Exception as e:
             log(f"Image Gen Error: {e}", _print=True)
             return None
 
     def list_voices(self, filters=[]):
-        # Same logic as before
         if not filters:
             return list(self.VOICES.keys())
         voices = []
```
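`generate_audio` leaves the transcoding step unchanged: the media container returns WAV bytes, which are re-encoded to MP3 in memory with pydub. A minimal sketch of that conversion (pydub needs ffmpeg available on the PATH):

```python
import io

from pydub import AudioSegment

def wav_bytes_to_mp3(wav_bytes: bytes) -> bytes:
    # Decode the in-memory WAV, then re-encode to MP3 (ffmpeg does the work).
    audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
    buf = io.BytesIO()
    audio.export(buf, format="mp3")
    return buf.getvalue()
```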
{autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/__init__.py

```diff
@@ -13,6 +13,7 @@ from autonomous.db import (
     signals,
 )
 from autonomous.db.connection import *  # noqa: F401
+from autonomous.db.db_sync import *  # noqa: F401
 from autonomous.db.document import *  # noqa: F401
 from autonomous.db.errors import *  # noqa: F401
 from autonomous.db.fields import *  # noqa: F401
@@ -29,14 +30,11 @@ __all__ = (
 )
 
 
-VERSION = (0,
+VERSION = (0, 30, 0)
 
 
 def get_version():
-    """Return the VERSION as a string.
-
-    For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.
-    """
+    """Return the VERSION as a string."""
     return ".".join(map(str, VERSION))
```
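With the tuple pinned at `(0, 30, 0)`, the trimmed helper renders it as a dotted string:

```python
VERSION = (0, 30, 0)

def get_version():
    """Return the VERSION as a string."""
    return ".".join(map(str, VERSION))

assert get_version() == "0.30.0"
```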
{autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/db/db_sync.py

```diff
@@ -110,6 +110,7 @@ def request_indexing(object_id, collection_name):
     THE TRIGGER FUNCTION (Runs in Main App).
     MUST BE FAST. NO SLEEPING HERE.
     """
+    print("Requesting Indexing...")
     # Import your Queue Wrapper
     from autonomous.tasks.autotask import AutoTasks
 
@@ -126,7 +127,6 @@ def request_indexing(object_id, collection_name):
     r.set(token_key, current_token, ex=300)
 
     # 3. ENQUEUE THE TASK (Instant)
-    # CRITICAL CHANGE: We use task_runner.task() instead of calling the function directly.
     try:
         task_runner.task(
             process_single_object_sync,  # The function to run later
```
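Around the lines shown, `request_indexing` stamps a token into Redis with a five-minute TTL before enqueuing, which lets a worker that runs later detect whether its request has been superseded. A hedged sketch of that debounce pattern, assuming redis-py; `enqueue` is a stand-in for the project's `task_runner.task(...)`, and the token check on the worker side is inferred rather than shown in this diff:

```python
import time

import redis

r = redis.Redis(host="db", port=6379)  # host/port are illustrative

def enqueue(fn, *args):
    # Stand-in for task_runner.task(...); the sketch just runs inline.
    fn(*args)

def process_sync(object_id: str, token: str) -> None:
    # Worker side: only the most recent request for this object runs.
    if r.get(f"sync_token:{object_id}") != token.encode():
        return  # superseded by a newer request
    print(f"indexing {object_id}")

def request_indexing(object_id: str) -> None:
    # MUST BE FAST: stamp the newest token (expires in 300s) and enqueue.
    token = str(time.time_ns())
    r.set(f"sync_token:{object_id}", token, ex=300)
    enqueue(process_sync, object_id, token)
```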
{autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous/tasks/task_router.py

```diff
@@ -1,5 +1,5 @@
 import re
-
+
 
 class TaskRouterBase:
     """
@@ -9,7 +9,6 @@ class TaskRouterBase:
 
     # Format: (Regex Pattern, Function Object)
 
-
     @classmethod
     def resolve(cls, path):
         """
@@ -22,5 +21,4 @@ class TaskRouterBase:
                 return func, match.groupdict()
         return None, None
 
-
-    ROUTES = []
+    ROUTES = []
```
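`TaskRouterBase.resolve` scans `ROUTES`, a list of `(regex pattern, function)` pairs, and returns the first matching function along with the pattern's named groups. A sketch of that dispatch style; the loop body is inferred from the two visible return statements, and the route and handler are illustrative:

```python
import re

class TaskRouterBase:
    # Format: (Regex Pattern, Function Object)
    ROUTES = []

    @classmethod
    def resolve(cls, path):
        # Inferred loop: try each route in order, first match wins.
        for pattern, func in cls.ROUTES:
            match = re.match(pattern, path)
            if match:
                return func, match.groupdict()
        return None, None

def sync_object(object_id):
    return f"syncing {object_id}"

class MyRouter(TaskRouterBase):
    ROUTES = [(r"^sync/(?P<object_id>\w+)$", sync_object)]

func, kwargs = MyRouter.resolve("sync/abc123")
print(func(**kwargs))  # -> "syncing abc123"
```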
{autonomous_app-0.3.30 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.30
+Version: 0.3.31
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous
```