autonomous-app 0.3.29__tar.gz → 0.3.31__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/PKG-INFO +2 -2
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/requirements.txt +1 -1
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/__init__.py +1 -1
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/baseagent.py +7 -8
- autonomous_app-0.3.31/src/autonomous/ai/models/local_model.py +301 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/__init__.py +4 -5
- autonomous_app-0.3.31/src/autonomous/db/db_sync.py +140 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/model/automodel.py +4 -0
- autonomous_app-0.3.31/src/autonomous/tasks/autotask.py +80 -0
- autonomous_app-0.3.31/src/autonomous/tasks/task_router.py +24 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/PKG-INFO +2 -2
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/SOURCES.txt +2 -1
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/requires.txt +1 -1
- autonomous_app-0.3.29/src/autonomous/ai/models/local_model.py +0 -272
- autonomous_app-0.3.29/src/autonomous/model/__init__.py +0 -1
- autonomous_app-0.3.29/src/autonomous/tasks/autotask.py +0 -144
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/README.md +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/pyproject.toml +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/setup.cfg +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/setup.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/audioagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/imageagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/jsonagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/models/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/models/aws.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/models/deepseek.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/models/gemini.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/models/openai.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/ai/textagent.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHCallbacks.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHOrganization.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHRepo.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/GHVersionControl.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/apis/version_control/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/auth/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/auth/autoauth.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/auth/github.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/auth/google.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/auth/user.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/cli.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/base/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/base/common.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/base/datastructures.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/base/document.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/base/fields.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/base/metaclasses.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/base/utils.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/common.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/connection.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/context_managers.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/dereference.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/document.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/errors.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/fields.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/mongodb_support.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/pymongo_support.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/queryset/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/queryset/base.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/queryset/field_list.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/queryset/manager.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/queryset/queryset.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/queryset/transform.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/queryset/visitor.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/db/signals.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/logger.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/model/autoattr.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/storage/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/storage/imagestorage.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/storage/localstorage.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/tasks/__init__.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous/utils/markdown.py +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/dependency_links.txt +0 -0
- {autonomous_app-0.3.29 → autonomous_app-0.3.31}/src/autonomous_app.egg-info/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: autonomous-app
|
|
3
|
-
Version: 0.3.29
|
|
3
|
+
Version: 0.3.31
|
|
4
4
|
Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
|
|
5
5
|
Author-email: Steven A Moore <samoore@binghamton.edu>
|
|
6
6
|
Project-URL: homepage, https://github.com/Sallenmoore/autonomous
|
|
@@ -24,8 +24,8 @@ Requires-Dist: gunicorn
|
|
|
24
24
|
Requires-Dist: Authlib
|
|
25
25
|
Requires-Dist: rq
|
|
26
26
|
Requires-Dist: ollama
|
|
27
|
-
Requires-Dist: openai>=1.42
|
|
28
27
|
Requires-Dist: google-genai
|
|
28
|
+
Requires-Dist: sentence-transformers
|
|
29
29
|
Requires-Dist: dateparser
|
|
30
30
|
Requires-Dist: python-slugify
|
|
31
31
|
Requires-Dist: pydub
|
|
@@ -23,12 +23,11 @@ class BaseAgent(AutoModel):
|
|
|
23
23
|
return self.get_client().id
|
|
24
24
|
|
|
25
25
|
def get_client(self):
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
self.save()
|
|
26
|
+
self.client = self._ai_model(
|
|
27
|
+
name=self.name,
|
|
28
|
+
instructions=self.instructions,
|
|
29
|
+
description=self.description,
|
|
30
|
+
)
|
|
31
|
+
self.client.save()
|
|
32
|
+
self.save()
|
|
34
33
|
return self.client
|
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
import io
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
import random
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
import pymongo
|
|
8
|
+
import redis
|
|
9
|
+
import requests
|
|
10
|
+
from bson.objectid import ObjectId
|
|
11
|
+
from pydub import AudioSegment
|
|
12
|
+
|
|
13
|
+
from autonomous import log
|
|
14
|
+
from autonomous.model.autoattr import ListAttr, StringAttr
|
|
15
|
+
from autonomous.model.automodel import AutoModel
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class LocalAIModel(AutoModel):
|
|
19
|
+
messages = ListAttr(StringAttr(default=[]))
|
|
20
|
+
name = StringAttr(default="agent")
|
|
21
|
+
instructions = StringAttr(default="You are a helpful AI.")
|
|
22
|
+
description = StringAttr(default="A Local AI Model using Ollama and Media AI.")
|
|
23
|
+
|
|
24
|
+
# Config
|
|
25
|
+
_ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama:11434/api")
|
|
26
|
+
_media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai:5005")
|
|
27
|
+
_text_model = "llama3"
|
|
28
|
+
_json_model = "llama3"
|
|
29
|
+
|
|
30
|
+
# DB Connections
|
|
31
|
+
_mongo_client = pymongo.MongoClient("mongodb://db:27017/")
|
|
32
|
+
_mongo_db = os.getenv("DB_DB", "default")
|
|
33
|
+
_redis = redis.Redis(host="cachedb", port=6379, decode_responses=True)
|
|
34
|
+
|
|
35
|
+
VOICES = {
|
|
36
|
+
"Zephyr": ["female"],
|
|
37
|
+
"Puck": ["male"],
|
|
38
|
+
"Charon": ["male"],
|
|
39
|
+
"Kore": ["female"],
|
|
40
|
+
"Fenrir": ["non-binary"],
|
|
41
|
+
"Leda": ["female"],
|
|
42
|
+
"Orus": ["male"],
|
|
43
|
+
"Aoede": ["female"],
|
|
44
|
+
"Callirhoe": ["female"],
|
|
45
|
+
"Autonoe": ["female"],
|
|
46
|
+
"Enceladus": ["male"],
|
|
47
|
+
"Iapetus": ["male"],
|
|
48
|
+
"Umbriel": ["male"],
|
|
49
|
+
"Algieba": ["male"],
|
|
50
|
+
"Despina": ["female"],
|
|
51
|
+
"Erinome": ["female"],
|
|
52
|
+
"Algenib": ["male"],
|
|
53
|
+
"Rasalgethi": ["non-binary"],
|
|
54
|
+
"Laomedeia": ["female"],
|
|
55
|
+
"Achernar": ["female"],
|
|
56
|
+
"Alnilam": ["male"],
|
|
57
|
+
"Schedar": ["male"],
|
|
58
|
+
"Gacrux": ["female"],
|
|
59
|
+
"Pulcherrima": ["non-binary"],
|
|
60
|
+
"Achird": ["male"],
|
|
61
|
+
"Zubenelgenubi": ["male"],
|
|
62
|
+
"Vindemiatrix": ["female"],
|
|
63
|
+
"Sadachbia": ["male"],
|
|
64
|
+
"Sadaltager": ["male"],
|
|
65
|
+
"Sulafar": ["female"],
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
def _convert_tools_to_json_schema(self, user_function):
|
|
69
|
+
schema = {
|
|
70
|
+
"name": user_function.get("name"),
|
|
71
|
+
"description": user_function.get("description", ""),
|
|
72
|
+
"parameters": user_function.get("parameters", {}),
|
|
73
|
+
}
|
|
74
|
+
return json.dumps(schema, indent=2)
|
|
75
|
+
|
|
76
|
+
def get_embedding(self, text):
|
|
77
|
+
try:
|
|
78
|
+
res = requests.post(f"{self._media_url}/embeddings", json={"text": text})
|
|
79
|
+
res.raise_for_status()
|
|
80
|
+
return res.json()["embedding"]
|
|
81
|
+
except Exception as e:
|
|
82
|
+
log(f"Embedding Error: {e}", _print=True)
|
|
83
|
+
return []
|
|
84
|
+
|
|
85
|
+
def build_hybrid_context(self, prompt, focus_object_id=None):
|
|
86
|
+
cache_key = f"ctx:{focus_object_id}:{len(prompt) // 50}"
|
|
87
|
+
cached_ctx = self._redis.get(cache_key)
|
|
88
|
+
if cached_ctx:
|
|
89
|
+
return cached_ctx
|
|
90
|
+
|
|
91
|
+
context_str = ""
|
|
92
|
+
# --- PART 1: MONGODB ---
|
|
93
|
+
if focus_object_id:
|
|
94
|
+
try:
|
|
95
|
+
oid = (
|
|
96
|
+
ObjectId(focus_object_id)
|
|
97
|
+
if isinstance(focus_object_id, str)
|
|
98
|
+
else focus_object_id
|
|
99
|
+
)
|
|
100
|
+
main_obj = self._mongo_db.objects.find_one({"_id": oid})
|
|
101
|
+
|
|
102
|
+
if main_obj:
|
|
103
|
+
context_str += "### FOCUS OBJECT ###\n" + prompt
|
|
104
|
+
ref_ids = main_obj.get("associations", []) or []
|
|
105
|
+
if world_id := main_obj.get("world"):
|
|
106
|
+
ref_ids.append(world_id)
|
|
107
|
+
ref_ids.extend(main_obj.get("stories", []) or [])
|
|
108
|
+
ref_ids.extend(main_obj.get("events", []) or [])
|
|
109
|
+
|
|
110
|
+
if ref_ids:
|
|
111
|
+
valid_oids = [
|
|
112
|
+
ObjectId(rid) if isinstance(rid, str) else rid
|
|
113
|
+
for rid in ref_ids
|
|
114
|
+
]
|
|
115
|
+
if valid_oids:
|
|
116
|
+
associated_objs = self._mongo_db.objects.find(
|
|
117
|
+
{"_id": {"$in": valid_oids}}
|
|
118
|
+
)
|
|
119
|
+
context_str += "\n### ASSOCIATED REFERENCES ###\n"
|
|
120
|
+
for obj in associated_objs:
|
|
121
|
+
context_str += f"- {obj}\n"
|
|
122
|
+
context_str += "\n"
|
|
123
|
+
except Exception as e:
|
|
124
|
+
log(f"Mongo Association Error: {e}", _print=True)
|
|
125
|
+
|
|
126
|
+
# --- PART 2: REDIS ---
|
|
127
|
+
if len(prompt) > 10:
|
|
128
|
+
vector = self.get_embedding(prompt)
|
|
129
|
+
if vector:
|
|
130
|
+
try:
|
|
131
|
+
q = "*=>[KNN 2 @vector $blob AS score]"
|
|
132
|
+
params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
|
|
133
|
+
results = self._redis.ft("search_index").search(
|
|
134
|
+
q, query_params=params
|
|
135
|
+
)
|
|
136
|
+
if results.docs:
|
|
137
|
+
context_str += "### RELEVANT LORE ###\n"
|
|
138
|
+
for doc in results.docs:
|
|
139
|
+
context_str += f"- {doc.content}\n"
|
|
140
|
+
except Exception:
|
|
141
|
+
pass
|
|
142
|
+
|
|
143
|
+
self._redis.set(cache_key, context_str, ex=120)
|
|
144
|
+
return context_str
|
|
145
|
+
|
|
146
|
+
def generate_json(self, message, function, additional_instructions="", **kwargs):
|
|
147
|
+
"""
|
|
148
|
+
UPDATED: Uses correct /api/chat payload structure (messages list)
|
|
149
|
+
"""
|
|
150
|
+
schema_str = self._convert_tools_to_json_schema(function)
|
|
151
|
+
focus_pk = kwargs.get("focus_object")
|
|
152
|
+
world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
|
|
153
|
+
|
|
154
|
+
full_system_prompt = (
|
|
155
|
+
f"{self.instructions}. {additional_instructions}\n"
|
|
156
|
+
f"You must respond strictly with a valid JSON object matching this schema:\n"
|
|
157
|
+
f"{schema_str}\n"
|
|
158
|
+
f"Do not include markdown formatting or explanations."
|
|
159
|
+
f"You must strictly adhere to the following context:\n"
|
|
160
|
+
f"{world_context}"
|
|
161
|
+
)
|
|
162
|
+
|
|
163
|
+
# FIX: Using 'messages' instead of 'prompt'/'system'
|
|
164
|
+
payload = {
|
|
165
|
+
"model": "llama3",
|
|
166
|
+
"messages": [
|
|
167
|
+
{"role": "system", "content": full_system_prompt},
|
|
168
|
+
{"role": "user", "content": message},
|
|
169
|
+
],
|
|
170
|
+
"format": "json",
|
|
171
|
+
"stream": False,
|
|
172
|
+
"keep_alive": "24h",
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
try:
|
|
176
|
+
response = requests.post(f"{self._ollama_url}/chat", json=payload)
|
|
177
|
+
response.raise_for_status()
|
|
178
|
+
|
|
179
|
+
# FIX: Chat API returns 'message' -> 'content'
|
|
180
|
+
result_text = response.json().get("message", {}).get("content", "{}")
|
|
181
|
+
return json.loads(result_text)
|
|
182
|
+
|
|
183
|
+
except Exception as e:
|
|
184
|
+
log(f"==== LocalAI JSON Error: {e} ====", _print=True)
|
|
185
|
+
return {}
|
|
186
|
+
|
|
187
|
+
def generate_text(self, message, additional_instructions="", **kwargs):
|
|
188
|
+
"""
|
|
189
|
+
UPDATED: Uses correct /api/chat payload structure
|
|
190
|
+
"""
|
|
191
|
+
focus_pk = kwargs.get("focus_object")
|
|
192
|
+
world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
|
|
193
|
+
|
|
194
|
+
full_system_prompt = (
|
|
195
|
+
f"{self.instructions}. {additional_instructions}\n"
|
|
196
|
+
f"You must strictly adhere to the following context:\n"
|
|
197
|
+
f"{world_context}"
|
|
198
|
+
)
|
|
199
|
+
|
|
200
|
+
payload = {
|
|
201
|
+
"model": "llama3",
|
|
202
|
+
"messages": [
|
|
203
|
+
{"role": "system", "content": full_system_prompt},
|
|
204
|
+
{"role": "user", "content": message},
|
|
205
|
+
],
|
|
206
|
+
"stream": False,
|
|
207
|
+
"keep_alive": "24h",
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
try:
|
|
211
|
+
response = requests.post(f"{self._ollama_url}/chat", json=payload)
|
|
212
|
+
response.raise_for_status()
|
|
213
|
+
# FIX: Chat API returns 'message' -> 'content'
|
|
214
|
+
return response.json().get("message", {}).get("content", "")
|
|
215
|
+
except Exception as e:
|
|
216
|
+
log(f"==== LocalAI Text Error: {e} ====", _print=True)
|
|
217
|
+
return "Error generating text."
|
|
218
|
+
|
|
219
|
+
def summarize_text(self, text, primer="", **kwargs):
|
|
220
|
+
primer = primer or "Summarize the following text concisely."
|
|
221
|
+
max_chars = 12000
|
|
222
|
+
chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
|
|
223
|
+
|
|
224
|
+
full_summary = ""
|
|
225
|
+
for chunk in chunks:
|
|
226
|
+
payload = {
|
|
227
|
+
"model": "llama3",
|
|
228
|
+
"messages": [{"role": "user", "content": f"{primer}:\n\n{chunk}"}],
|
|
229
|
+
"stream": False,
|
|
230
|
+
"keep_alive": "24h",
|
|
231
|
+
}
|
|
232
|
+
try:
|
|
233
|
+
res = requests.post(f"{self._ollama_url}/chat", json=payload)
|
|
234
|
+
full_summary += res.json().get("message", {}).get("content", "") + "\n"
|
|
235
|
+
except Exception as e:
|
|
236
|
+
log(f"Summary Error: {e}", _print=True)
|
|
237
|
+
break
|
|
238
|
+
|
|
239
|
+
return full_summary
|
|
240
|
+
|
|
241
|
+
def generate_audio_text(self, audio_file, prompt="", **kwargs):
|
|
242
|
+
try:
|
|
243
|
+
if isinstance(audio_file, bytes):
|
|
244
|
+
f_obj = io.BytesIO(audio_file)
|
|
245
|
+
else:
|
|
246
|
+
f_obj = audio_file
|
|
247
|
+
files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
|
|
248
|
+
response = requests.post(f"{self._media_url}/transcribe", files=files)
|
|
249
|
+
response.raise_for_status()
|
|
250
|
+
return response.json().get("text", "")
|
|
251
|
+
except Exception as e:
|
|
252
|
+
log(f"STT Error: {e}", _print=True)
|
|
253
|
+
return ""
|
|
254
|
+
|
|
255
|
+
def generate_audio(self, prompt, voice=None, **kwargs):
|
|
256
|
+
voice = voice or random.choice(list(self.VOICES.keys()))
|
|
257
|
+
try:
|
|
258
|
+
payload = {"text": prompt, "voice": voice}
|
|
259
|
+
response = requests.post(f"{self._media_url}/tts", json=payload)
|
|
260
|
+
response.raise_for_status()
|
|
261
|
+
wav_bytes = response.content
|
|
262
|
+
audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
|
|
263
|
+
mp3_buffer = io.BytesIO()
|
|
264
|
+
audio.export(mp3_buffer, format="mp3")
|
|
265
|
+
return mp3_buffer.getvalue()
|
|
266
|
+
except Exception as e:
|
|
267
|
+
log(f"TTS Error: {e}", _print=True)
|
|
268
|
+
return None
|
|
269
|
+
|
|
270
|
+
def generate_image(self, prompt, negative_prompt="", **kwargs):
|
|
271
|
+
try:
|
|
272
|
+
data = {"prompt": prompt, "negative_prompt": negative_prompt}
|
|
273
|
+
files = {}
|
|
274
|
+
if kwargs.get("files"):
|
|
275
|
+
for fn, f_bytes in kwargs.get("files").items():
|
|
276
|
+
if isinstance(f_bytes, bytes):
|
|
277
|
+
file_obj = io.BytesIO(f_bytes)
|
|
278
|
+
else:
|
|
279
|
+
file_obj = f_bytes
|
|
280
|
+
files["file"] = (fn, file_obj, "image/png")
|
|
281
|
+
break
|
|
282
|
+
if files:
|
|
283
|
+
response = requests.post(
|
|
284
|
+
f"{self._media_url}/generate-image", data=data, files=files
|
|
285
|
+
)
|
|
286
|
+
else:
|
|
287
|
+
response = requests.post(f"{self._media_url}/generate-image", data=data)
|
|
288
|
+
response.raise_for_status()
|
|
289
|
+
return response.content
|
|
290
|
+
except Exception as e:
|
|
291
|
+
log(f"Image Gen Error: {e}", _print=True)
|
|
292
|
+
return None
|
|
293
|
+
|
|
294
|
+
def list_voices(self, filters=[]):
|
|
295
|
+
if not filters:
|
|
296
|
+
return list(self.VOICES.keys())
|
|
297
|
+
voices = []
|
|
298
|
+
for voice, attribs in self.VOICES.items():
|
|
299
|
+
if any(f.lower() in attribs for f in filters):
|
|
300
|
+
voices.append(voice)
|
|
301
|
+
return voices
|
|
@@ -5,6 +5,7 @@
|
|
|
5
5
|
# `from autonomous.db import *` and then `connect('testdb')`.
|
|
6
6
|
from autonomous.db import (
|
|
7
7
|
connection,
|
|
8
|
+
db_sync,
|
|
8
9
|
document,
|
|
9
10
|
errors,
|
|
10
11
|
fields,
|
|
@@ -12,6 +13,7 @@ from autonomous.db import (
|
|
|
12
13
|
signals,
|
|
13
14
|
)
|
|
14
15
|
from autonomous.db.connection import * # noqa: F401
|
|
16
|
+
from autonomous.db.db_sync import * # noqa: F401
|
|
15
17
|
from autonomous.db.document import * # noqa: F401
|
|
16
18
|
from autonomous.db.errors import * # noqa: F401
|
|
17
19
|
from autonomous.db.fields import * # noqa: F401
|
|
@@ -28,14 +30,11 @@ __all__ = (
|
|
|
28
30
|
)
|
|
29
31
|
|
|
30
32
|
|
|
31
|
-
VERSION = (0, 29, 0)
|
|
33
|
+
VERSION = (0, 30, 0)
|
|
32
34
|
|
|
33
35
|
|
|
34
36
|
def get_version():
|
|
35
|
-
"""Return the VERSION as a string.
|
|
36
|
-
|
|
37
|
-
For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.
|
|
38
|
-
"""
|
|
37
|
+
"""Return the VERSION as a string."""
|
|
39
38
|
return ".".join(map(str, VERSION))
|
|
40
39
|
|
|
41
40
|
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
import urllib.parse
|
|
4
|
+
import uuid
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
|
|
7
|
+
import numpy as np
|
|
8
|
+
import pymongo
|
|
9
|
+
import redis
|
|
10
|
+
import requests
|
|
11
|
+
|
|
12
|
+
# CONFIGURATION
|
|
13
|
+
db_host = os.getenv("DB_HOST", "db")
|
|
14
|
+
db_port = os.getenv("DB_PORT", 27017)
|
|
15
|
+
password = urllib.parse.quote_plus(str(os.getenv("DB_PASSWORD")))
|
|
16
|
+
username = urllib.parse.quote_plus(str(os.getenv("DB_USERNAME")))
|
|
17
|
+
MEDIA_URL = "http://media_ai_internal:5005"
|
|
18
|
+
REDIS_HOST = os.getenv("REDIS_HOST", "cachedb")
|
|
19
|
+
MONGO_URI = f"mongodb://{username}:{password}@{db_host}:{db_port}/?authSource=admin"
|
|
20
|
+
|
|
21
|
+
# DB SETUP
|
|
22
|
+
r = redis.Redis(host=REDIS_HOST, port=6379, decode_responses=True)
|
|
23
|
+
|
|
24
|
+
mongo = pymongo.MongoClient(MONGO_URI)
|
|
25
|
+
db = mongo[os.getenv("DB_DB")]
|
|
26
|
+
# connect(host=f"mongodb://{username}:{password}@{host}:{port}/{dbname}?authSource=admin")
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def get_vector(text):
|
|
30
|
+
"""Helper to get embedding from your Media AI container"""
|
|
31
|
+
try:
|
|
32
|
+
resp = requests.post(f"{MEDIA_URL}/embeddings", json={"text": text}, timeout=30)
|
|
33
|
+
if resp.status_code == 200:
|
|
34
|
+
return resp.json()["embedding"]
|
|
35
|
+
except Exception as e:
|
|
36
|
+
print(f"Vector Gen Failed: {e}")
|
|
37
|
+
return None
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def process_single_object_sync(object_id, collection_name, token):
|
|
41
|
+
"""
|
|
42
|
+
THE WORKER FUNCTION (Runs in Background).
|
|
43
|
+
It is safe to sleep here because we are not in the web request.
|
|
44
|
+
"""
|
|
45
|
+
str_id = str(object_id)
|
|
46
|
+
token_key = f"sync_token:{collection_name}:{str_id}"
|
|
47
|
+
|
|
48
|
+
# 1. THE DEBOUNCE WAIT (Happens in background)
|
|
49
|
+
print(f"Debouncing {str_id} for 5 seconds...")
|
|
50
|
+
time.sleep(5)
|
|
51
|
+
|
|
52
|
+
# 2. THE VERIFICATION
|
|
53
|
+
# Check if a newer save happened while we slept
|
|
54
|
+
current_active_token = r.get(token_key)
|
|
55
|
+
|
|
56
|
+
if current_active_token != token:
|
|
57
|
+
print(f"Skipping sync for {str_id}: Superseded by a newer save.")
|
|
58
|
+
return
|
|
59
|
+
|
|
60
|
+
# 3. THE EXECUTION (Embedding generation)
|
|
61
|
+
print(f"Processing Sync for: {str_id} in {collection_name}")
|
|
62
|
+
|
|
63
|
+
from bson.objectid import ObjectId
|
|
64
|
+
|
|
65
|
+
# FIX: Use dynamic collection access instead of db.objects
|
|
66
|
+
try:
|
|
67
|
+
# Tries to convert string ID to ObjectId.
|
|
68
|
+
# If your DB uses String IDs, remove the ObjectId() wrapper.
|
|
69
|
+
oid = ObjectId(object_id)
|
|
70
|
+
doc = db[collection_name].find_one({"_id": oid})
|
|
71
|
+
except Exception:
|
|
72
|
+
# Fallback if ID is not a valid ObjectId string
|
|
73
|
+
doc = db[collection_name].find_one({"_id": object_id})
|
|
74
|
+
|
|
75
|
+
if not doc:
|
|
76
|
+
print(f"Object {object_id} not found in collection '{collection_name}'")
|
|
77
|
+
# Optional: Remove from Redis index if it exists
|
|
78
|
+
r.delete(f"lore:{object_id}")
|
|
79
|
+
return
|
|
80
|
+
|
|
81
|
+
# 2. Construct Searchable Text
|
|
82
|
+
# (Existing logic...)
|
|
83
|
+
searchable_text = (
|
|
84
|
+
f"{doc.get('name', '')}: {doc.get('description', '')} {doc.get('history', '')}"
|
|
85
|
+
)
|
|
86
|
+
|
|
87
|
+
if len(searchable_text) < 10:
|
|
88
|
+
return
|
|
89
|
+
|
|
90
|
+
# 3. Generate Vector
|
|
91
|
+
vector = get_vector(searchable_text)
|
|
92
|
+
|
|
93
|
+
# 4. Save to Redis Index
|
|
94
|
+
if vector:
|
|
95
|
+
r.hset(
|
|
96
|
+
f"lore:{object_id}",
|
|
97
|
+
mapping={
|
|
98
|
+
"mongo_id": str(object_id),
|
|
99
|
+
"collection": collection_name, # Useful for debugging
|
|
100
|
+
"content": searchable_text,
|
|
101
|
+
"vector": np.array(vector, dtype=np.float32).tobytes(),
|
|
102
|
+
"last_synced": datetime.utcnow().isoformat(),
|
|
103
|
+
},
|
|
104
|
+
)
|
|
105
|
+
print(f"Successfully Indexed: {doc.get('name')}")
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def request_indexing(object_id, collection_name):
|
|
109
|
+
"""
|
|
110
|
+
THE TRIGGER FUNCTION (Runs in Main App).
|
|
111
|
+
MUST BE FAST. NO SLEEPING HERE.
|
|
112
|
+
"""
|
|
113
|
+
print("Requesting Indexing...")
|
|
114
|
+
# Import your Queue Wrapper
|
|
115
|
+
from autonomous.tasks.autotask import AutoTasks
|
|
116
|
+
|
|
117
|
+
# Initialize the Task Runner
|
|
118
|
+
task_runner = AutoTasks()
|
|
119
|
+
|
|
120
|
+
str_id = str(object_id)
|
|
121
|
+
token_key = f"sync_token:{collection_name}:{str_id}"
|
|
122
|
+
|
|
123
|
+
# 1. GENERATE NEW TOKEN
|
|
124
|
+
current_token = str(uuid.uuid4())
|
|
125
|
+
|
|
126
|
+
# 2. SAVE TOKEN TO REDIS (Instant)
|
|
127
|
+
r.set(token_key, current_token, ex=300)
|
|
128
|
+
|
|
129
|
+
# 3. ENQUEUE THE TASK (Instant)
|
|
130
|
+
try:
|
|
131
|
+
task_runner.task(
|
|
132
|
+
process_single_object_sync, # The function to run later
|
|
133
|
+
object_id=str_id,
|
|
134
|
+
collection_name=collection_name,
|
|
135
|
+
token=current_token,
|
|
136
|
+
)
|
|
137
|
+
return True
|
|
138
|
+
except Exception as e:
|
|
139
|
+
print(f"Sync Enqueue failed: {e}")
|
|
140
|
+
return False
|
|
@@ -8,6 +8,7 @@ from autonomous import log
|
|
|
8
8
|
from autonomous.db import Document, connect, signals
|
|
9
9
|
from autonomous.db.errors import ValidationError
|
|
10
10
|
from autonomous.db.fields import DateTimeField
|
|
11
|
+
from autonomous.db import db_sync
|
|
11
12
|
|
|
12
13
|
host = os.getenv("DB_HOST", "db")
|
|
13
14
|
port = os.getenv("DB_PORT", 27017)
|
|
@@ -240,6 +241,9 @@ class AutoModel(Document):
|
|
|
240
241
|
"""
|
|
241
242
|
obj = super().save()
|
|
242
243
|
self.pk = obj.pk
|
|
244
|
+
|
|
245
|
+
db_sync.request_indexing(self.pk, collection_name=self._get_collection_name())
|
|
246
|
+
|
|
243
247
|
return self.pk
|
|
244
248
|
|
|
245
249
|
@classmethod
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from redis import Redis
|
|
3
|
+
from rq import Queue
|
|
4
|
+
from rq.job import Job
|
|
5
|
+
|
|
6
|
+
class AutoTask:
|
|
7
|
+
def __init__(self, job):
|
|
8
|
+
self.job = job
|
|
9
|
+
|
|
10
|
+
@property
|
|
11
|
+
def id(self):
|
|
12
|
+
return self.job.id
|
|
13
|
+
|
|
14
|
+
@property
|
|
15
|
+
def status(self):
|
|
16
|
+
return self.job.get_status()
|
|
17
|
+
|
|
18
|
+
@property
|
|
19
|
+
def result(self):
|
|
20
|
+
# Simplified result fetching
|
|
21
|
+
return {
|
|
22
|
+
"id": self.id,
|
|
23
|
+
"return_value": self.job.result,
|
|
24
|
+
"status": self.status,
|
|
25
|
+
"error": self.job.exc_info
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
class AutoTasks:
|
|
29
|
+
_connection = None
|
|
30
|
+
queue = None
|
|
31
|
+
|
|
32
|
+
# Config stays the same
|
|
33
|
+
config = {
|
|
34
|
+
"host": os.environ.get("REDIS_HOST", "cachedb"),
|
|
35
|
+
"port": os.environ.get("REDIS_PORT", 6379),
|
|
36
|
+
"password": os.environ.get("REDIS_PASSWORD"),
|
|
37
|
+
"username": os.environ.get("REDIS_USERNAME"),
|
|
38
|
+
"db": os.environ.get("REDIS_DB", 0),
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
def __init__(self, queue_name="default"):
|
|
42
|
+
if not AutoTasks._connection:
|
|
43
|
+
options = {}
|
|
44
|
+
if AutoTasks.config.get("password"):
|
|
45
|
+
options["password"] = AutoTasks.config.get("password")
|
|
46
|
+
|
|
47
|
+
# Create Redis Connection
|
|
48
|
+
AutoTasks._connection = Redis(
|
|
49
|
+
host=AutoTasks.config.get("host"),
|
|
50
|
+
port=AutoTasks.config.get("port"),
|
|
51
|
+
decode_responses=False, # RQ requires bytes, not strings
|
|
52
|
+
**options,
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
# Initialize Queue
|
|
56
|
+
AutoTasks.queue = Queue(queue_name, connection=AutoTasks._connection)
|
|
57
|
+
|
|
58
|
+
def task(self, func, *args, **kwargs):
|
|
59
|
+
"""
|
|
60
|
+
Enqueues a job to Redis. Does NOT start a worker.
|
|
61
|
+
"""
|
|
62
|
+
job_timeout = kwargs.pop("_task_job_timeout", 3600)
|
|
63
|
+
|
|
64
|
+
# Enqueue the job
|
|
65
|
+
# func can be a string path or the function object itself
|
|
66
|
+
job = AutoTasks.queue.enqueue(
|
|
67
|
+
func,
|
|
68
|
+
args=args,
|
|
69
|
+
kwargs=kwargs,
|
|
70
|
+
job_timeout=job_timeout
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
return AutoTask(job)
|
|
74
|
+
|
|
75
|
+
def get_task(self, job_id):
|
|
76
|
+
try:
|
|
77
|
+
job = Job.fetch(job_id, connection=AutoTasks._connection)
|
|
78
|
+
return AutoTask(job)
|
|
79
|
+
except Exception:
|
|
80
|
+
return None
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import re
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class TaskRouterBase:
|
|
5
|
+
"""
|
|
6
|
+
Maps URL paths to Task Functions.
|
|
7
|
+
Acts as the central registry for all background tasks.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
# Format: (Regex Pattern, Function Object)
|
|
11
|
+
|
|
12
|
+
@classmethod
|
|
13
|
+
def resolve(cls, path):
|
|
14
|
+
"""
|
|
15
|
+
Parses the path, finds the matching function, and extracts arguments.
|
|
16
|
+
Returns: (function_obj, kwargs_dict) or (None, None)
|
|
17
|
+
"""
|
|
18
|
+
for pattern, func in cls.ROUTES:
|
|
19
|
+
match = re.match(pattern, path)
|
|
20
|
+
if match:
|
|
21
|
+
return func, match.groupdict()
|
|
22
|
+
return None, None
|
|
23
|
+
|
|
24
|
+
ROUTES = []
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: autonomous-app
|
|
3
|
-
Version: 0.3.29
|
|
3
|
+
Version: 0.3.31
|
|
4
4
|
Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
|
|
5
5
|
Author-email: Steven A Moore <samoore@binghamton.edu>
|
|
6
6
|
Project-URL: homepage, https://github.com/Sallenmoore/autonomous
|
|
@@ -24,8 +24,8 @@ Requires-Dist: gunicorn
|
|
|
24
24
|
Requires-Dist: Authlib
|
|
25
25
|
Requires-Dist: rq
|
|
26
26
|
Requires-Dist: ollama
|
|
27
|
-
Requires-Dist: openai>=1.42
|
|
28
27
|
Requires-Dist: google-genai
|
|
28
|
+
Requires-Dist: sentence-transformers
|
|
29
29
|
Requires-Dist: dateparser
|
|
30
30
|
Requires-Dist: python-slugify
|
|
31
31
|
Requires-Dist: pydub
|