autonomous-app 0.3.28__tar.gz → 0.3.30__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/PKG-INFO +2 -2
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/requirements.txt +1 -1
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/__init__.py +1 -1
- autonomous_app-0.3.30/src/autonomous/ai/baseagent.py +33 -0
- autonomous_app-0.3.30/src/autonomous/ai/models/local_model.py +392 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/__init__.py +1 -0
- autonomous_app-0.3.30/src/autonomous/db/db_sync.py +140 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/model/automodel.py +4 -0
- autonomous_app-0.3.30/src/autonomous/tasks/autotask.py +80 -0
- autonomous_app-0.3.30/src/autonomous/tasks/task_router.py +26 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/PKG-INFO +2 -2
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/SOURCES.txt +3 -2
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/requires.txt +1 -1
- autonomous_app-0.3.28/src/autonomous/ai/baseagent.py +0 -42
- autonomous_app-0.3.28/src/autonomous/ai/models/local.py +0 -99
- autonomous_app-0.3.28/src/autonomous/model/__init__.py +0 -1
- autonomous_app-0.3.28/src/autonomous/tasks/autotask.py +0 -144
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/README.md +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/pyproject.toml +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/setup.cfg +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/setup.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/audioagent.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/imageagent.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/jsonagent.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/models/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/models/aws.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/models/deepseek.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/models/gemini.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/models/openai.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/ai/textagent.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHCallbacks.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHOrganization.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHRepo.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/GHVersionControl.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/apis/version_control/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/auth/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/auth/autoauth.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/auth/github.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/auth/google.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/auth/user.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/cli.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/base/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/base/common.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/base/datastructures.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/base/document.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/base/fields.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/base/metaclasses.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/base/utils.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/common.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/connection.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/context_managers.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/dereference.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/document.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/errors.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/fields.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/mongodb_support.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/pymongo_support.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/queryset/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/queryset/base.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/queryset/field_list.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/queryset/manager.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/queryset/queryset.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/queryset/transform.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/queryset/visitor.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/db/signals.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/logger.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/model/autoattr.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/storage/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/storage/imagestorage.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/storage/localstorage.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/tasks/__init__.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/utils/markdown.py +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/dependency_links.txt +0 -0
- {autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous_app.egg-info/top_level.txt +0 -0
{autonomous_app-0.3.28 → autonomous_app-0.3.30}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: autonomous-app
-Version: 0.3.28
+Version: 0.3.30
 Summary: Containerized application framework built on Flask with additional libraries and tools for rapid development of web applications.
 Author-email: Steven A Moore <samoore@binghamton.edu>
 Project-URL: homepage, https://github.com/Sallenmoore/autonomous

@@ -24,8 +24,8 @@ Requires-Dist: gunicorn
 Requires-Dist: Authlib
 Requires-Dist: rq
 Requires-Dist: ollama
-Requires-Dist: openai>=1.42
 Requires-Dist: google-genai
+Requires-Dist: sentence-transformers
 Requires-Dist: dateparser
 Requires-Dist: python-slugify
 Requires-Dist: pydub
autonomous_app-0.3.30/src/autonomous/ai/baseagent.py

@@ -0,0 +1,33 @@
+from autonomous import log
+from autonomous.model.autoattr import ReferenceAttr
+from autonomous.model.automodel import AutoModel
+
+from .models.gemini import GeminiAIModel
+from .models.local_model import LocalAIModel
+from .models.openai import OpenAIModel
+
+
+class BaseAgent(AutoModel):
+    meta = {"abstract": True, "allow_inheritance": True, "strict": False}
+
+    client = ReferenceAttr(choices=[LocalAIModel])
+
+    _ai_model = LocalAIModel
+
+    def delete(self):
+        if self.client:
+            self.client.delete()
+        return super().delete()
+
+    def get_agent_id(self):
+        return self.get_client().id
+
+    def get_client(self):
+        self.client = self._ai_model(
+            name=self.name,
+            instructions=self.instructions,
+            description=self.description,
+        )
+        self.client.save()
+        self.save()
+        return self.client
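The new `BaseAgent` provisions its backing model through `get_client()`, which builds a `LocalAIModel` from the agent's own attributes and saves both objects, while `delete()` cascades to the attached client. A minimal usage sketch follows, under stated assumptions: the `StoryAgent` subclass and its field declarations are hypothetical, since `BaseAgent` reads `name`, `instructions`, and `description` without declaring them in this diff. Note also that `get_client()` as written always constructs a fresh client rather than reusing an existing `self.client`.

```python
# Hypothetical subclass exercising the new BaseAgent contract; the attribute
# declarations are assumptions, as BaseAgent itself does not define them.
from autonomous.ai.baseagent import BaseAgent
from autonomous.model.autoattr import StringAttr


class StoryAgent(BaseAgent):
    name = StringAttr(default="storyteller")
    instructions = StringAttr(default="Narrate scenes in the second person.")
    description = StringAttr(default="A narrative agent backed by LocalAIModel.")


agent = StoryAgent()
client = agent.get_client()      # constructs and saves a new LocalAIModel, then saves the agent
agent_id = agent.get_agent_id()  # delegates to the freshly built client's id
agent.delete()                   # deletes the attached client before the agent itself
```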
autonomous_app-0.3.30/src/autonomous/ai/models/local_model.py

@@ -0,0 +1,392 @@
+import io
+import json
+import os
+import random
+
+import numpy as np
+import pymongo
+import redis
+import requests
+from bson.objectid import ObjectId
+from pydub import AudioSegment
+
+from autonomous import log
+from autonomous.model.autoattr import ListAttr, StringAttr
+from autonomous.model.automodel import AutoModel
+
+
+class LocalAIModel(AutoModel):
+    messages = ListAttr(StringAttr(default=[]))
+    name = StringAttr(default="agent")
+    instructions = StringAttr(default="You are a helpful AI.")
+    description = StringAttr(default="A Local AI Model using Ollama and Media AI.")
+
+    # Config
+    _ollama_url = os.environ.get("OLLAMA_API_BASE", "http://ollama_internal:11434/api")
+    _media_url = os.environ.get("MEDIA_API_BASE", "http://media_ai_internal:5005")
+    _text_model = "mistral-nemo"
+    _json_model = "mistral-nemo"
+
+    # DB Connections
+    _mongo_client = pymongo.MongoClient("mongodb://db:27017/")
+    _mongo_db = os.getenv("DB_DB", "default")
+    _redis = redis.Redis(host="cachedb", port=6379, decode_responses=True)
+
+    VOICES = {
+        "Zephyr": ["female"],
+        "Puck": ["male"],
+        "Charon": ["male"],
+        "Kore": ["female"],
+        "Fenrir": ["non-binary"],
+        "Leda": ["female"],
+        "Orus": ["male"],
+        "Aoede": ["female"],
+        "Callirhoe": ["female"],
+        "Autonoe": ["female"],
+        "Enceladus": ["male"],
+        "Iapetus": ["male"],
+        "Umbriel": ["male"],
+        "Algieba": ["male"],
+        "Despina": ["female"],
+        "Erinome": ["female"],
+        "Algenib": ["male"],
+        "Rasalgethi": ["non-binary"],
+        "Laomedeia": ["female"],
+        "Achernar": ["female"],
+        "Alnilam": ["male"],
+        "Schedar": ["male"],
+        "Gacrux": ["female"],
+        "Pulcherrima": ["non-binary"],
+        "Achird": ["male"],
+        "Zubenelgenubi": ["male"],
+        "Vindemiatrix": ["female"],
+        "Sadachbia": ["male"],
+        "Sadaltager": ["male"],
+        "Sulafar": ["female"],
+    }
+
+    def _convert_tools_to_json_schema(self, user_function):
+        """
+        Ollama doesn't support 'tools' strictly yet.
+        We convert the tool definition into a system prompt instruction.
+        """
+        # If the user passes a raw dictionary (like a Gemini tool definition)
+        # we extract the relevant parts for the schema.
+        schema = {
+            "name": user_function.get("name"),
+            "description": user_function.get("description", ""),
+            "parameters": user_function.get("parameters", {}),
+        }
+        return json.dumps(schema, indent=2)
+
+    def get_embedding(self, text):
+        try:
+            res = requests.post(f"{self._media_url}/embeddings", json={"text": text})
+            res.raise_for_status()
+            return res.json()["embedding"]
+        except Exception as e:
+            log(f"Embedding Error: {e}", _print=True)
+            return []
+
+    def build_hybrid_context(self, prompt, focus_object_id=None):
+        """
+        Builds context based on RELATIONAL ASSOCIATIONS + SEMANTIC LORE.
+        """
+
+        # 1. Create a Cache Key based on what defines the "Scene"
+        # We assume 'focus_object_id' + rough prompt length captures the context enough
+        cache_key = f"ctx:{focus_object_id}:{len(prompt) // 50}"
+
+        # 2. Check Cache
+        cached_ctx = self._redis.get(cache_key)
+        if cached_ctx:
+            return cached_ctx
+
+        context_str = ""
+
+        # --- PART 1: MONGODB (Relational Associations) ---
+        # If we are focusing on a specific object, fetch it and its specific refs.
+        if focus_object_id:
+            try:
+                # 1. Fetch the Main Object
+                # Handle both string ID and ObjectId
+                oid = (
+                    ObjectId(focus_object_id)
+                    if isinstance(focus_object_id, str)
+                    else focus_object_id
+                )
+
+                main_obj = self._mongo_db.objects.find_one({"_id": oid})
+
+                if main_obj:
+                    # Start the context with the main object itself
+                    context_str += "### FOCUS OBJECT ###\n"
+                    context_str += prompt
+
+                    # 2. Extract References (Associations)
+                    # 1. Start with the main list
+                    ref_ids = main_obj.get("associations", []) or []
+
+                    # 2. Safely add single fields (if they exist)
+                    if world_id := main_obj.get("world"):
+                        ref_ids.append(world_id)
+
+                    # 3. Safely add lists (ensure they are lists)
+                    ref_ids.extend(main_obj.get("stories", []) or [])
+                    ref_ids.extend(main_obj.get("events", []) or [])
+
+                    if ref_ids:
+                        # Convert all to ObjectIds if they are strings
+                        valid_oids = []
+                        for rid in ref_ids:
+                            try:
+                                valid_oids.append(
+                                    ObjectId(rid) if isinstance(rid, str) else rid
+                                )
+                            except:
+                                pass
+
+                        # 3. Fetch all associated objects in ONE query
+                        if valid_oids:
+                            associated_objs = self._mongo_db.objects.find(
+                                {"_id": {"$in": valid_oids}}
+                            )
+
+                            context_str += "\n### ASSOCIATED REFERENCES ###\n"
+                            for obj in associated_objs:
+                                log(f"Associated Obj: {obj}", _print=True)
+                                context_str += f"- {obj}\n"
+
+                    context_str += "\n"
+            except Exception as e:
+                log(f"Mongo Association Error: {e}", _print=True)
+
+        # --- PART 2: REDIS (Semantic Search) ---
+        # We keep this! It catches "Lore" or "Rules" that aren't explicitly linked in the DB.
+        # e.g., If the sword is "Elven", this finds "Elven History" even if not linked by ID.
+        if len(prompt) > 10:
+            vector = self.get_embedding(prompt)
+            if vector:
+                try:
+                    q = "*=>[KNN 2 @vector $blob AS score]"  # Lowered to 2 to save tokens
+                    params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
+                    results = self._redis.ft("search_index").search(
+                        q, query_params=params
+                    )
+
+                    if results.docs:
+                        context_str += "### RELEVANT LORE ###\n"
+                        for doc in results.docs:
+                            context_str += f"- {doc.content}\n"
+                except Exception as e:
+                    pass
+
+        # 3. Save to Cache (Expire in 60s)
+        # This prevents hammering the DB/Vector engine during a rapid conversation
+        self._redis.set(cache_key, context_str, ex=120)
+
+        return context_str
+
+    def generate_json(self, message, function, additional_instructions="", **kwargs):
+        """
+        Mimics Gemini's tool use by forcing Ollama into JSON mode
+        and injecting the schema into the prompt.
+        """
+        schema_str = self._convert_tools_to_json_schema(function)
+
+        focus_pk = kwargs.get("focus_object")
+
+        # Build Relational Context
+        world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
+
+        # Construct System Prompt
+        full_system_prompt = (
+            f"{self.instructions}. {additional_instructions}\n"
+            f"You must respond strictly with a valid JSON object matching this schema:\n"
+            f"{schema_str}\n"
+            f"Do not include markdown formatting or explanations."
+            f"You must strictly adhere to the following context:\n"
+            f"{world_context}"
+        )
+
+        payload = {
+            "model": self._json_model,
+            "prompt": message,
+            "system": full_system_prompt,
+            "format": "json",  # Force JSON mode
+            "stream": False,
+            "keep_alive": "24h",
+        }
+
+        try:
+            response = requests.post(f"{self._ollama_url}/generate", json=payload)
+            response.raise_for_status()
+            result_text = response.json().get("response", "{}")
+
+            # log(f"Raw Local JSON: {result_text}", _print=True)
+            return json.loads(result_text)
+
+        except Exception as e:
+            log(f"==== LocalAI JSON Error: {e} ====", _print=True)
+            return {}
+
+    def generate_text(self, message, additional_instructions="", **kwargs):
+        """
+        Standard text generation via Ollama.
+        """
+        focus_pk = kwargs.get("focus_object")
+
+        # Build Relational Context
+        world_context = self.build_hybrid_context(message, focus_object_id=focus_pk)
+
+        # Construct System Prompt
+        full_system_prompt = (
+            f"{self.instructions}. {additional_instructions}\n"
+            f"You must strictly adhere to the following context:\n"
+            f"{world_context}"
+        )
+
+        payload = {
+            "model": self._text_model,
+            "prompt": message,
+            "system": full_system_prompt,
+            "stream": False,
+            "keep_alive": "24h",
+        }
+
+        try:
+            response = requests.post(f"{self._ollama_url}/generate", json=payload)
+            response.raise_for_status()
+            return response.json().get("response", "")
+        except Exception as e:
+            log(f"==== LocalAI Text Error: {e} ====", _print=True)
+            return "Error generating text."
+
+    def summarize_text(self, text, primer="", **kwargs):
+        primer = primer or "Summarize the following text concisely."
+
+        # Simple chunking logic (similar to your original)
+        # Note: Mistral-Nemo has a large context window (128k), so chunking
+        # is less necessary than with older models, but we keep it for safety.
+        max_chars = 12000  # Roughly 3k tokens
+        chunks = [text[i : i + max_chars] for i in range(0, len(text), max_chars)]
+
+        full_summary = ""
+        for chunk in chunks:
+            payload = {
+                "model": self._text_model,
+                "prompt": f"{primer}:\n\n{chunk}",
+                "stream": False,
+                "keep_alive": "24h",
+            }
+            try:
+                res = requests.post(f"{self._ollama_url}/generate", json=payload)
+                full_summary += res.json().get("response", "") + "\n"
+            except Exception as e:
+                log(f"Summary Error: {e}", _print=True)
+                break
+
+        return full_summary
+
+    def generate_audio_text(self, audio_file, prompt="", **kwargs):
+        """
+        Sends audio bytes to the Media AI container for Whisper transcription.
+        """
+        try:
+            # Prepare the file for upload
+            # audio_file is likely bytes, so we wrap in BytesIO if needed
+            if isinstance(audio_file, bytes):
+                f_obj = io.BytesIO(audio_file)
+            else:
+                f_obj = audio_file
+
+            files = {"file": ("audio.mp3", f_obj, "audio/mpeg")}
+
+            response = requests.post(f"{self._media_url}/transcribe", files=files)
+            response.raise_for_status()
+            return response.json().get("text", "")
+
+        except Exception as e:
+            log(f"STT Error: {e}", _print=True)
+            return ""
+
+    def generate_audio(self, prompt, voice=None, **kwargs):
+        """
+        Sends text to the Media AI container for TTS.
+        """
+        voice = voice or random.choice(list(self.VOICES.keys()))
+
+        try:
+            payload = {"text": prompt, "voice": voice}
+            response = requests.post(f"{self._media_url}/tts", json=payload)
+            response.raise_for_status()
+
+            # Response content is WAV bytes
+            wav_bytes = response.content
+
+            # Convert to MP3 to match your original interface (using pydub)
+            audio = AudioSegment.from_file(io.BytesIO(wav_bytes), format="wav")
+            mp3_buffer = io.BytesIO()
+            audio.export(mp3_buffer, format="mp3")
+            return mp3_buffer.getvalue()
+
+        except Exception as e:
+            log(f"TTS Error: {e}", _print=True)
+            return None
+
+    def generate_image(self, prompt, negative_prompt="", **kwargs):
+        """
+        Generates an image using Local AI.
+        If 'files' are provided, performs Image-to-Image generation using the first file as reference.
+        """
+        try:
+            # Prepare the multipart data
+            # We send the prompt as a form field
+            data = {"prompt": prompt, "negative_prompt": negative_prompt}
+            files = {}
+
+            # Check if reference images were passed
+            if kwargs.get("files"):
+                # Take the first available file
+                for fn, f_bytes in kwargs.get("files").items():
+                    # If f_bytes is bytes, wrap in IO, else assume it's file-like
+                    if isinstance(f_bytes, bytes):
+                        file_obj = io.BytesIO(f_bytes)
+                    else:
+                        file_obj = f_bytes
+
+                    # Add to the request files
+                    # Key must be 'file' to match server.py logic
+                    # TODO: Support multiple images if needed
+                    files["file"] = (fn, file_obj, "image/png")
+                    break  # We only support 1 reference image for SD Img2Img
+
+            # Send Request
+            if files:
+                # Multipart/form-data request (Prompt + File)
+                response = requests.post(
+                    f"{self._media_url}/generate-image", data=data, files=files
+                )
+            else:
+                # Standard request (Prompt only) - server.py handles request.form vs json
+                # But our updated server expects form data for consistency
+                response = requests.post(f"{self._media_url}/generate-image", data=data)
+
+            response.raise_for_status()
+
+            # Returns WebP bytes directly
+            return response.content
+
+        except Exception as e:
+            log(f"Image Gen Error: {e}", _print=True)
+            return None
+
+    def list_voices(self, filters=[]):
+        # Same logic as before
+        if not filters:
+            return list(self.VOICES.keys())
+        voices = []
+        for voice, attribs in self.VOICES.items():
+            if any(f.lower() in attribs for f in filters):
+                voices.append(voice)
+        return voices
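`LocalAIModel` routes text and JSON generation through the Ollama container and embeddings, transcription, TTS, and image generation through the Media AI container, with `build_hybrid_context` layering Mongo associations, Redis KNN "lore" search, and a short-lived Redis cache onto every prompt. One apparent wrinkle in the published code: `_mongo_db` holds the database *name* string from `DB_DB`, yet `build_hybrid_context` calls `self._mongo_db.objects.find_one(...)` as if it were a database handle, so `self._mongo_client[os.getenv("DB_DB", "default")]` is presumably intended. A hedged usage sketch follows; the tool schema and focus id are invented for illustration, and the calls assume the Ollama and Media AI containers are reachable.

```python
# Illustrative only: the tool dict mimics a Gemini-style function declaration,
# which generate_json flattens into the system prompt and enforces through
# Ollama's "format": "json" mode. Endpoints and model names come from the diff.
from autonomous.ai.models.local_model import LocalAIModel

model = LocalAIModel(name="lorekeeper")
model.save()

npc_tool = {  # hypothetical schema, not part of the package
    "name": "create_npc",
    "description": "Create a non-player character record.",
    "parameters": {
        "type": "object",
        "properties": {"name": {"type": "string"}, "goal": {"type": "string"}},
        "required": ["name"],
    },
}

npc = model.generate_json(
    "Invent a rival blacksmith for the port district.",
    function=npc_tool,
    focus_object="...",  # a Mongo _id string; anchors the relational context
)

scene = model.generate_text("Describe the blacksmith's forge at night.")

voice = model.list_voices(filters=["female"])[0]  # filter against VOICES attributes
mp3_bytes = model.generate_audio("Welcome, traveler.", voice=voice)
```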
autonomous_app-0.3.30/src/autonomous/db/db_sync.py

@@ -0,0 +1,140 @@
+import os
+import time
+import urllib.parse
+import uuid
+from datetime import datetime
+
+import numpy as np
+import pymongo
+import redis
+import requests
+
+# CONFIGURATION
+db_host = os.getenv("DB_HOST", "db")
+db_port = os.getenv("DB_PORT", 27017)
+password = urllib.parse.quote_plus(str(os.getenv("DB_PASSWORD")))
+username = urllib.parse.quote_plus(str(os.getenv("DB_USERNAME")))
+MEDIA_URL = "http://media_ai_internal:5005"
+REDIS_HOST = os.getenv("REDIS_HOST", "cachedb")
+MONGO_URI = f"mongodb://{username}:{password}@{db_host}:{db_port}/?authSource=admin"
+
+# DB SETUP
+r = redis.Redis(host=REDIS_HOST, port=6379, decode_responses=True)
+
+mongo = pymongo.MongoClient(MONGO_URI)
+db = mongo[os.getenv("DB_DB")]
+# connect(host=f"mongodb://{username}:{password}@{host}:{port}/{dbname}?authSource=admin")
+
+
+def get_vector(text):
+    """Helper to get embedding from your Media AI container"""
+    try:
+        resp = requests.post(f"{MEDIA_URL}/embeddings", json={"text": text}, timeout=30)
+        if resp.status_code == 200:
+            return resp.json()["embedding"]
+    except Exception as e:
+        print(f"Vector Gen Failed: {e}")
+    return None
+
+
+def process_single_object_sync(object_id, collection_name, token):
+    """
+    THE WORKER FUNCTION (Runs in Background).
+    It is safe to sleep here because we are not in the web request.
+    """
+    str_id = str(object_id)
+    token_key = f"sync_token:{collection_name}:{str_id}"
+
+    # 1. THE DEBOUNCE WAIT (Happens in background)
+    print(f"Debouncing {str_id} for 5 seconds...")
+    time.sleep(5)
+
+    # 2. THE VERIFICATION
+    # Check if a newer save happened while we slept
+    current_active_token = r.get(token_key)
+
+    if current_active_token != token:
+        print(f"Skipping sync for {str_id}: Superseded by a newer save.")
+        return
+
+    # 3. THE EXECUTION (Embedding generation)
+    print(f"Processing Sync for: {str_id} in {collection_name}")
+
+    from bson.objectid import ObjectId
+
+    # FIX: Use dynamic collection access instead of db.objects
+    try:
+        # Tries to convert string ID to ObjectId.
+        # If your DB uses String IDs, remove the ObjectId() wrapper.
+        oid = ObjectId(object_id)
+        doc = db[collection_name].find_one({"_id": oid})
+    except Exception:
+        # Fallback if ID is not a valid ObjectId string
+        doc = db[collection_name].find_one({"_id": object_id})
+
+    if not doc:
+        print(f"Object {object_id} not found in collection '{collection_name}'")
+        # Optional: Remove from Redis index if it exists
+        r.delete(f"lore:{object_id}")
+        return
+
+    # 2. Construct Searchable Text
+    # (Existing logic...)
+    searchable_text = (
+        f"{doc.get('name', '')}: {doc.get('description', '')} {doc.get('history', '')}"
+    )
+
+    if len(searchable_text) < 10:
+        return
+
+    # 3. Generate Vector
+    vector = get_vector(searchable_text)
+
+    # 4. Save to Redis Index
+    if vector:
+        r.hset(
+            f"lore:{object_id}",
+            mapping={
+                "mongo_id": str(object_id),
+                "collection": collection_name,  # Useful for debugging
+                "content": searchable_text,
+                "vector": np.array(vector, dtype=np.float32).tobytes(),
+                "last_synced": datetime.utcnow().isoformat(),
+            },
+        )
+        print(f"Successfully Indexed: {doc.get('name')}")
+
+
+def request_indexing(object_id, collection_name):
+    """
+    THE TRIGGER FUNCTION (Runs in Main App).
+    MUST BE FAST. NO SLEEPING HERE.
+    """
+    # Import your Queue Wrapper
+    from autonomous.tasks.autotask import AutoTasks
+
+    # Initialize the Task Runner
+    task_runner = AutoTasks()
+
+    str_id = str(object_id)
+    token_key = f"sync_token:{collection_name}:{str_id}"
+
+    # 1. GENERATE NEW TOKEN
+    current_token = str(uuid.uuid4())
+
+    # 2. SAVE TOKEN TO REDIS (Instant)
+    r.set(token_key, current_token, ex=300)
+
+    # 3. ENQUEUE THE TASK (Instant)
+    # CRITICAL CHANGE: We use task_runner.task() instead of calling the function directly.
+    try:
+        task_runner.task(
+            process_single_object_sync,  # The function to run later
+            object_id=str_id,
+            collection_name=collection_name,
+            token=current_token,
+        )
+        return True
+    except Exception as e:
+        print(f"Sync Enqueue failed: {e}")
+        return False
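The design here is debounce-by-token: every save writes a fresh UUID to `sync_token:<collection>:<id>`, the enqueued worker sleeps five seconds, and only the worker whose token still matches the one in Redis proceeds, so a burst of rapid saves collapses into a single embedding pass. A self-contained sketch of the same pattern, with an in-memory dict standing in for Redis and a thread standing in for the RQ worker (all names here are illustrative):

```python
# Minimal sketch of db_sync's debounce-by-token pattern, outside the app:
# the dict plays the role of Redis and the thread plays the role of the worker.
import threading
import time
import uuid

tokens = {}  # token_key -> most recent token


def worker(token_key, token):
    time.sleep(5)  # the debounce wait runs off the request path
    if tokens.get(token_key) != token:
        print(f"{token[:8]}: superseded by a newer save; skipping")
        return
    print(f"{token[:8]}: newest save wins; run the expensive indexing step")


def request_indexing(object_id, collection):
    token_key = f"sync_token:{collection}:{object_id}"
    token = str(uuid.uuid4())
    tokens[token_key] = token  # instant, like r.set(..., ex=300)
    threading.Thread(target=worker, args=(token_key, token)).start()


# Three rapid saves of the same object: two workers skip, one indexes.
for _ in range(3):
    request_indexing("abc123", "objects")
time.sleep(6)
```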
{autonomous_app-0.3.28 → autonomous_app-0.3.30}/src/autonomous/model/automodel.py

@@ -8,6 +8,7 @@ from autonomous import log
 from autonomous.db import Document, connect, signals
 from autonomous.db.errors import ValidationError
 from autonomous.db.fields import DateTimeField
+from autonomous.db import db_sync

 host = os.getenv("DB_HOST", "db")
 port = os.getenv("DB_PORT", 27017)

@@ -240,6 +241,9 @@ class AutoModel(Document):
         """
         obj = super().save()
         self.pk = obj.pk
+
+        db_sync.request_indexing(self.pk, collection_name=self._get_collection_name())
+
         return self.pk

     @classmethod
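With this hook, every successful `save()` now schedules an indexing pass through `db_sync.request_indexing`, debounced per document. Presumably the end-to-end flow looks like the sketch below; the `Location` model and its fields are hypothetical:

```python
# Hypothetical model: saving it triggers the new hook, which enqueues
# process_single_object_sync to embed name/description/history text and
# store a "lore:<id>" hash in the Redis vector index.
from autonomous.model.autoattr import StringAttr
from autonomous.model.automodel import AutoModel


class Location(AutoModel):
    name = StringAttr(default="")
    description = StringAttr(default="")


loc = Location(name="Gull's Rest", description="A smugglers' cove below the cliffs.")
loc.save()  # returns self.pk after request_indexing(self.pk, collection_name=...)
```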