sentinel-ai-os 1.0 (sentinel_ai_os-1.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. sentinel/__init__.py +0 -0
  2. sentinel/auth.py +40 -0
  3. sentinel/cli.py +9 -0
  4. sentinel/core/__init__.py +0 -0
  5. sentinel/core/agent.py +298 -0
  6. sentinel/core/audit.py +48 -0
  7. sentinel/core/cognitive.py +94 -0
  8. sentinel/core/config.py +99 -0
  9. sentinel/core/llm.py +143 -0
  10. sentinel/core/registry.py +351 -0
  11. sentinel/core/scheduler.py +61 -0
  12. sentinel/core/schema.py +11 -0
  13. sentinel/core/setup.py +101 -0
  14. sentinel/core/ui.py +112 -0
  15. sentinel/main.py +110 -0
  16. sentinel/paths.py +77 -0
  17. sentinel/tools/__init__.py +0 -0
  18. sentinel/tools/apps.py +462 -0
  19. sentinel/tools/audio.py +30 -0
  20. sentinel/tools/browser.py +66 -0
  21. sentinel/tools/calendar_ops.py +163 -0
  22. sentinel/tools/clock.py +25 -0
  23. sentinel/tools/context.py +40 -0
  24. sentinel/tools/desktop.py +116 -0
  25. sentinel/tools/email_ops.py +62 -0
  26. sentinel/tools/factory.py +125 -0
  27. sentinel/tools/file_ops.py +81 -0
  28. sentinel/tools/flights.py +62 -0
  29. sentinel/tools/gmail_auth.py +47 -0
  30. sentinel/tools/indexer.py +156 -0
  31. sentinel/tools/installer.py +69 -0
  32. sentinel/tools/macros.py +58 -0
  33. sentinel/tools/memory_ops.py +281 -0
  34. sentinel/tools/navigation.py +109 -0
  35. sentinel/tools/notes.py +78 -0
  36. sentinel/tools/office.py +67 -0
  37. sentinel/tools/organizer.py +150 -0
  38. sentinel/tools/smart_index.py +76 -0
  39. sentinel/tools/sql_index.py +186 -0
  40. sentinel/tools/system_ops.py +86 -0
  41. sentinel/tools/vision.py +94 -0
  42. sentinel/tools/weather_ops.py +59 -0
  43. sentinel_ai_os-1.0.dist-info/METADATA +282 -0
  44. sentinel_ai_os-1.0.dist-info/RECORD +48 -0
  45. sentinel_ai_os-1.0.dist-info/WHEEL +5 -0
  46. sentinel_ai_os-1.0.dist-info/entry_points.txt +2 -0
  47. sentinel_ai_os-1.0.dist-info/licenses/LICENSE +21 -0
  48. sentinel_ai_os-1.0.dist-info/top_level.txt +1 -0

sentinel/tools/gmail_auth.py
@@ -0,0 +1,47 @@
+ import os.path
+ from google.auth.transport.requests import Request
+ from google.oauth2.credentials import Credentials
+ from google_auth_oauthlib.flow import InstalledAppFlow
+
+ # --- FIX: IMPORT PATHS FROM CENTRAL MODULE ---
+ from sentinel.paths import CREDENTIALS_PATH, TOKEN_PATH
+
+ # The permissions the AI needs
+ SCOPES = [
+     'https://www.googleapis.com/auth/gmail.readonly',
+     'https://www.googleapis.com/auth/gmail.send'
+ ]
+
+
+ def get_gmail_service():
+     creds = None
+
+     # 1. Check if we already logged in (using the centralized token path)
+     if os.path.exists(TOKEN_PATH):
+         creds = Credentials.from_authorized_user_file(TOKEN_PATH, SCOPES)
+
+     # 2. If not logged in or token expired, log in
+     if not creds or not creds.valid:
+         if creds and creds.expired and creds.refresh_token:
+             creds.refresh(Request())
+         else:
+             # Check if credentials file exists in the user data folder
+             if not os.path.exists(CREDENTIALS_PATH):
+                 print(f"Error: credentials.json not found at {CREDENTIALS_PATH}")
+                 return None
+
+             # This opens your browser to login
+             flow = InstalledAppFlow.from_client_secrets_file(
+                 CREDENTIALS_PATH, SCOPES)
+             creds = flow.run_local_server(port=0)
+
+         # Save the token for next time
+         # Ensure the directory exists
+         TOKEN_PATH.parent.mkdir(parents=True, exist_ok=True)
+
+         with open(TOKEN_PATH, 'w') as token:
+             token.write(creds.to_json())
+
+     # 3. Return the API Service
+     from googleapiclient.discovery import build
+     return build('gmail', 'v1', credentials=creds)
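
For context, a minimal usage sketch of the helper above (illustrative only, not shipped in the wheel). It assumes the Google API client libraries are installed and that credentials.json already exists at CREDENTIALS_PATH; the Gmail call is the standard users().messages().list endpoint.

    from sentinel.tools.gmail_auth import get_gmail_service

    service = get_gmail_service()
    if service:  # None is returned when credentials.json is missing
        # List the five most recent message IDs
        resp = service.users().messages().list(userId="me", maxResults=5).execute()
        for msg in resp.get("messages", []):
            print(msg["id"])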

sentinel/tools/indexer.py
@@ -0,0 +1,156 @@
+ # FILE: tools/indexer.py
+ import os
+ import sqlite3
+ import time
+ import logging
+ from pypdf import PdfReader
+ from docx import Document
+ from openpyxl import load_workbook
+ from pathlib import Path
+
+ BASE_DIR = Path.home() / ".sentinel-1"
+ BASE_DIR.mkdir(exist_ok=True)
+ DB_FILE = BASE_DIR / "file_index.db"
+ MAX_FILE_SIZE_MB = 10  # Skip files larger than this to save RAM
+ THROTTLE_SEC = 0.05  # Sleep time between files (Smoothness)
+
+ SKIP_DIRS = {
+     "node_modules", "venv", ".venv", ".git", "__pycache__",
+     "AppData", "site-packages", "Program Files", "Windows", "Library"
+ }
+
+ SKIP_EXT = {'.exe', '.dll', '.bin', '.iso', '.zip', '.tar', '.gz'}
+
+
+ def _get_text_from_file(path, ext):
+     """Safely extracts text with memory limits."""
+     try:
+         # 1. Check file size first (Fast fail)
+         size_mb = os.path.getsize(path) / (1024 * 1024)
+         if size_mb > MAX_FILE_SIZE_MB:
+             return f"[Skipped: Too Large ({size_mb:.1f}MB)]"
+
+         if ext == '.pdf':
+             reader = PdfReader(path)
+             # Limit to first 50 pages to prevent freezing on 1000-page books
+             return " ".join([page.extract_text() for page in reader.pages[:50] if page.extract_text()])
+
+         elif ext == '.docx':
+             doc = Document(path)
+             return "\n".join([p.text for p in doc.paragraphs])
+
+         elif ext == '.xlsx':
+             wb = load_workbook(path, read_only=True, data_only=True)
+             content = []
+             for sheet in wb.worksheets:
+                 # Limit rows per sheet
+                 for i, row in enumerate(sheet.iter_rows(values_only=True)):
+                     if i > 1000: break
+                     row_text = " ".join([str(c) for c in row if c is not None])
+                     content.append(row_text)
+             return "\n".join(content)
+
+         elif ext == '.csv':
+             with open(path, 'r', encoding='utf-8', errors='ignore') as f:
+                 # Read only first 10k bytes
+                 return f.read(10000)
+         else:
+             with open(path, 'r', encoding='utf-8', errors='ignore') as f:
+                 return f.read(20000)  # Limit text files too
+
+     except Exception:
+         return ""
+
+
+ def build_index(root=None, verbose=False):
+     """
+     Runs smoothly in background without hogging CPU.
+     """
+     if not root: root = os.path.expanduser("~")
+
+     conn = sqlite3.connect(DB_FILE)
+     c = conn.cursor()
+
+     # Enable FTS5
+     c.execute('CREATE VIRTUAL TABLE IF NOT EXISTS files USING fts5(path, name, content)')
+     c.execute('CREATE TABLE IF NOT EXISTS file_meta (path TEXT PRIMARY KEY, mtime REAL)')
+
+     # Cache existing files to avoid unnecessary disk reads
+     c.execute("SELECT path, mtime FROM file_meta")
+     existing_meta = {row[0]: row[1] for row in c.fetchall()}
+
+     count = 0
+     updated = 0
+
+     # We scan these specific user folders to stay efficient
+     target_dirs = [
+         os.path.join(root, "Documents"),
+         os.path.join(root, "Desktop"),
+         os.path.join(root, "Downloads")
+     ]
+
+     for target in target_dirs:
+         if not os.path.exists(target): continue
+
+         for r, dirs, files in os.walk(target):
+             # Clean skip dirs in-place
+             dirs[:] = [d for d in dirs if d not in SKIP_DIRS and not d.startswith(".")]
+
+             for file in files:
+                 # CPU BREATHING ROOM
+                 time.sleep(THROTTLE_SEC)
+
+                 ext = os.path.splitext(file)[1].lower()
+                 if ext in SKIP_EXT: continue
+                 if ext not in ['.txt', '.md', '.py', '.json', '.pdf', '.docx', '.xlsx', '.csv']:
+                     continue
+
+                 path = os.path.join(r, file)
+
+                 try:
+                     stats = os.stat(path)
+                     current_mtime = stats.st_mtime
+
+                     # Skip unchanged
+                     if path in existing_meta and existing_meta[path] == current_mtime:
+                         continue
+
+                     content = _get_text_from_file(path, ext)
+                     if not content or len(content) < 5: continue
+
+                     # Update Database
+                     c.execute("DELETE FROM files WHERE path = ?", (path,))
+                     c.execute("INSERT INTO files (path, name, content) VALUES (?,?,?)", (path, file, content))
+                     c.execute("INSERT OR REPLACE INTO file_meta (path, mtime) VALUES (?,?)", (path, current_mtime))
+                     conn.commit()  # Commit often to save progress
+
+                     updated += 1
+                     count += 1
+
+                     # LOGGING: Only print every 10th file to reduce spam
+                     if verbose and updated % 10 == 0:
+                         print(f" [Indexer] Indexed: {file} ({updated} total)")
+
+                 except Exception:
+                     continue
+
+     conn.close()
+     if verbose and updated > 0:
+         print(f" [Indexer] Batch complete. {updated} new files added.")
+     return f"Indexed {updated} files."
+
+
+ def search_index(query):
+     if not os.path.exists(DB_FILE): return "Index missing."
+     conn = sqlite3.connect(DB_FILE)
+     try:
+         # Search content and return snippets
+         res = conn.execute(
+             "SELECT path, snippet(files, 2, '[', ']', '...', 10) FROM files WHERE files MATCH ? LIMIT 5",
+             (query,)
+         ).fetchall()
+     except Exception:
+         return "No matches."
+     conn.close()
+     if not res: return "No text matches found."
+     return "\n".join([f"📄 {r[0]}\n Snippet: \"{r[1]}\"\n" for r in res])

sentinel/tools/installer.py
@@ -0,0 +1,69 @@
+ import subprocess
+ import json
+
+
+ def winget_search(name):
+     """Find best Winget package ID for a given name."""
+     try:
+         output = subprocess.check_output(
+             f'winget search "{name}" --output json',
+             shell=True
+         ).decode()
+
+         data = json.loads(output)
+
+         if "Data" in data and len(data["Data"]) > 0:
+             return data["Data"][0]["PackageIdentifier"]
+
+     except:
+         pass
+
+     return None
+
+
+ def install_software(package_names):
+     """
+     Returns safe winget install commands.
+     The agent should pass this to run_cmd.
+     """
+
+     WINGET_MAP = {
+         "vscode": "Microsoft.VisualStudioCode",
+         "chrome": "Google.Chrome",
+         "discord": "Discord.Discord",
+         "git": "Git.Git",
+         "python": "Python.Python.3.11",
+         "node": "OpenJS.NodeJS",
+         "spotify": "Spotify.Spotify",
+         "zoom": "Zoom.Zoom",
+         "notion": "Notion.Notion",
+         "docker": "Docker.DockerDesktop"
+     }
+
+     commands = []
+
+     for pkg in package_names:
+         pkg_lower = pkg.lower()
+
+         # 1. Try curated aliases
+         target = WINGET_MAP.get(pkg_lower)
+
+         # 2. Live winget search
+         if not target:
+             target = winget_search(pkg)
+
+         # 3. Fallback
+         if not target:
+             target = pkg
+
+         cmd = f"winget install -e --id {target} --accept-source-agreements --accept-package-agreements"
+         commands.append(cmd)
+
+     return " && ".join(commands)
+
+
+ def list_installed():
+     try:
+         return subprocess.check_output("winget list", shell=True).decode()
+     except:
+         return "Could not list apps. Ensure Winget is installed."

sentinel/tools/macros.py
@@ -0,0 +1,58 @@
+ import webbrowser
+ import os
+ import time
+ from sentinel.tools import apps, desktop
+
+ # Define your personal workflows here
+ WORKFLOWS = {
+     "work": {
+         "apps": ["code", "slack"],  # Add your specific app executable names
+         "urls": ["https://jira.atlassian.com", "https://github.com"],
+         "volume": 20,
+         "say": "Work mode activated. Focus."
+     },
+     "morning": {
+         "urls": ["https://news.ycombinator.com", "https://gmail.com", "https://calendar.google.com"],
+         "say": "Good morning. Here is your briefing."
+     },
+     "chill": {
+         "urls": ["https://youtube.com", "https://reddit.com"],
+         "volume": 50,
+         "say": "Relaxing."
+     }
+ }
+
+
+ def run_macro(name):
+     """
+     Executes a named workflow (work, morning, chill).
+     """
+     if name not in WORKFLOWS:
+         return f"Macro '{name}' not found. Available: {', '.join(WORKFLOWS.keys())}"
+
+     plan = WORKFLOWS[name]
+     log = []
+
+     # 1. Open Apps
+     if "apps" in plan:
+         for app in plan["apps"]:
+             apps.open_app(app)
+             log.append(f"Opened {app}")
+             time.sleep(1)  # Wait for launch
+
+     # 2. Open URLs
+     if "urls" in plan:
+         for url in plan["urls"]:
+             webbrowser.open(url)
+             log.append(f"Opened {url}")
+
+     # 3. System Settings
+     if "volume" in plan:
+         desktop.set_volume(plan["volume"])
+         log.append(f"Volume set to {plan['volume']}")
+
+     # 4. Voice
+     if "say" in plan:
+         desktop.speak(plan["say"])
+
+     return "\n".join(log)

sentinel/tools/memory_ops.py
@@ -0,0 +1,281 @@
+ # FILE: tools/memory_ops.py
+
+ import sqlite3
+ import datetime
+ import chromadb
+ import uuid
+ import json
+ import re
+ import gc
+ from chromadb.utils import embedding_functions
+ from sentinel.core.config import ConfigManager
+ from pathlib import Path
+
+ # --- CONFIGURATION ---
+ BASE_DIR = Path.home() / ".sentinel-1"
+ BASE_DIR.mkdir(exist_ok=True)
+
+ DB_FILE = BASE_DIR / "brain.db"
+ CHROMA_PATH = BASE_DIR / "brain_vectors"
+
+ # Global references for cleanup
+ chroma_client = None
+ collection = None
+
+ def ensure_chroma():
+     global collection
+     if collection is None:
+         init_chroma()
+
+ def init_chroma():
+     """Initializes ChromaDB (Safe re-init)"""
+     global chroma_client, collection
+     try:
+         chroma_client = chromadb.PersistentClient(path=CHROMA_PATH)
+     except Exception as e:
+         chroma_client = None
+         return
+
+     cfg = ConfigManager()
+     openai_key = cfg.get_key("openai")
+
+     if openai_key:
+         emb_fn = embedding_functions.OpenAIEmbeddingFunction(
+             api_key=openai_key, model_name="text-embedding-3-small"
+         )
+     else:
+         emb_fn = embedding_functions.DefaultEmbeddingFunction()
+
+     collection = chroma_client.get_or_create_collection(
+         name="sentinel_memory", embedding_function=emb_fn
+     )
+
+ def delete_fact(subject, predicate=None, obj=None):
+     """
+     Delete matching facts from memory.
+     Any field can be None (acts as wildcard).
+     """
+     ensure_chroma()
+     if not collection:
+         return "Vector DB unavailable."
+
+     try:
+         results = collection.get(include=["metadatas", "documents"])
+
+         ids = results.get("ids", [])
+         metadatas = results.get("metadatas", [])
+         documents = results.get("documents", [])
+
+         to_delete = []
+
+         for i, meta in enumerate(metadatas):
+             text = documents[i]
+
+             match_subject = meta.get("subject") == subject
+             match_pred = predicate is None or predicate in text
+             match_obj = obj is None or obj in text
+
+             if match_subject and match_pred and match_obj:
+                 to_delete.append(ids[i])
+
+         if not to_delete:
+             return "No matching memory found."
+
+         collection.delete(ids=to_delete)
+         return f"Deleted {len(to_delete)} memory entries."
+
+     except Exception as e:
+         return f"Delete error: {e}"
+
+
+ # Initialize on import
+ init_chroma()
+
+
+ def teardown():
+     """Releases database locks for wiping."""
+     global chroma_client, collection
+     try:
+         if chroma_client:
+             chroma_client.reset()
+     except:
+         pass
+
+     chroma_client = None
+     collection = None
+     gc.collect()
+
+
+
+ def _get_sql_conn():
+     conn = sqlite3.connect(DB_FILE, check_same_thread=False)
+     conn.row_factory = sqlite3.Row
+     return conn
+
+
+ def init_memory():
+     conn = _get_sql_conn()
+     conn.execute('''
+         CREATE TABLE IF NOT EXISTS metadata (
+             id TEXT PRIMARY KEY,
+             importance INTEGER,
+             created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+             last_accessed DATETIME DEFAULT CURRENT_TIMESTAMP
+         )
+     ''')
+     conn.execute('''
+         CREATE TABLE IF NOT EXISTS logs (
+             id INTEGER PRIMARY KEY AUTOINCREMENT,
+             action TEXT, details TEXT, timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
+         )
+     ''')
+     conn.commit()
+     conn.close()
+
+
+ init_memory()
+
+
+ def _rate_importance(text):
+     return 5
+
+
+ def store_fact(subject, predicate, obj, context="user_defined"):
+     ensure_chroma()
+     if not collection:
+         return "Vector DB unavailable."
+     fact_text = f"{subject} {predicate} {obj}"
+     mem_id = str(uuid.uuid4())
+     full_text = f"{fact_text}. Context: {context}"
+
+     collection.add(
+         documents=[full_text],
+         metadatas=[{"subject": subject, "type": "fact"}],
+         ids=[mem_id]
+     )
+
+     importance = _rate_importance(fact_text)
+     conn = _get_sql_conn()
+     conn.execute("INSERT INTO metadata (id, importance) VALUES (?, ?)", (mem_id, importance))
+     conn.commit()
+     conn.close()
+     return f"🧠 Memory Stored: {fact_text}"
+
+
+ def retrieve_relevant_context(query, limit=5):
+     ensure_chroma()
+     if not collection:
+         return ""
+     try:
+         results = collection.query(query_texts=[query], n_results=20)
+     except:
+         return ""
+
+     if not results['ids'] or not results['ids'][0]: return ""
+
+     found_ids = results['ids'][0]
+     found_texts = results['documents'][0]
+     found_distances = results['distances'][0]
+
+     conn = _get_sql_conn()
+     placeholders = ','.join(['?'] * len(found_ids))
+     sql = f"SELECT * FROM metadata WHERE id IN ({placeholders})"
+     rows = conn.execute(sql, found_ids).fetchall()
+     conn.close()
+
+     meta_map = {row['id']: dict(row) for row in rows}
+     scored_memories = []
+
+     for i, mem_id in enumerate(found_ids):
+         meta = meta_map.get(mem_id, {'importance': 5, 'last_accessed': str(datetime.datetime.now())})
+         text = found_texts[i]
+         relevance = 1.0 - (found_distances[i] / 2.0)
+         try:
+             last_accessed = datetime.datetime.strptime(meta['last_accessed'], "%Y-%m-%d %H:%M:%S")
+             hours_diff = (datetime.datetime.now() - last_accessed).total_seconds() / 3600
+             recency_score = 0.99 ** hours_diff
+         except:
+             recency_score = 1.0
+
+         importance_score = meta['importance'] / 10.0
+         final_score = (relevance * 0.5) + (importance_score * 0.3) + (recency_score * 0.2)
+
+         if final_score > 0.4: scored_memories.append((final_score, mem_id, text))
+
+     scored_memories.sort(key=lambda x: x[0], reverse=True)
+     top_picks = scored_memories[:limit]
+
+     if top_picks:
+         conn = _get_sql_conn()
+         winner_ids = [x[1] for x in top_picks]
+         placeholders = ','.join(['?'] * len(winner_ids))
+         conn.execute(f"UPDATE metadata SET last_accessed = CURRENT_TIMESTAMP WHERE id IN ({placeholders})", winner_ids)
+         conn.commit()
+         conn.close()
+
+     if not top_picks:
+         return "No long-term memories found."
+
+     return "Relevant Memories:\n" + "\n".join([f"- {x[2]}" for x in top_picks])
+
+
+ def log_activity(action, details):
+     try:
+         conn = _get_sql_conn()
+         conn.execute("INSERT INTO logs (action, details) VALUES (?, ?)", (action, details))
+         conn.commit()
+         conn.close()
+     except:
+         pass
+
+
+ def reflect_on_day(date_str=None):
+     """
+     Retrieves activity logs for a specific day.
+     Required by registry.py and cognitive.py for Daily Briefings.
+     """
+     if not date_str:
+         date_str = datetime.datetime.now().strftime("%Y-%m-%d")
+
+     conn = _get_sql_conn()
+     c = conn.execute('''
+         SELECT time(timestamp) as t, action, details
+         FROM logs
+         WHERE date(timestamp) = ?
+         ORDER BY timestamp ASC
+     ''', (date_str,))
+
+     logs = c.fetchall()
+     conn.close()
+
+     if not logs: return f"No activity recorded for {date_str}."
+
+     summary = f"Activity Log for {date_str}:\n"
+     for log in logs:
+         summary += f"[{log['t']}] {log['action']}: {log['details']}\n"
+
+     return summary
+
+
+ def archive_interaction(user_text, ai_text):
+     from sentinel.core.llm import LLMEngine
+     if not user_text or not ai_text: return
+     if len(user_text) < 10 and len(ai_text) < 10: return
+
+     try:
+         cfg = ConfigManager()
+         brain = LLMEngine(cfg, verbose=False)
+
+         prompt = (
+             "Analyze interaction. Extract PERMANENT facts about user. "
+             "Return JSON list of strings. If none, return [].\n\n"
+             f"User: {user_text}\nAI: {ai_text}"
+         )
+         response = brain.query(prompt, [])
+         match = re.search(r'\[.*\]', response, re.DOTALL)
+         if match:
+             facts = json.loads(match.group(0))
+             for fact in facts:
+                 store_fact("User", "context", fact, context="conversation_archive")
+     except Exception:
+         pass
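
A minimal sketch of the memory round trip exposed by this module (illustrative, not part of the package). It assumes chromadb can create its persistent store under ~/.sentinel-1 and that either an OpenAI key is configured or the default embedding function is available.

    from sentinel.tools import memory_ops

    print(memory_ops.store_fact("User", "prefers", "dark mode"))
    print(memory_ops.retrieve_relevant_context("what UI theme does the user like?"))
    print(memory_ops.reflect_on_day())   # today's activity log, if any
    memory_ops.teardown()                # release DB handles before wiping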