superlocalmemory 3.4.0 → 3.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/README.md +7 -8
  2. package/docs/screenshots/01-dashboard-main.png +0 -0
  3. package/docs/screenshots/02-knowledge-graph.png +0 -0
  4. package/docs/screenshots/03-patterns-learning.png +0 -0
  5. package/docs/screenshots/04-learning-dashboard.png +0 -0
  6. package/docs/screenshots/05-behavioral-analysis.png +0 -0
  7. package/docs/screenshots/06-graph-communities.png +0 -0
  8. package/package.json +2 -2
  9. package/pyproject.toml +11 -2
  10. package/scripts/postinstall.js +26 -7
  11. package/src/superlocalmemory/cli/commands.py +42 -60
  12. package/src/superlocalmemory/cli/daemon.py +107 -47
  13. package/src/superlocalmemory/cli/main.py +10 -0
  14. package/src/superlocalmemory/cli/setup_wizard.py +137 -9
  15. package/src/superlocalmemory/core/config.py +28 -0
  16. package/src/superlocalmemory/core/consolidation_engine.py +38 -1
  17. package/src/superlocalmemory/core/engine.py +9 -0
  18. package/src/superlocalmemory/core/engine_wiring.py +5 -1
  19. package/src/superlocalmemory/core/graph_analyzer.py +254 -12
  20. package/src/superlocalmemory/core/health_monitor.py +313 -0
  21. package/src/superlocalmemory/core/reranker_worker.py +19 -5
  22. package/src/superlocalmemory/ingestion/__init__.py +13 -0
  23. package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
  24. package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
  25. package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
  26. package/src/superlocalmemory/ingestion/credentials.py +118 -0
  27. package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
  28. package/src/superlocalmemory/ingestion/parsers.py +100 -0
  29. package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
  30. package/src/superlocalmemory/learning/consolidation_worker.py +287 -53
  31. package/src/superlocalmemory/learning/entity_compiler.py +377 -0
  32. package/src/superlocalmemory/mesh/__init__.py +12 -0
  33. package/src/superlocalmemory/mesh/broker.py +344 -0
  34. package/src/superlocalmemory/retrieval/entity_channel.py +141 -4
  35. package/src/superlocalmemory/retrieval/spreading_activation.py +45 -0
  36. package/src/superlocalmemory/server/api.py +15 -8
  37. package/src/superlocalmemory/server/routes/behavioral.py +8 -4
  38. package/src/superlocalmemory/server/routes/chat.py +320 -0
  39. package/src/superlocalmemory/server/routes/entity.py +95 -0
  40. package/src/superlocalmemory/server/routes/ingest.py +110 -0
  41. package/src/superlocalmemory/server/routes/insights.py +368 -0
  42. package/src/superlocalmemory/server/routes/learning.py +106 -6
  43. package/src/superlocalmemory/server/routes/memories.py +20 -9
  44. package/src/superlocalmemory/server/routes/mesh.py +186 -0
  45. package/src/superlocalmemory/server/routes/stats.py +25 -3
  46. package/src/superlocalmemory/server/routes/timeline.py +252 -0
  47. package/src/superlocalmemory/server/routes/v3_api.py +161 -0
  48. package/src/superlocalmemory/server/ui.py +8 -0
  49. package/src/superlocalmemory/server/unified_daemon.py +691 -0
  50. package/src/superlocalmemory/storage/schema_v343.py +229 -0
  51. package/src/superlocalmemory/ui/index.html +168 -58
  52. package/src/superlocalmemory/ui/js/graph-event-bus.js +83 -0
  53. package/src/superlocalmemory/ui/js/graph-filters.js +1 -1
  54. package/src/superlocalmemory/ui/js/knowledge-graph.js +942 -0
  55. package/src/superlocalmemory/ui/js/memory-chat.js +344 -0
  56. package/src/superlocalmemory/ui/js/memory-timeline.js +265 -0
  57. package/src/superlocalmemory/ui/js/quick-actions.js +334 -0
  58. package/src/superlocalmemory.egg-info/PKG-INFO +0 -594
  59. package/src/superlocalmemory.egg-info/SOURCES.txt +0 -279
  60. package/src/superlocalmemory.egg-info/dependency_links.txt +0 -1
  61. package/src/superlocalmemory.egg-info/entry_points.txt +0 -2
  62. package/src/superlocalmemory.egg-info/requires.txt +0 -47
  63. package/src/superlocalmemory.egg-info/top_level.txt +0 -1
@@ -0,0 +1,320 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under AGPL-3.0-or-later — see LICENSE file
3
+ # Part of SuperLocalMemory v3.4.1 | https://qualixar.com
4
+
5
+ """Ask My Memory — SSE chat endpoint.
6
+
7
+ Flow: query → 6-channel retrieval → format context → LLM stream → SSE
8
+ Mode A: No LLM, returns formatted retrieval results.
9
+ Mode B: Ollama local streaming via /api/chat.
10
+ Mode C: Cloud LLM streaming (OpenAI-compatible).
11
+
12
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
13
+ """
14
+
15
+ from __future__ import annotations
16
+
17
+ import asyncio
18
+ import json
19
+ import logging
20
+ import re
21
+ from typing import AsyncGenerator
22
+
23
+ import httpx
24
+ from fastapi import APIRouter, Request
25
+ from fastapi.responses import StreamingResponse
26
+
27
# Module logger — route handlers log through the package's logging config.
logger = logging.getLogger(__name__)

# Mounted by the FastAPI app; the tag groups these endpoints in OpenAPI docs.
router = APIRouter(tags=["chat"])

# Citation marker pattern: [MEM-1], [MEM-2], etc.
# Matches the inline markers the system prompt below instructs the LLM to emit.
# NOTE(review): _CITATION_RE is not referenced elsewhere in this module —
# confirm a consumer exists before removing.
_CITATION_RE = re.compile(r"\[MEM-(\d+)\]")

# System prompt for LLM — instructs citation usage
_SYSTEM_PROMPT = (
    "You are a memory assistant. Answer the user's question using ONLY the "
    "provided memories. When you use information from a memory, include its "
    "marker inline, e.g. [MEM-1]. If no memories are relevant, say so. "
    "Be concise and factual."
)
+
42
+
43
+ # ── SSE Stream Endpoint ─────────────────────────────────────────
44
+
45
@router.post("/api/v3/chat/stream")
async def chat_stream(request: Request):
    """Stream a memory-grounded chat response via SSE.

    Body: {"query": "...", "mode": "a"|"b"|"c", "limit": 10}
    Response: text/event-stream with events: token, citation, done, error

    Errors are reported as SSE "error" events (HTTP status stays 200) so the
    browser EventSource/fetch-stream client has a single handling path.
    """
    try:
        body = await request.json()
    except Exception:
        return StreamingResponse(
            _sse_error("Invalid JSON body"),
            media_type="text/event-stream",
        )

    # A valid-JSON-but-non-object body (e.g. a list) would crash body.get()
    # below with an unhandled AttributeError — treat it as invalid input.
    if not isinstance(body, dict):
        return StreamingResponse(
            _sse_error("Invalid JSON body"),
            media_type="text/event-stream",
        )

    query = (body.get("query") or "").strip()
    if not query:
        return StreamingResponse(
            _sse_error("Query is required"),
            media_type="text/event-stream",
        )

    mode = (body.get("mode") or "a").lower()
    # Clamp limit to [1, 20]; a non-numeric or null value falls back to the
    # default instead of raising mid-request.
    try:
        limit = max(1, min(int(body.get("limit", 10)), 20))
    except (TypeError, ValueError):
        limit = 10

    return StreamingResponse(
        _stream_chat(query, mode, limit),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "X-Accel-Buffering": "no",  # nginx: disable proxy buffering
            "Content-Encoding": "identity",  # bypass GZipMiddleware
        },
    )
79
+
80
+
81
+ # ── Core Chat Logic ──────────────────────────────────────────────
82
+
83
async def _stream_chat(
    query: str, mode: str, limit: int,
) -> AsyncGenerator[str, None]:
    """Retrieve memories, then stream LLM response with citations.

    Yields pre-formatted SSE frames (see _sse_event). Always terminates the
    stream with a "done" event, including on retrieval failure.
    """

    # Step 1: Retrieve memories via WorkerPool. _recall_memories is
    # synchronous, so it runs in the default executor to keep the event
    # loop responsive.
    memories = []
    try:
        # get_running_loop() is the supported call inside a coroutine;
        # get_event_loop() is deprecated in this context since Python 3.10.
        loop = asyncio.get_running_loop()
        memories = await loop.run_in_executor(None, _recall_memories, query, limit)
    except Exception as exc:
        yield _sse_event("error", json.dumps({"message": f"Retrieval failed: {exc}"}))
        yield _sse_event("done", "")
        return

    if not memories:
        yield _sse_event("token", "No relevant memories found for your query.")
        yield _sse_event("done", "")
        return

    # Step 2: Send citation metadata first so the client can render source
    # cards before (and while) answer tokens arrive.
    for i, mem in enumerate(memories):
        citation_data = {
            "index": i + 1,
            "fact_id": mem.get("fact_id", ""),
            "content_preview": (mem.get("content") or "")[:80],
            "trust_score": mem.get("trust_score", 0),
            "score": mem.get("score", 0),
        }
        yield _sse_event("citation", json.dumps(citation_data))

    # Step 3: Route to appropriate mode
    if mode == "a":
        # Mode A: No LLM — return formatted retrieval results
        async for event in _stream_mode_a(query, memories):
            yield event
    elif mode in ("b", "c"):
        # Mode B/C: LLM streaming
        async for event in _stream_mode_bc(query, memories, mode):
            yield event
    else:
        yield _sse_event("token", "Unknown mode. Use a, b, or c.")

    yield _sse_event("done", "")
127
+
128
+
129
+ # ── Mode A: Raw Retrieval Results ────────────────────────────────
130
+
131
async def _stream_mode_a(
    query: str, memories: list,
) -> AsyncGenerator[str, None]:
    """Render retrieval results as a readable answer without any LLM.

    Mode A is the zero-cloud path: no model is available, so the raw
    retrieval hits are streamed in a structured markdown-ish format.
    Conversational answers require Mode B (Ollama) or Mode C (Cloud).
    """
    intro = (
        "**Mode A — Raw Memory Retrieval** (no LLM connected)\n",
        "For AI-powered answers, switch to Mode B or C in Settings.\n",
        f"Found **{len(memories)}** relevant memories for: *{query}*\n\n",
    )
    for line in intro:
        yield _sse_event("token", line)
    # Tiny pause between frames so the UI paints incrementally.
    await asyncio.sleep(0.03)

    for rank, mem in enumerate(memories, start=1):
        body = mem.get("content") or mem.get("source_content") or ""
        relevance = mem.get("score", 0)
        trust = mem.get("trust_score", 0)
        yield _sse_event(
            "token",
            f"**[MEM-{rank}]** (relevance: {relevance:.2f}, trust: {trust:.2f})\n"
            f"{body}\n\n",
        )
        await asyncio.sleep(0.03)
155
+
156
+
157
+ # ── Mode B/C: LLM Streaming ─────────────────────────────────────
158
+
159
async def _stream_mode_bc(
    query: str, memories: list, mode: str,
) -> AsyncGenerator[str, None]:
    """Stream LLM response with memory context and citation detection.

    Builds a [MEM-n]-tagged context block, loads the configured provider,
    and streams tokens. Falls back to Mode A output when no provider is
    configured. The caller (_stream_chat) appends the final "done" event.
    """

    # Build context with citation markers matching the _SYSTEM_PROMPT contract.
    context_parts = []
    for i, mem in enumerate(memories):
        content = mem.get("content") or mem.get("source_content") or ""
        trust = mem.get("trust_score", 0)
        context_parts.append(f"[MEM-{i+1}] {content} (trust: {trust:.2f})")
    context = "\n".join(context_parts)

    messages = [
        {"role": "system", "content": _SYSTEM_PROMPT},
        {"role": "user", "content": f"Memories:\n{context}\n\nQuestion: {query}"},
    ]

    # Load LLM config
    try:
        from superlocalmemory.core.config import SLMConfig
        config = SLMConfig.load()
        provider = config.llm.provider or ""
        model = config.llm.model or ""
        api_key = config.llm.api_key or ""
        api_base = config.llm.api_base or ""
    except Exception:
        # Log the real cause (import error, corrupt config file, ...) instead
        # of silently discarding it; the client only sees a friendly message.
        logger.exception("Failed to load LLM config for chat streaming")
        yield _sse_event("token", "LLM not configured. Use Mode A or configure a provider in Settings.")
        return

    if not provider:
        yield _sse_event("token", "No LLM provider configured. Showing raw results instead.\n\n")
        async for event in _stream_mode_a(query, memories):
            yield event
        return

    # Stream from provider; errors are surfaced inline as tokens so the
    # partial answer (if any) is preserved on the client.
    try:
        if provider == "ollama":
            async for token in _stream_ollama(messages, model, api_base):
                yield _sse_event("token", token)
        else:
            async for token in _stream_openai_compat(
                messages, model, api_key, api_base, provider,
            ):
                yield _sse_event("token", token)
    except httpx.ConnectError:
        yield _sse_event("token", f"\n\n[Connection failed — is {provider} running?]")
    except Exception as exc:
        yield _sse_event("token", f"\n\n[LLM error: {exc}]")
209
+
210
+
211
+ # ── Ollama Streaming (/api/chat with messages) ───────────────────
212
+
213
async def _stream_ollama(
    messages: list, model: str, api_base: str,
) -> AsyncGenerator[str, None]:
    """Yield answer tokens from Ollama's /api/chat streaming endpoint.

    Each response line is a standalone JSON object; tokens live under
    message.content and a {"done": true} object terminates the stream.
    """
    import os
    host = api_base or os.environ.get("OLLAMA_HOST", "http://localhost:11434")
    endpoint = f"{host.rstrip('/')}/api/chat"

    request_body = {
        "model": model or "llama3.2",
        "messages": messages,
        "stream": True,
        "options": {"num_predict": 1024, "temperature": 0.3, "num_ctx": 4096},
    }

    async with httpx.AsyncClient(timeout=httpx.Timeout(120.0)) as client:
        async with client.stream("POST", endpoint, json=request_body) as resp:
            resp.raise_for_status()
            async for raw_line in resp.aiter_lines():
                if not raw_line:
                    continue
                try:
                    parsed = json.loads(raw_line)
                except json.JSONDecodeError:
                    # Skip malformed/partial lines rather than aborting.
                    continue
                if parsed.get("done"):
                    break
                piece = parsed.get("message", {}).get("content", "")
                if piece:
                    yield piece
243
+
244
+
245
+ # ── OpenAI-Compatible Streaming ──────────────────────────────────
246
+
247
async def _stream_openai_compat(
    messages: list, model: str, api_key: str,
    api_base: str, provider: str,
) -> AsyncGenerator[str, None]:
    """Stream tokens from OpenAI-compatible APIs (OpenAI, Azure, OpenRouter)
    and from Anthropic, which needs its own request and delta schema.
    """
    import os

    if provider == "azure":
        url = api_base  # Azure uses full deployment URL
        headers = {"api-key": api_key, "Content-Type": "application/json"}
    elif provider == "openrouter":
        url = api_base or "https://openrouter.ai/api/v1/chat/completions"
        headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    elif provider == "anthropic":
        url = api_base or "https://api.anthropic.com/v1/messages"
        headers = {
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
            "Content-Type": "application/json",
        }
    else:
        url = api_base or "https://api.openai.com/v1/chat/completions"
        headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

    payload = {
        "model": model,
        "messages": messages,
        "stream": True,
        "max_tokens": 1024,
        "temperature": 0.3,
    }

    if provider == "anthropic":
        # Anthropic's Messages API rejects a "system" role inside `messages`;
        # the system prompt must be sent as the top-level "system" field.
        system_texts = [m.get("content", "") for m in messages if m.get("role") == "system"]
        payload["messages"] = [m for m in messages if m.get("role") != "system"]
        if system_texts:
            payload["system"] = "\n".join(system_texts)

    async with httpx.AsyncClient(timeout=httpx.Timeout(120.0)) as client:
        async with client.stream("POST", url, json=payload, headers=headers) as resp:
            resp.raise_for_status()
            async for line in resp.aiter_lines():
                if not line.startswith("data: "):
                    continue
                data = line[6:]
                if data == "[DONE]":  # OpenAI-style stream terminator
                    break
                try:
                    chunk = json.loads(data)
                except json.JSONDecodeError:
                    continue
                if provider == "anthropic":
                    # Anthropic SSE: text arrives as content_block_delta
                    # events; message_stop marks the end of the stream.
                    if chunk.get("type") == "message_stop":
                        break
                    if chunk.get("type") != "content_block_delta":
                        continue
                    token = chunk.get("delta", {}).get("text", "")
                else:
                    token = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
                if token:
                    yield token
296
+
297
+
298
+ # ── Retrieval Helper ─────────────────────────────────────────────
299
+
300
def _recall_memories(query: str, limit: int) -> list:
    """Run 6-channel retrieval via the shared WorkerPool.

    Synchronous by design — _stream_chat dispatches it to an executor.
    Returns an empty list when the pool reports a non-ok result.
    """
    from superlocalmemory.core.worker_pool import WorkerPool
    response = WorkerPool.shared().recall(query, limit=limit)
    if not response.get("ok"):
        return []
    return response.get("results", [])
308
+
309
+
310
+ # ── SSE Formatting ───────────────────────────────────────────────
311
+
312
+ def _sse_event(event_type: str, data: str) -> str:
313
+ """Format a single SSE event."""
314
+ return f"event: {event_type}\ndata: {data}\n\n"
315
+
316
+
317
async def _sse_error(message: str) -> AsyncGenerator[str, None]:
    """Emit one error event, then the terminating done event."""
    frames = (
        ("error", json.dumps({"message": message})),
        ("done", ""),
    )
    for event_type, payload in frames:
        yield _sse_event(event_type, payload)
@@ -0,0 +1,95 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """Entity compilation API routes — view and recompile entity summaries."""
6
+
7
+ from __future__ import annotations
8
+
9
+ from fastapi import APIRouter, HTTPException, Request, Query
10
+
11
# Mounted by the server app; all entity routes live under /api/entity.
router = APIRouter(prefix="/api/entity", tags=["entity"])
12
+
13
+
14
@router.get("/{entity_name}")
async def get_entity(
    entity_name: str,
    request: Request,
    profile: str = Query(default="default"),
    project: str = Query(default=""),
):
    """Get compiled truth + timeline for an entity.

    Looks the entity up by canonical name (case-insensitive) scoped to the
    given profile and project, and returns the compiled-profile fields.

    Raises:
        HTTPException 503: engine has not been initialized on the app.
        HTTPException 404: no matching entity profile row exists.
    """
    engine = request.app.state.engine
    if engine is None:
        raise HTTPException(503, detail="Engine not initialized")

    # Local imports keep module import cheap for apps that never hit this route.
    import sqlite3
    import json
    # NOTE(review): opens a fresh read connection against the engine's private
    # _config.db_path on every request — confirm this stays consistent with
    # how the engine manages its own connections.
    conn = sqlite3.connect(str(engine._config.db_path))
    conn.row_factory = sqlite3.Row  # enable row["column"] access below
    try:
        # Search by canonical_name (case-insensitive)
        row = conn.execute("""
            SELECT ep.compiled_truth, ep.timeline, ep.fact_ids_json,
                   ep.last_compiled_at, ep.compilation_confidence,
                   ep.knowledge_summary, ce.entity_type
            FROM entity_profiles ep
            JOIN canonical_entities ce ON ep.entity_id = ce.entity_id
            WHERE LOWER(ce.canonical_name) = LOWER(?)
              AND ep.profile_id = ?
              AND ep.project_name = ?
        """, (entity_name, profile, project)).fetchone()

        if not row:
            raise HTTPException(404, detail=f"Entity '{entity_name}' not found")

        return {
            "entity_name": entity_name,
            "entity_type": row["entity_type"],
            "compiled_truth": row["compiled_truth"] or "",
            "knowledge_summary": row["knowledge_summary"] or "",
            # timeline / fact_ids_json are stored as JSON text; decode to lists
            "timeline": json.loads(row["timeline"]) if row["timeline"] else [],
            "source_fact_ids": json.loads(row["fact_ids_json"]) if row["fact_ids_json"] else [],
            "last_compiled_at": row["last_compiled_at"],
            "confidence": row["compilation_confidence"],
        }
    finally:
        conn.close()
58
+
59
+
60
@router.post("/{entity_name}/recompile")
async def recompile_entity(
    entity_name: str,
    request: Request,
    profile: str = Query(default="default"),
    project: str = Query(default=""),
):
    """Force immediate recompilation of an entity.

    Resolves the entity by canonical name (case-insensitive) within the
    profile, then invokes EntityCompiler synchronously.

    Raises:
        HTTPException 503: engine has not been initialized on the app.
        HTTPException 404: no matching canonical entity exists.
    """
    engine = request.app.state.engine
    if engine is None:
        raise HTTPException(503, detail="Engine not initialized")

    import sqlite3
    conn = sqlite3.connect(str(engine._config.db_path))
    conn.row_factory = sqlite3.Row
    try:
        entity = conn.execute(
            "SELECT entity_id, canonical_name, entity_type FROM canonical_entities "
            "WHERE LOWER(canonical_name) = LOWER(?) AND profile_id = ?",
            (entity_name, profile),
        ).fetchone()
    finally:
        # Close BEFORE compiling: EntityCompiler opens its own connection and
        # writes to the same SQLite database — holding this read handle open
        # for the duration of compilation risks "database is locked" errors.
        conn.close()

    if not entity:
        raise HTTPException(404, detail=f"Entity '{entity_name}' not found")

    from superlocalmemory.learning.entity_compiler import EntityCompiler
    compiler = EntityCompiler(str(engine._config.db_path), engine._config)
    result = compiler.compile_entity(
        profile, project, entity["entity_id"], entity["canonical_name"],
    )

    if result:
        return {"ok": True, **result}
    return {"ok": False, "reason": "no facts to compile"}
@@ -0,0 +1,110 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """Ingestion endpoint — accepts data from external adapters.
6
+
7
+ POST /ingest with {content, source_type, dedup_key, metadata}.
8
+ Deduplicates by source_type + dedup_key. Stores via MemoryEngine.
9
+ Admission control: max 10 concurrent ingestions (HTTP 429 on overflow).
10
+
11
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import sqlite3
18
+ import threading
19
+ from datetime import datetime, timezone
20
+
21
+ from fastapi import APIRouter, HTTPException, Request
22
+ from pydantic import BaseModel
23
+
24
# Mounted by the server app; no prefix — the endpoint path is /ingest.
router = APIRouter(tags=["ingestion"])

# Admission control: at most this many ingestions in flight at once;
# excess requests receive HTTP 429 (see ingest()).
_MAX_CONCURRENT = 10
_active_count = 0  # number of in-flight ingestions; guarded by _active_lock
_active_lock = threading.Lock()
29
+
30
+
31
class IngestRequest(BaseModel):
    """Payload accepted by POST /ingest.

    Note: pydantic copies mutable field defaults per instance, so the bare
    ``{}`` default here does not suffer the shared-mutable-default pitfall.
    """

    content: str  # raw text to store as a memory
    source_type: str  # adapter identifier, e.g. "gmail" or "calendar"
    dedup_key: str  # unique within source_type; used to skip re-ingestion
    metadata: dict = {}  # optional adapter-specific fields, merged into stored metadata
36
+
37
+
38
@router.post("/ingest")
async def ingest(req: IngestRequest, request: Request):
    """Ingest content from an external adapter.

    Deduplicates by (source_type, dedup_key). Returns 429 if too many
    concurrent ingestions. Stores via the singleton MemoryEngine.

    Raises:
        HTTPException 503: engine not initialized.
        HTTPException 400: missing content / source_type / dedup_key.
        HTTPException 429: admission-control limit reached.
        HTTPException 500: storage or logging failure (cause chained).
    """
    global _active_count

    engine = request.app.state.engine
    if engine is None:
        raise HTTPException(503, detail="Engine not initialized")

    if not req.content:
        raise HTTPException(400, detail="content required")
    if not req.source_type:
        raise HTTPException(400, detail="source_type required")
    if not req.dedup_key:
        raise HTTPException(400, detail="dedup_key required")

    # Admission control — check-and-increment under a single lock hold so two
    # requests cannot both slip past the limit.
    with _active_lock:
        if _active_count >= _MAX_CONCURRENT:
            raise HTTPException(
                429,
                detail="Too many concurrent ingestions",
                headers={"Retry-After": "5"},
            )
        _active_count += 1

    # NOTE(review): the sqlite calls and engine.store() below are synchronous
    # and run on the event loop thread — consider run_in_executor if
    # ingestion latency becomes an issue.
    try:
        # Dedup check (best-effort fast path; the INSERT OR IGNORE below is
        # the authoritative guard against a concurrent duplicate).
        conn = sqlite3.connect(str(engine._config.db_path))
        try:
            existing = conn.execute(
                "SELECT id FROM ingestion_log WHERE source_type=? AND dedup_key=?",
                (req.source_type, req.dedup_key),
            ).fetchone()
            if existing:
                return {"ingested": False, "reason": "already_ingested"}
        finally:
            conn.close()

        # Store via engine
        metadata = {**req.metadata, "source_type": req.source_type}
        fact_ids = engine.store(req.content, metadata=metadata)

        # Log to ingestion_log
        conn = sqlite3.connect(str(engine._config.db_path))
        try:
            conn.execute(
                "INSERT OR IGNORE INTO ingestion_log "
                "(source_type, dedup_key, fact_ids, metadata, status, ingested_at) "
                "VALUES (?, ?, ?, ?, 'ingested', ?)",
                (
                    req.source_type,
                    req.dedup_key,
                    json.dumps(fact_ids),
                    json.dumps(req.metadata),
                    datetime.now(timezone.utc).isoformat(),
                ),
            )
            conn.commit()
        finally:
            conn.close()

        return {"ingested": True, "fact_ids": fact_ids}

    except HTTPException:
        # Don't re-wrap deliberate HTTP errors as opaque 500s.
        raise
    except Exception as exc:
        # Chain the original cause so the real traceback survives into logs.
        raise HTTPException(500, detail=str(exc)) from exc
    finally:
        with _active_lock:
            _active_count -= 1