omni-cortex 1.12.0__py3-none-any.whl → 1.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28) hide show
  1. omni_cortex-1.13.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py +572 -0
  2. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/database.py +1653 -1094
  3. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/main.py +1681 -1381
  4. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/models.py +400 -285
  5. {omni_cortex-1.12.0.dist-info → omni_cortex-1.13.0.dist-info}/METADATA +1 -1
  6. omni_cortex-1.13.0.dist-info/RECORD +26 -0
  7. omni_cortex-1.12.0.data/data/share/omni-cortex/dashboard/backend/chat_service.py +0 -317
  8. omni_cortex-1.12.0.dist-info/RECORD +0 -26
  9. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/.env.example +0 -0
  10. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +0 -0
  11. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/image_service.py +0 -0
  12. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +0 -0
  13. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
  14. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
  15. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/prompt_security.py +0 -0
  16. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  17. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/security.py +0 -0
  18. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
  19. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +0 -0
  20. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/hooks/post_tool_use.py +0 -0
  21. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/hooks/pre_tool_use.py +0 -0
  22. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/hooks/session_utils.py +0 -0
  23. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  24. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  25. {omni_cortex-1.12.0.data → omni_cortex-1.13.0.data}/data/share/omni-cortex/hooks/user_prompt.py +0 -0
  26. {omni_cortex-1.12.0.dist-info → omni_cortex-1.13.0.dist-info}/WHEEL +0 -0
  27. {omni_cortex-1.12.0.dist-info → omni_cortex-1.13.0.dist-info}/entry_points.txt +0 -0
  28. {omni_cortex-1.12.0.dist-info → omni_cortex-1.13.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,1381 +1,1681 @@
1
- """FastAPI backend for Omni-Cortex Web Dashboard."""
2
- # Trigger reload for relationship graph column fix
3
-
4
- import asyncio
5
- import json
6
- import os
7
- import traceback
8
- from contextlib import asynccontextmanager
9
- from datetime import datetime
10
- from pathlib import Path
11
- from typing import Optional
12
-
13
- import uvicorn
14
- from fastapi import FastAPI, HTTPException, Query, WebSocket, WebSocketDisconnect, Request, Depends
15
- from fastapi.middleware.cors import CORSMiddleware
16
- from fastapi.staticfiles import StaticFiles
17
- from fastapi.responses import FileResponse, Response
18
- from starlette.middleware.base import BaseHTTPMiddleware
19
- from watchdog.events import FileSystemEventHandler
20
- from watchdog.observers import Observer
21
-
22
- # Rate limiting imports (optional - graceful degradation if not installed)
23
- try:
24
- from slowapi import Limiter, _rate_limit_exceeded_handler
25
- from slowapi.util import get_remote_address
26
- from slowapi.errors import RateLimitExceeded
27
- RATE_LIMITING_AVAILABLE = True
28
- except ImportError:
29
- RATE_LIMITING_AVAILABLE = False
30
- Limiter = None
31
-
32
- from database import (
33
- bulk_update_memory_status,
34
- create_memory,
35
- delete_memory,
36
- ensure_migrations,
37
- get_activities,
38
- get_activity_detail,
39
- get_activity_heatmap,
40
- get_all_tags,
41
- get_command_usage,
42
- get_mcp_usage,
43
- get_memories,
44
- get_memories_needing_review,
45
- get_memory_by_id,
46
- get_memory_growth,
47
- get_memory_stats,
48
- get_recent_sessions,
49
- get_relationship_graph,
50
- get_relationships,
51
- get_sessions,
52
- get_skill_usage,
53
- get_timeline,
54
- get_tool_usage,
55
- get_type_distribution,
56
- search_memories,
57
- update_memory,
58
- )
59
- from logging_config import log_success, log_error
60
- from models import (
61
- AggregateChatRequest,
62
- AggregateMemoryRequest,
63
- AggregateStatsRequest,
64
- AggregateStatsResponse,
65
- ChatRequest,
66
- ChatResponse,
67
- ConversationSaveRequest,
68
- ConversationSaveResponse,
69
- FilterParams,
70
- MemoryCreateRequest,
71
- MemoryUpdate,
72
- ProjectInfo,
73
- ProjectRegistration,
74
- BatchImageGenerationRequest,
75
- BatchImageGenerationResponse,
76
- ImageRefineRequest,
77
- SingleImageRequestModel,
78
- SingleImageResponseModel,
79
- )
80
- from project_config import (
81
- load_config,
82
- add_registered_project,
83
- remove_registered_project,
84
- toggle_favorite,
85
- add_scan_directory,
86
- remove_scan_directory,
87
- )
88
- from project_scanner import scan_projects
89
- from websocket_manager import manager
90
- import chat_service
91
- from image_service import image_service, ImagePreset, SingleImageRequest
92
- from security import PathValidator, get_cors_config, IS_PRODUCTION
93
-
94
-
95
- class SecurityHeadersMiddleware(BaseHTTPMiddleware):
96
- """Add security headers to all responses."""
97
-
98
- async def dispatch(self, request: Request, call_next) -> Response:
99
- response = await call_next(request)
100
-
101
- # Prevent MIME type sniffing
102
- response.headers["X-Content-Type-Options"] = "nosniff"
103
-
104
- # Prevent clickjacking
105
- response.headers["X-Frame-Options"] = "DENY"
106
-
107
- # XSS protection (legacy browsers)
108
- response.headers["X-XSS-Protection"] = "1; mode=block"
109
-
110
- # Content Security Policy
111
- response.headers["Content-Security-Policy"] = (
112
- "default-src 'self'; "
113
- "script-src 'self' 'unsafe-inline' 'unsafe-eval'; " # Vue needs these
114
- "style-src 'self' 'unsafe-inline'; " # Tailwind needs inline
115
- "img-src 'self' data: blob: https:; " # Allow AI-generated images
116
- "connect-src 'self' ws: wss: https://generativelanguage.googleapis.com; "
117
- "font-src 'self'; "
118
- "frame-ancestors 'none';"
119
- )
120
-
121
- # HSTS (only in production with HTTPS)
122
- if IS_PRODUCTION and os.getenv("SSL_CERTFILE"):
123
- response.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
124
-
125
- return response
126
-
127
-
128
- def validate_project_path(project: str = Query(..., description="Path to the database file")) -> Path:
129
- """Validate project database path - dependency for endpoints."""
130
- try:
131
- return PathValidator.validate_project_path(project)
132
- except ValueError as e:
133
- raise HTTPException(status_code=400, detail=str(e))
134
-
135
-
136
- class DatabaseChangeHandler(FileSystemEventHandler):
137
- """Handle database file changes for real-time updates."""
138
-
139
- def __init__(self, ws_manager, loop):
140
- self.ws_manager = ws_manager
141
- self.loop = loop
142
- self._debounce_task: Optional[asyncio.Task] = None
143
- self._last_path: Optional[str] = None
144
- self._last_activity_count: dict[str, int] = {}
145
-
146
- def on_modified(self, event):
147
- if event.src_path.endswith("cortex.db") or event.src_path.endswith("global.db"):
148
- # Debounce rapid changes
149
- self._last_path = event.src_path
150
- if self._debounce_task is None or self._debounce_task.done():
151
- self._debounce_task = asyncio.run_coroutine_threadsafe(
152
- self._debounced_notify(), self.loop
153
- )
154
-
155
- async def _debounced_notify(self):
156
- await asyncio.sleep(0.3) # Reduced from 0.5s for faster updates
157
- if self._last_path:
158
- db_path = self._last_path
159
-
160
- # Broadcast general database change
161
- await self.ws_manager.broadcast("database_changed", {"path": db_path})
162
-
163
- # Fetch and broadcast latest activities (IndyDevDan pattern)
164
- try:
165
- # Get recent activities
166
- recent = get_activities(db_path, limit=5, offset=0)
167
- if recent:
168
- # Broadcast each new activity
169
- for activity in recent:
170
- await self.ws_manager.broadcast_activity_logged(
171
- db_path,
172
- activity if isinstance(activity, dict) else activity.model_dump()
173
- )
174
-
175
- # Also broadcast session update
176
- sessions = get_recent_sessions(db_path, limit=1)
177
- if sessions:
178
- session = sessions[0]
179
- await self.ws_manager.broadcast_session_updated(
180
- db_path,
181
- session if isinstance(session, dict) else dict(session)
182
- )
183
- except Exception as e:
184
- print(f"[WS] Error broadcasting activities: {e}")
185
-
186
-
187
- # File watcher
188
- observer: Optional[Observer] = None
189
-
190
-
191
- @asynccontextmanager
192
- async def lifespan(app: FastAPI):
193
- """Manage file watcher lifecycle."""
194
- global observer
195
- loop = asyncio.get_event_loop()
196
- handler = DatabaseChangeHandler(manager, loop)
197
- observer = Observer()
198
-
199
- # Watch common project directories
200
- watch_paths = [
201
- Path.home() / ".omni-cortex",
202
- Path("D:/Projects"),
203
- ]
204
-
205
- for watch_path in watch_paths:
206
- if watch_path.exists():
207
- observer.schedule(handler, str(watch_path), recursive=True)
208
- print(f"[Watcher] Monitoring: {watch_path}")
209
-
210
- observer.start()
211
- print("[Server] File watcher started")
212
-
213
- yield
214
-
215
- observer.stop()
216
- observer.join()
217
- print("[Server] File watcher stopped")
218
-
219
-
220
- # FastAPI app
221
- app = FastAPI(
222
- title="Omni-Cortex Dashboard",
223
- description="Web dashboard for viewing and managing Omni-Cortex memories",
224
- version="0.1.0",
225
- lifespan=lifespan,
226
- )
227
-
228
- # Add security headers middleware (MUST come before CORS)
229
- app.add_middleware(SecurityHeadersMiddleware)
230
-
231
- # Rate limiting (if available)
232
- if RATE_LIMITING_AVAILABLE:
233
- limiter = Limiter(key_func=get_remote_address)
234
- app.state.limiter = limiter
235
- app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
236
- else:
237
- limiter = None
238
-
239
-
240
- def rate_limit(limit_string: str):
241
- """Decorator for conditional rate limiting.
242
-
243
- Returns the actual limiter decorator if available, otherwise a no-op.
244
- Usage: @rate_limit("10/minute")
245
- """
246
- if limiter is not None:
247
- return limiter.limit(limit_string)
248
- # No-op decorator when rate limiting is not available
249
- def noop_decorator(func):
250
- return func
251
- return noop_decorator
252
-
253
- # CORS configuration (environment-aware)
254
- cors_config = get_cors_config()
255
- app.add_middleware(
256
- CORSMiddleware,
257
- allow_origins=cors_config["allow_origins"],
258
- allow_credentials=True,
259
- allow_methods=cors_config["allow_methods"],
260
- allow_headers=cors_config["allow_headers"],
261
- )
262
-
263
- # Static files for production build
264
- DASHBOARD_DIR = Path(__file__).parent.parent
265
- DIST_DIR = DASHBOARD_DIR / "frontend" / "dist"
266
-
267
-
268
- def setup_static_files():
269
- """Mount static files if dist directory exists (production build)."""
270
- if DIST_DIR.exists():
271
- # Mount assets directory
272
- assets_dir = DIST_DIR / "assets"
273
- if assets_dir.exists():
274
- app.mount("/assets", StaticFiles(directory=str(assets_dir)), name="assets")
275
- print(f"[Static] Serving assets from: {assets_dir}")
276
-
277
-
278
- # Call setup at module load
279
- setup_static_files()
280
-
281
-
282
- # --- REST Endpoints ---
283
-
284
-
285
- @app.get("/api/projects", response_model=list[ProjectInfo])
286
- async def list_projects():
287
- """List all discovered omni-cortex project databases."""
288
- return scan_projects()
289
-
290
-
291
- # --- Project Management Endpoints ---
292
-
293
-
294
- @app.get("/api/projects/config")
295
- async def get_project_config():
296
- """Get project configuration (scan dirs, counts)."""
297
- config = load_config()
298
- return {
299
- "scan_directories": config.scan_directories,
300
- "registered_count": len(config.registered_projects),
301
- "favorites_count": len(config.favorites),
302
- }
303
-
304
-
305
- @app.post("/api/projects/register")
306
- async def register_project(body: ProjectRegistration):
307
- """Manually register a project by path."""
308
- success = add_registered_project(body.path, body.display_name)
309
- if not success:
310
- raise HTTPException(400, "Invalid path or already registered")
311
- return {"success": True}
312
-
313
-
314
- @app.delete("/api/projects/register")
315
- async def unregister_project(path: str = Query(..., description="Project path to unregister")):
316
- """Remove a registered project."""
317
- success = remove_registered_project(path)
318
- if not success:
319
- raise HTTPException(404, "Project not found")
320
- return {"success": True}
321
-
322
-
323
- @app.post("/api/projects/favorite")
324
- async def toggle_project_favorite(path: str = Query(..., description="Project path to toggle favorite")):
325
- """Toggle favorite status for a project."""
326
- is_favorite = toggle_favorite(path)
327
- return {"is_favorite": is_favorite}
328
-
329
-
330
- @app.post("/api/projects/scan-directories")
331
- async def add_scan_dir(directory: str = Query(..., description="Directory path to add")):
332
- """Add a directory to auto-scan list."""
333
- success = add_scan_directory(directory)
334
- if not success:
335
- raise HTTPException(400, "Invalid directory or already added")
336
- return {"success": True}
337
-
338
-
339
- @app.delete("/api/projects/scan-directories")
340
- async def remove_scan_dir(directory: str = Query(..., description="Directory path to remove")):
341
- """Remove a directory from auto-scan list."""
342
- success = remove_scan_directory(directory)
343
- if not success:
344
- raise HTTPException(404, "Directory not found")
345
- return {"success": True}
346
-
347
-
348
- @app.post("/api/projects/refresh")
349
- async def refresh_projects():
350
- """Force rescan of all project directories."""
351
- projects = scan_projects()
352
- return {"count": len(projects)}
353
-
354
-
355
- # --- Aggregate Multi-Project Endpoints ---
356
-
357
-
358
- @app.post("/api/aggregate/memories")
359
- @rate_limit("50/minute")
360
- async def get_aggregate_memories(request: AggregateMemoryRequest):
361
- """Get memories from multiple projects with project attribution."""
362
- try:
363
- all_memories = []
364
- filters = request.filters or FilterParams()
365
-
366
- for project_path in request.projects:
367
- if not Path(project_path).exists():
368
- continue
369
-
370
- try:
371
- memories = get_memories(project_path, filters)
372
- # Add project attribution to each memory
373
- for m in memories:
374
- m_dict = m.model_dump()
375
- m_dict['source_project'] = project_path
376
- # Extract project name from path
377
- project_dir = Path(project_path).parent
378
- m_dict['source_project_name'] = project_dir.name
379
- all_memories.append(m_dict)
380
- except Exception as e:
381
- log_error(f"/api/aggregate/memories (project: {project_path})", e)
382
- continue
383
-
384
- # Sort by last_accessed or created_at (convert to str to handle mixed tz-aware/naive)
385
- all_memories.sort(
386
- key=lambda x: str(x.get('last_accessed') or x.get('created_at') or ''),
387
- reverse=True
388
- )
389
-
390
- # Apply pagination
391
- start = filters.offset
392
- end = start + filters.limit
393
- return all_memories[start:end]
394
- except Exception as e:
395
- log_error("/api/aggregate/memories", e)
396
- raise HTTPException(status_code=500, detail=str(e))
397
-
398
-
399
- @app.post("/api/aggregate/stats", response_model=AggregateStatsResponse)
400
- @rate_limit("50/minute")
401
- async def get_aggregate_stats(request: AggregateStatsRequest):
402
- """Get combined statistics across multiple projects."""
403
- try:
404
- total_count = 0
405
- total_access = 0
406
- importance_sum = 0
407
- by_type = {}
408
- by_status = {}
409
-
410
- for project_path in request.projects:
411
- if not Path(project_path).exists():
412
- continue
413
-
414
- try:
415
- stats = get_memory_stats(project_path)
416
- total_count += stats.total_count
417
- total_access += stats.total_access_count
418
-
419
- # Weighted average for importance
420
- project_count = stats.total_count
421
- project_avg_importance = stats.avg_importance
422
- importance_sum += project_avg_importance * project_count
423
-
424
- # Aggregate by_type
425
- for type_name, count in stats.by_type.items():
426
- by_type[type_name] = by_type.get(type_name, 0) + count
427
-
428
- # Aggregate by_status
429
- for status, count in stats.by_status.items():
430
- by_status[status] = by_status.get(status, 0) + count
431
- except Exception as e:
432
- log_error(f"/api/aggregate/stats (project: {project_path})", e)
433
- continue
434
-
435
- return AggregateStatsResponse(
436
- total_count=total_count,
437
- total_access_count=total_access,
438
- avg_importance=round(importance_sum / total_count, 1) if total_count > 0 else 0,
439
- by_type=by_type,
440
- by_status=by_status,
441
- project_count=len(request.projects),
442
- )
443
- except Exception as e:
444
- log_error("/api/aggregate/stats", e)
445
- raise HTTPException(status_code=500, detail=str(e))
446
-
447
-
448
- @app.post("/api/aggregate/tags")
449
- @rate_limit("50/minute")
450
- async def get_aggregate_tags(request: AggregateStatsRequest):
451
- """Get combined tags across multiple projects."""
452
- try:
453
- tag_counts = {}
454
-
455
- for project_path in request.projects:
456
- if not Path(project_path).exists():
457
- continue
458
-
459
- try:
460
- tags = get_all_tags(project_path)
461
- for tag in tags:
462
- tag_name = tag['name']
463
- tag_counts[tag_name] = tag_counts.get(tag_name, 0) + tag['count']
464
- except Exception as e:
465
- log_error(f"/api/aggregate/tags (project: {project_path})", e)
466
- continue
467
-
468
- # Return sorted by count
469
- return sorted(
470
- [{'name': k, 'count': v} for k, v in tag_counts.items()],
471
- key=lambda x: x['count'],
472
- reverse=True
473
- )
474
- except Exception as e:
475
- log_error("/api/aggregate/tags", e)
476
- raise HTTPException(status_code=500, detail=str(e))
477
-
478
-
479
- @app.post("/api/aggregate/chat", response_model=ChatResponse)
480
- @rate_limit("10/minute")
481
- async def chat_across_projects(request: AggregateChatRequest):
482
- """Ask AI about memories across multiple projects."""
483
- try:
484
- if not chat_service.is_available():
485
- raise HTTPException(
486
- status_code=503,
487
- detail="Chat service not available. Set GEMINI_API_KEY environment variable."
488
- )
489
-
490
- all_sources = []
491
-
492
- # Gather relevant memories from each project
493
- for project_path in request.projects:
494
- if not Path(project_path).exists():
495
- continue
496
-
497
- try:
498
- memories = search_memories(
499
- project_path,
500
- request.question,
501
- limit=request.max_memories_per_project
502
- )
503
-
504
- for m in memories:
505
- project_dir = Path(project_path).parent
506
- source = {
507
- 'id': m.id,
508
- 'type': m.memory_type,
509
- 'content_preview': m.content[:200],
510
- 'tags': m.tags,
511
- 'project_path': project_path,
512
- 'project_name': project_dir.name,
513
- }
514
- all_sources.append(source)
515
- except Exception as e:
516
- log_error(f"/api/aggregate/chat (project: {project_path})", e)
517
- continue
518
-
519
- if not all_sources:
520
- return ChatResponse(
521
- answer="No relevant memories found across the selected projects.",
522
- sources=[],
523
- )
524
-
525
- # Build context with project attribution
526
- context = "\n\n".join([
527
- f"[From: {s['project_name']}] {s['content_preview']}"
528
- for s in all_sources
529
- ])
530
-
531
- # Query AI with attributed context
532
- answer = await chat_service.generate_response(request.question, context)
533
-
534
- log_success("/api/aggregate/chat", projects=len(request.projects), sources=len(all_sources))
535
-
536
- return ChatResponse(
537
- answer=answer,
538
- sources=[ChatSource(**s) for s in all_sources],
539
- )
540
- except HTTPException:
541
- raise
542
- except Exception as e:
543
- log_error("/api/aggregate/chat", e)
544
- raise HTTPException(status_code=500, detail=str(e))
545
-
546
-
547
- @app.get("/api/memories")
548
- @rate_limit("100/minute")
549
- async def list_memories(
550
- project: str = Query(..., description="Path to the database file"),
551
- memory_type: Optional[str] = Query(None, alias="type"),
552
- status: Optional[str] = None,
553
- tags: Optional[str] = None,
554
- search: Optional[str] = None,
555
- min_importance: Optional[int] = None,
556
- max_importance: Optional[int] = None,
557
- sort_by: str = "last_accessed",
558
- sort_order: str = "desc",
559
- limit: int = 50,
560
- offset: int = 0,
561
- ):
562
- """Get memories with filtering and pagination."""
563
- try:
564
- if not Path(project).exists():
565
- log_error("/api/memories", FileNotFoundError("Database not found"), project=project)
566
- raise HTTPException(status_code=404, detail="Database not found")
567
-
568
- filters = FilterParams(
569
- memory_type=memory_type,
570
- status=status,
571
- tags=tags.split(",") if tags else None,
572
- search=search,
573
- min_importance=min_importance,
574
- max_importance=max_importance,
575
- sort_by=sort_by,
576
- sort_order=sort_order,
577
- limit=limit,
578
- offset=offset,
579
- )
580
-
581
- memories = get_memories(project, filters)
582
- log_success("/api/memories", count=len(memories), offset=offset, filters=bool(search or memory_type))
583
- return memories
584
- except Exception as e:
585
- log_error("/api/memories", e, project=project)
586
- raise
587
-
588
-
589
- @app.post("/api/memories")
590
- @rate_limit("30/minute")
591
- async def create_memory_endpoint(
592
- request: MemoryCreateRequest,
593
- project: str = Query(..., description="Path to the database file"),
594
- ):
595
- """Create a new memory."""
596
- try:
597
- if not Path(project).exists():
598
- log_error("/api/memories POST", FileNotFoundError("Database not found"), project=project)
599
- raise HTTPException(status_code=404, detail="Database not found")
600
-
601
- # Create the memory
602
- memory_id = create_memory(
603
- db_path=project,
604
- content=request.content,
605
- memory_type=request.memory_type,
606
- context=request.context,
607
- tags=request.tags if request.tags else None,
608
- importance_score=request.importance_score,
609
- )
610
-
611
- # Fetch the created memory to return it
612
- created_memory = get_memory_by_id(project, memory_id)
613
-
614
- # Broadcast to WebSocket clients
615
- await manager.broadcast("memory_created", created_memory.model_dump(by_alias=True))
616
-
617
- log_success("/api/memories POST", memory_id=memory_id, type=request.memory_type)
618
- return created_memory
619
- except HTTPException:
620
- raise
621
- except Exception as e:
622
- import traceback
623
- print(f"[DEBUG] create_memory_endpoint error: {type(e).__name__}: {e}")
624
- traceback.print_exc()
625
- log_error("/api/memories POST", e, project=project)
626
- raise
627
-
628
-
629
- # NOTE: These routes MUST be defined before /api/memories/{memory_id} to avoid path conflicts
630
- @app.get("/api/memories/needs-review")
631
- async def get_memories_needing_review_endpoint(
632
- project: str = Query(..., description="Path to the database file"),
633
- days_threshold: int = 30,
634
- limit: int = 50,
635
- ):
636
- """Get memories that may need freshness review."""
637
- if not Path(project).exists():
638
- raise HTTPException(status_code=404, detail="Database not found")
639
-
640
- return get_memories_needing_review(project, days_threshold, limit)
641
-
642
-
643
- @app.post("/api/memories/bulk-update-status")
644
- async def bulk_update_status_endpoint(
645
- project: str = Query(..., description="Path to the database file"),
646
- memory_ids: list[str] = [],
647
- status: str = "fresh",
648
- ):
649
- """Update status for multiple memories at once."""
650
- if not Path(project).exists():
651
- raise HTTPException(status_code=404, detail="Database not found")
652
-
653
- valid_statuses = ["fresh", "needs_review", "outdated", "archived"]
654
- if status not in valid_statuses:
655
- raise HTTPException(status_code=400, detail=f"Invalid status. Must be one of: {valid_statuses}")
656
-
657
- count = bulk_update_memory_status(project, memory_ids, status)
658
-
659
- # Notify connected clients
660
- await manager.broadcast("memories_bulk_updated", {"count": count, "status": status})
661
-
662
- return {"updated_count": count, "status": status}
663
-
664
-
665
- @app.get("/api/memories/{memory_id}")
666
- async def get_memory(
667
- memory_id: str,
668
- project: str = Query(..., description="Path to the database file"),
669
- ):
670
- """Get a single memory by ID."""
671
- if not Path(project).exists():
672
- raise HTTPException(status_code=404, detail="Database not found")
673
-
674
- memory = get_memory_by_id(project, memory_id)
675
- if not memory:
676
- raise HTTPException(status_code=404, detail="Memory not found")
677
- return memory
678
-
679
-
680
- @app.put("/api/memories/{memory_id}")
681
- async def update_memory_endpoint(
682
- memory_id: str,
683
- updates: MemoryUpdate,
684
- project: str = Query(..., description="Path to the database file"),
685
- ):
686
- """Update a memory."""
687
- try:
688
- if not Path(project).exists():
689
- log_error("/api/memories/update", FileNotFoundError("Database not found"), memory_id=memory_id)
690
- raise HTTPException(status_code=404, detail="Database not found")
691
-
692
- updated = update_memory(project, memory_id, updates)
693
- if not updated:
694
- log_error("/api/memories/update", ValueError("Memory not found"), memory_id=memory_id)
695
- raise HTTPException(status_code=404, detail="Memory not found")
696
-
697
- # Notify connected clients
698
- await manager.broadcast("memory_updated", updated.model_dump(by_alias=True))
699
- log_success("/api/memories/update", memory_id=memory_id, fields_updated=len(updates.model_dump(exclude_unset=True)))
700
- return updated
701
- except HTTPException:
702
- raise
703
- except Exception as e:
704
- log_error("/api/memories/update", e, memory_id=memory_id)
705
- raise
706
-
707
-
708
- @app.delete("/api/memories/{memory_id}")
709
- async def delete_memory_endpoint(
710
- memory_id: str,
711
- project: str = Query(..., description="Path to the database file"),
712
- ):
713
- """Delete a memory."""
714
- try:
715
- if not Path(project).exists():
716
- log_error("/api/memories/delete", FileNotFoundError("Database not found"), memory_id=memory_id)
717
- raise HTTPException(status_code=404, detail="Database not found")
718
-
719
- deleted = delete_memory(project, memory_id)
720
- if not deleted:
721
- log_error("/api/memories/delete", ValueError("Memory not found"), memory_id=memory_id)
722
- raise HTTPException(status_code=404, detail="Memory not found")
723
-
724
- # Notify connected clients
725
- await manager.broadcast("memory_deleted", {"id": memory_id})
726
- log_success("/api/memories/delete", memory_id=memory_id)
727
- return {"message": "Memory deleted", "id": memory_id}
728
- except HTTPException:
729
- raise
730
- except Exception as e:
731
- log_error("/api/memories/delete", e, memory_id=memory_id)
732
- raise
733
-
734
-
735
- @app.get("/api/memories/stats/summary")
736
- async def memory_stats(
737
- project: str = Query(..., description="Path to the database file"),
738
- ):
739
- """Get memory statistics."""
740
- if not Path(project).exists():
741
- raise HTTPException(status_code=404, detail="Database not found")
742
-
743
- return get_memory_stats(project)
744
-
745
-
746
- @app.get("/api/search")
747
- async def search(
748
- q: str = Query(..., min_length=1),
749
- project: str = Query(..., description="Path to the database file"),
750
- limit: int = 20,
751
- ):
752
- """Search memories."""
753
- if not Path(project).exists():
754
- raise HTTPException(status_code=404, detail="Database not found")
755
-
756
- return search_memories(project, q, limit)
757
-
758
-
759
- @app.get("/api/activities")
760
- async def list_activities(
761
- project: str = Query(..., description="Path to the database file"),
762
- event_type: Optional[str] = None,
763
- tool_name: Optional[str] = None,
764
- limit: int = 100,
765
- offset: int = 0,
766
- ):
767
- """Get activity log entries."""
768
- if not Path(project).exists():
769
- raise HTTPException(status_code=404, detail="Database not found")
770
-
771
- # Ensure migrations are applied (adds summary columns if missing)
772
- ensure_migrations(project)
773
-
774
- return get_activities(project, event_type, tool_name, limit, offset)
775
-
776
-
777
- @app.get("/api/timeline")
778
- async def get_timeline_view(
779
- project: str = Query(..., description="Path to the database file"),
780
- hours: int = 24,
781
- include_memories: bool = True,
782
- include_activities: bool = True,
783
- ):
784
- """Get timeline of recent activity."""
785
- if not Path(project).exists():
786
- raise HTTPException(status_code=404, detail="Database not found")
787
-
788
- return get_timeline(project, hours, include_memories, include_activities)
789
-
790
-
791
- @app.get("/api/tags")
792
- async def list_tags(
793
- project: str = Query(..., description="Path to the database file"),
794
- ):
795
- """Get all tags with counts."""
796
- if not Path(project).exists():
797
- raise HTTPException(status_code=404, detail="Database not found")
798
-
799
- return get_all_tags(project)
800
-
801
-
802
- @app.get("/api/types")
803
- async def list_types(
804
- project: str = Query(..., description="Path to the database file"),
805
- ):
806
- """Get memory type distribution."""
807
- if not Path(project).exists():
808
- raise HTTPException(status_code=404, detail="Database not found")
809
-
810
- return get_type_distribution(project)
811
-
812
-
813
- @app.get("/api/sessions")
814
- async def list_sessions(
815
- project: str = Query(..., description="Path to the database file"),
816
- limit: int = 20,
817
- ):
818
- """Get recent sessions."""
819
- if not Path(project).exists():
820
- raise HTTPException(status_code=404, detail="Database not found")
821
-
822
- return get_sessions(project, limit)
823
-
824
-
825
- # --- Stats Endpoints for Charts ---
826
-
827
-
828
- @app.get("/api/stats/activity-heatmap")
829
- async def get_activity_heatmap_endpoint(
830
- project: str = Query(..., description="Path to the database file"),
831
- days: int = 90,
832
- ):
833
- """Get activity counts grouped by day for heatmap visualization."""
834
- if not Path(project).exists():
835
- raise HTTPException(status_code=404, detail="Database not found")
836
-
837
- return get_activity_heatmap(project, days)
838
-
839
-
840
- @app.get("/api/stats/tool-usage")
841
- async def get_tool_usage_endpoint(
842
- project: str = Query(..., description="Path to the database file"),
843
- limit: int = 10,
844
- ):
845
- """Get tool usage statistics."""
846
- if not Path(project).exists():
847
- raise HTTPException(status_code=404, detail="Database not found")
848
-
849
- return get_tool_usage(project, limit)
850
-
851
-
852
- @app.get("/api/stats/memory-growth")
853
- async def get_memory_growth_endpoint(
854
- project: str = Query(..., description="Path to the database file"),
855
- days: int = 30,
856
- ):
857
- """Get memory creation over time."""
858
- if not Path(project).exists():
859
- raise HTTPException(status_code=404, detail="Database not found")
860
-
861
- return get_memory_growth(project, days)
862
-
863
-
864
- # --- Command Analytics Endpoints ---
865
-
866
-
867
- @app.get("/api/stats/command-usage")
868
- async def get_command_usage_endpoint(
869
- project: str = Query(..., description="Path to the database file"),
870
- scope: Optional[str] = Query(None, description="Filter by scope: 'universal' or 'project'"),
871
- days: int = Query(30, ge=1, le=365),
872
- ):
873
- """Get slash command usage statistics."""
874
- if not Path(project).exists():
875
- raise HTTPException(status_code=404, detail="Database not found")
876
-
877
- return get_command_usage(project, scope, days)
878
-
879
-
880
- @app.get("/api/stats/skill-usage")
881
- async def get_skill_usage_endpoint(
882
- project: str = Query(..., description="Path to the database file"),
883
- scope: Optional[str] = Query(None, description="Filter by scope: 'universal' or 'project'"),
884
- days: int = Query(30, ge=1, le=365),
885
- ):
886
- """Get skill usage statistics."""
887
- if not Path(project).exists():
888
- raise HTTPException(status_code=404, detail="Database not found")
889
-
890
- return get_skill_usage(project, scope, days)
891
-
892
-
893
- @app.get("/api/stats/mcp-usage")
894
- async def get_mcp_usage_endpoint(
895
- project: str = Query(..., description="Path to the database file"),
896
- days: int = Query(30, ge=1, le=365),
897
- ):
898
- """Get MCP server usage statistics."""
899
- if not Path(project).exists():
900
- raise HTTPException(status_code=404, detail="Database not found")
901
-
902
- return get_mcp_usage(project, days)
903
-
904
-
905
- @app.get("/api/activities/{activity_id}")
906
- async def get_activity_detail_endpoint(
907
- activity_id: str,
908
- project: str = Query(..., description="Path to the database file"),
909
- ):
910
- """Get full activity details including complete input/output."""
911
- if not Path(project).exists():
912
- raise HTTPException(status_code=404, detail="Database not found")
913
-
914
- # Ensure migrations are applied
915
- ensure_migrations(project)
916
-
917
- activity = get_activity_detail(project, activity_id)
918
- if not activity:
919
- raise HTTPException(status_code=404, detail="Activity not found")
920
-
921
- return activity
922
-
923
-
924
- @app.post("/api/activities/backfill-summaries")
925
- async def backfill_activity_summaries_endpoint(
926
- project: str = Query(..., description="Path to the database file"),
927
- ):
928
- """Generate summaries for existing activities that don't have them."""
929
- if not Path(project).exists():
930
- raise HTTPException(status_code=404, detail="Database not found")
931
-
932
- try:
933
- from backfill_summaries import backfill_all
934
- results = backfill_all(project)
935
- return {
936
- "success": True,
937
- "summaries_updated": results["summaries"],
938
- "mcp_servers_updated": results["mcp_servers"],
939
- }
940
- except Exception as e:
941
- raise HTTPException(status_code=500, detail=f"Backfill failed: {str(e)}")
942
-
943
-
944
- # --- Session Context Endpoints ---
945
-
946
-
947
- @app.get("/api/sessions/recent")
948
- async def get_recent_sessions_endpoint(
949
- project: str = Query(..., description="Path to the database file"),
950
- limit: int = 5,
951
- ):
952
- """Get recent sessions with summaries."""
953
- if not Path(project).exists():
954
- raise HTTPException(status_code=404, detail="Database not found")
955
-
956
- return get_recent_sessions(project, limit)
957
-
958
-
959
- # --- Relationship Graph Endpoints ---
960
-
961
-
962
- @app.get("/api/relationships")
963
- async def get_relationships_endpoint(
964
- project: str = Query(..., description="Path to the database file"),
965
- memory_id: Optional[str] = None,
966
- ):
967
- """Get memory relationships for graph visualization."""
968
- if not Path(project).exists():
969
- raise HTTPException(status_code=404, detail="Database not found")
970
-
971
- return get_relationships(project, memory_id)
972
-
973
-
974
- @app.get("/api/relationships/graph")
975
- async def get_relationship_graph_endpoint(
976
- project: str = Query(..., description="Path to the database file"),
977
- center_id: Optional[str] = None,
978
- depth: int = 2,
979
- ):
980
- """Get graph data centered on a memory with configurable depth."""
981
- if not Path(project).exists():
982
- raise HTTPException(status_code=404, detail="Database not found")
983
-
984
- return get_relationship_graph(project, center_id, depth)
985
-
986
-
987
- # --- Chat Endpoint ---
988
-
989
-
990
- @app.get("/api/chat/status")
991
- async def chat_status():
992
- """Check if chat service is available."""
993
- return {
994
- "available": chat_service.is_available(),
995
- "message": "Chat is available" if chat_service.is_available() else "Set GEMINI_API_KEY environment variable to enable chat",
996
- }
997
-
998
-
999
- @app.post("/api/chat", response_model=ChatResponse)
1000
- @rate_limit("10/minute")
1001
- async def chat_with_memories(
1002
- request: ChatRequest,
1003
- project: str = Query(..., description="Path to the database file"),
1004
- ):
1005
- """Ask a natural language question about memories."""
1006
- try:
1007
- if not Path(project).exists():
1008
- log_error("/api/chat", FileNotFoundError("Database not found"), question=request.question[:50])
1009
- raise HTTPException(status_code=404, detail="Database not found")
1010
-
1011
- result = await chat_service.ask_about_memories(
1012
- project,
1013
- request.question,
1014
- request.max_memories,
1015
- )
1016
-
1017
- log_success("/api/chat", question_len=len(request.question), sources=len(result.get("sources", [])))
1018
- return ChatResponse(**result)
1019
- except HTTPException:
1020
- raise
1021
- except Exception as e:
1022
- log_error("/api/chat", e, question=request.question[:50])
1023
- raise
1024
-
1025
-
1026
- @app.get("/api/chat/stream")
1027
- @rate_limit("10/minute")
1028
- async def stream_chat(
1029
- project: str = Query(..., description="Path to the database file"),
1030
- question: str = Query(..., description="The question to ask"),
1031
- max_memories: int = Query(10, ge=1, le=50),
1032
- ):
1033
- """SSE endpoint for streaming chat responses."""
1034
- from fastapi.responses import StreamingResponse
1035
-
1036
- if not Path(project).exists():
1037
- raise HTTPException(status_code=404, detail="Database not found")
1038
-
1039
- async def event_generator():
1040
- try:
1041
- async for event in chat_service.stream_ask_about_memories(project, question, max_memories):
1042
- yield f"data: {json.dumps(event)}\n\n"
1043
- except Exception as e:
1044
- yield f"data: {json.dumps({'type': 'error', 'data': str(e)})}\n\n"
1045
-
1046
- return StreamingResponse(
1047
- event_generator(),
1048
- media_type="text/event-stream",
1049
- headers={
1050
- "Cache-Control": "no-cache",
1051
- "Connection": "keep-alive",
1052
- "X-Accel-Buffering": "no",
1053
- }
1054
- )
1055
-
1056
-
1057
- @app.post("/api/chat/save", response_model=ConversationSaveResponse)
1058
- async def save_chat_conversation(
1059
- request: ConversationSaveRequest,
1060
- project: str = Query(..., description="Path to the database file"),
1061
- ):
1062
- """Save a chat conversation as a memory."""
1063
- try:
1064
- if not Path(project).exists():
1065
- log_error("/api/chat/save", FileNotFoundError("Database not found"))
1066
- raise HTTPException(status_code=404, detail="Database not found")
1067
-
1068
- result = await chat_service.save_conversation(
1069
- project,
1070
- [msg.model_dump() for msg in request.messages],
1071
- request.referenced_memory_ids,
1072
- request.importance or 60,
1073
- )
1074
-
1075
- log_success("/api/chat/save", memory_id=result["memory_id"], messages=len(request.messages))
1076
- return ConversationSaveResponse(**result)
1077
- except HTTPException:
1078
- raise
1079
- except Exception as e:
1080
- log_error("/api/chat/save", e)
1081
- raise
1082
-
1083
-
1084
- # --- Image Generation Endpoints ---
1085
-
1086
-
1087
- @app.get("/api/image/status")
1088
- async def get_image_status():
1089
- """Check if image generation is available."""
1090
- return {
1091
- "available": image_service.is_available(),
1092
- "message": "Image generation ready" if image_service.is_available()
1093
- else "Configure GEMINI_API_KEY and install google-genai for image generation",
1094
- }
1095
-
1096
-
1097
- @app.get("/api/image/presets")
1098
- async def get_image_presets():
1099
- """Get available image preset templates."""
1100
- return {"presets": image_service.get_presets()}
1101
-
1102
-
1103
- @app.post("/api/image/generate-batch", response_model=BatchImageGenerationResponse)
1104
- @rate_limit("5/minute")
1105
- async def generate_images_batch(
1106
- request: BatchImageGenerationRequest,
1107
- db_path: str = Query(..., alias="project", description="Path to the database file"),
1108
- ):
1109
- """Generate multiple images with different presets/prompts."""
1110
- # Validate image count
1111
- if len(request.images) not in [1, 2, 4]:
1112
- return BatchImageGenerationResponse(
1113
- success=False,
1114
- errors=["Must request 1, 2, or 4 images"]
1115
- )
1116
-
1117
- # Build memory context
1118
- memory_context = ""
1119
- if request.memory_ids:
1120
- memory_context = image_service.build_memory_context(db_path, request.memory_ids)
1121
-
1122
- # Build chat context
1123
- chat_context = image_service.build_chat_context(request.chat_messages)
1124
-
1125
- # Convert request models to internal format
1126
- image_requests = [
1127
- SingleImageRequest(
1128
- preset=ImagePreset(img.preset),
1129
- custom_prompt=img.custom_prompt,
1130
- aspect_ratio=img.aspect_ratio,
1131
- image_size=img.image_size
1132
- )
1133
- for img in request.images
1134
- ]
1135
-
1136
- result = await image_service.generate_batch(
1137
- requests=image_requests,
1138
- memory_context=memory_context,
1139
- chat_context=chat_context,
1140
- use_search_grounding=request.use_search_grounding
1141
- )
1142
-
1143
- return BatchImageGenerationResponse(
1144
- success=result.success,
1145
- images=[
1146
- SingleImageResponseModel(
1147
- success=img.success,
1148
- image_data=img.image_data,
1149
- text_response=img.text_response,
1150
- thought_signature=img.thought_signature,
1151
- image_id=img.image_id,
1152
- error=img.error,
1153
- index=img.index
1154
- )
1155
- for img in result.images
1156
- ],
1157
- errors=result.errors
1158
- )
1159
-
1160
-
1161
- @app.post("/api/image/refine", response_model=SingleImageResponseModel)
1162
- @rate_limit("5/minute")
1163
- async def refine_image(request: ImageRefineRequest):
1164
- """Refine an existing generated image with a new prompt."""
1165
- result = await image_service.refine_image(
1166
- image_id=request.image_id,
1167
- refinement_prompt=request.refinement_prompt,
1168
- aspect_ratio=request.aspect_ratio,
1169
- image_size=request.image_size
1170
- )
1171
-
1172
- return SingleImageResponseModel(
1173
- success=result.success,
1174
- image_data=result.image_data,
1175
- text_response=result.text_response,
1176
- thought_signature=result.thought_signature,
1177
- image_id=result.image_id,
1178
- error=result.error
1179
- )
1180
-
1181
-
1182
- @app.post("/api/image/clear-conversation")
1183
- async def clear_image_conversation(image_id: Optional[str] = None):
1184
- """Clear image conversation history. If image_id provided, clear only that image."""
1185
- image_service.clear_conversation(image_id)
1186
- return {"status": "cleared", "image_id": image_id}
1187
-
1188
-
1189
- # --- WebSocket Endpoint ---
1190
-
1191
-
1192
- @app.websocket("/ws")
1193
- async def websocket_endpoint(websocket: WebSocket):
1194
- """WebSocket endpoint for real-time updates."""
1195
- client_id = await manager.connect(websocket)
1196
- try:
1197
- # Send initial connection confirmation
1198
- await manager.send_to_client(client_id, "connected", {"client_id": client_id})
1199
-
1200
- # Keep connection alive and handle messages
1201
- while True:
1202
- data = await websocket.receive_text()
1203
- # Echo back for ping/pong
1204
- if data == "ping":
1205
- await manager.send_to_client(client_id, "pong", {})
1206
- except WebSocketDisconnect:
1207
- await manager.disconnect(client_id)
1208
- except Exception as e:
1209
- print(f"[WS] Error: {e}")
1210
- await manager.disconnect(client_id)
1211
-
1212
-
1213
- # --- Export Endpoints ---
1214
-
1215
-
1216
- @app.get("/api/export")
1217
- async def export_memories(
1218
- project: str = Query(..., description="Path to the database file"),
1219
- format: str = Query("json", description="Export format: json, markdown, csv"),
1220
- memory_ids: Optional[str] = Query(None, description="Comma-separated memory IDs to export, or all if empty"),
1221
- include_relationships: bool = Query(True, description="Include memory relationships"),
1222
- ):
1223
- """Export memories to specified format."""
1224
- from fastapi.responses import Response
1225
- import csv
1226
- import io
1227
-
1228
- if not Path(project).exists():
1229
- raise HTTPException(status_code=404, detail="Database not found")
1230
-
1231
- # Get memories
1232
- if memory_ids:
1233
- ids = memory_ids.split(",")
1234
- memories = [get_memory_by_id(project, mid) for mid in ids if mid.strip()]
1235
- memories = [m for m in memories if m is not None]
1236
- else:
1237
- from models import FilterParams
1238
- filters = FilterParams(limit=1000, offset=0, sort_by="created_at", sort_order="desc")
1239
- memories = get_memories(project, filters)
1240
-
1241
- # Get relationships if requested
1242
- relationships = []
1243
- if include_relationships:
1244
- relationships = get_relationships(project)
1245
-
1246
- if format == "json":
1247
- export_data = {
1248
- "exported_at": datetime.now().isoformat(),
1249
- "project": project,
1250
- "memory_count": len(memories),
1251
- "memories": [m.model_dump(by_alias=True) for m in memories],
1252
- "relationships": relationships if include_relationships else [],
1253
- }
1254
- return Response(
1255
- content=json.dumps(export_data, indent=2, default=str),
1256
- media_type="application/json",
1257
- headers={"Content-Disposition": f"attachment; filename=memories_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"}
1258
- )
1259
-
1260
- elif format == "markdown":
1261
- md_lines = [
1262
- f"# Omni-Cortex Memory Export",
1263
- f"",
1264
- f"**Exported:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
1265
- f"**Total Memories:** {len(memories)}",
1266
- f"",
1267
- "---",
1268
- "",
1269
- ]
1270
- for m in memories:
1271
- md_lines.extend([
1272
- f"## {m.type.title()}: {m.content[:50]}{'...' if len(m.content) > 50 else ''}",
1273
- f"",
1274
- f"**ID:** `{m.id}`",
1275
- f"**Type:** {m.type}",
1276
- f"**Status:** {m.status}",
1277
- f"**Importance:** {m.importance_score}",
1278
- f"**Created:** {m.created_at}",
1279
- f"**Tags:** {', '.join(m.tags) if m.tags else 'None'}",
1280
- f"",
1281
- "### Content",
1282
- f"",
1283
- m.content,
1284
- f"",
1285
- "### Context",
1286
- f"",
1287
- m.context or "_No context_",
1288
- f"",
1289
- "---",
1290
- "",
1291
- ])
1292
- return Response(
1293
- content="\n".join(md_lines),
1294
- media_type="text/markdown",
1295
- headers={"Content-Disposition": f"attachment; filename=memories_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md"}
1296
- )
1297
-
1298
- elif format == "csv":
1299
- output = io.StringIO()
1300
- writer = csv.writer(output)
1301
- writer.writerow(["id", "type", "status", "importance", "content", "context", "tags", "created_at", "last_accessed"])
1302
- for m in memories:
1303
- writer.writerow([
1304
- m.id,
1305
- m.type,
1306
- m.status,
1307
- m.importance_score,
1308
- m.content,
1309
- m.context or "",
1310
- ",".join(m.tags) if m.tags else "",
1311
- m.created_at,
1312
- m.last_accessed or "",
1313
- ])
1314
- return Response(
1315
- content=output.getvalue(),
1316
- media_type="text/csv",
1317
- headers={"Content-Disposition": f"attachment; filename=memories_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"}
1318
- )
1319
-
1320
- else:
1321
- raise HTTPException(status_code=400, detail=f"Unsupported format: {format}. Use json, markdown, or csv.")
1322
-
1323
-
1324
- # --- Health Check ---
1325
-
1326
-
1327
- @app.get("/health")
1328
- async def health_check():
1329
- """Health check endpoint."""
1330
- return {
1331
- "status": "healthy",
1332
- "websocket_connections": manager.connection_count,
1333
- }
1334
-
1335
-
1336
- # --- Static File Serving (SPA) ---
1337
- # These routes must come AFTER all API routes
1338
-
1339
-
1340
- @app.get("/")
1341
- async def serve_root():
1342
- """Serve the frontend index.html."""
1343
- index_file = DIST_DIR / "index.html"
1344
- if index_file.exists():
1345
- return FileResponse(str(index_file))
1346
- return {"message": "Omni-Cortex Dashboard API", "docs": "/docs"}
1347
-
1348
-
1349
- @app.get("/{path:path}")
1350
- async def serve_spa(path: str):
1351
- """Catch-all route to serve SPA for client-side routing with path traversal protection."""
1352
- # Skip API routes and known paths
1353
- if path.startswith(("api/", "ws", "health", "docs", "openapi", "redoc")):
1354
- raise HTTPException(status_code=404, detail="Not found")
1355
-
1356
- # Check if it's a static file (with path traversal protection)
1357
- safe_path = PathValidator.is_safe_static_path(DIST_DIR, path)
1358
- if safe_path:
1359
- return FileResponse(str(safe_path))
1360
-
1361
- # Otherwise serve index.html for SPA routing
1362
- index_file = DIST_DIR / "index.html"
1363
- if index_file.exists():
1364
- return FileResponse(str(index_file))
1365
-
1366
- raise HTTPException(status_code=404, detail="Not found")
1367
-
1368
-
1369
- def run():
1370
- """Run the dashboard server."""
1371
- uvicorn.run(
1372
- "main:app",
1373
- host="0.0.0.0",
1374
- port=8765,
1375
- reload=True,
1376
- reload_dirs=[str(Path(__file__).parent)],
1377
- )
1378
-
1379
-
1380
- if __name__ == "__main__":
1381
- run()
1
+ """FastAPI backend for Omni-Cortex Web Dashboard."""
2
+ # Trigger reload for relationship graph column fix
3
+
4
+ import asyncio
5
+ import json
6
+ import os
7
+ import traceback
8
+ from contextlib import asynccontextmanager
9
+ from datetime import datetime
10
+ from pathlib import Path
11
+ from typing import Optional
12
+
13
+ import uvicorn
14
+ from fastapi import FastAPI, HTTPException, Query, WebSocket, WebSocketDisconnect, Request, Depends
15
+ from fastapi.middleware.cors import CORSMiddleware
16
+ from fastapi.staticfiles import StaticFiles
17
+ from fastapi.responses import FileResponse, Response
18
+ from starlette.middleware.base import BaseHTTPMiddleware
19
+ from watchdog.events import FileSystemEventHandler
20
+ from watchdog.observers import Observer
21
+
22
+ # Rate limiting imports (optional - graceful degradation if not installed)
23
+ try:
24
+ from slowapi import Limiter, _rate_limit_exceeded_handler
25
+ from slowapi.util import get_remote_address
26
+ from slowapi.errors import RateLimitExceeded
27
+ RATE_LIMITING_AVAILABLE = True
28
+ except ImportError:
29
+ RATE_LIMITING_AVAILABLE = False
30
+ Limiter = None
31
+
32
+ from database import (
33
+ bulk_update_memory_status,
34
+ create_memory,
35
+ delete_memory,
36
+ delete_user_message,
37
+ delete_user_messages_bulk,
38
+ ensure_migrations,
39
+ get_activities,
40
+ get_activity_detail,
41
+ get_activity_heatmap,
42
+ get_all_tags,
43
+ get_command_usage,
44
+ get_mcp_usage,
45
+ get_memories,
46
+ get_memories_needing_review,
47
+ get_memory_by_id,
48
+ get_memory_growth,
49
+ get_memory_stats,
50
+ get_recent_sessions,
51
+ get_relationship_graph,
52
+ get_relationships,
53
+ get_sessions,
54
+ get_skill_usage,
55
+ get_style_profile,
56
+ get_style_samples,
57
+ get_style_samples_by_category,
58
+ compute_style_profile_from_messages,
59
+ get_timeline,
60
+ get_tool_usage,
61
+ get_type_distribution,
62
+ get_user_message_count,
63
+ get_user_messages,
64
+ search_memories,
65
+ update_memory,
66
+ )
67
+ from logging_config import log_success, log_error
68
+ from models import (
69
+ AggregateChatRequest,
70
+ AggregateMemoryRequest,
71
+ AggregateStatsRequest,
72
+ AggregateStatsResponse,
73
+ BatchImageGenerationRequest,
74
+ BatchImageGenerationResponse,
75
+ BulkDeleteRequest,
76
+ ChatRequest,
77
+ ChatResponse,
78
+ ComposeRequest,
79
+ ComposeResponse,
80
+ ConversationSaveRequest,
81
+ ConversationSaveResponse,
82
+ FilterParams,
83
+ ImageRefineRequest,
84
+ MemoryCreateRequest,
85
+ MemoryUpdate,
86
+ ProjectInfo,
87
+ ProjectRegistration,
88
+ SingleImageRequestModel,
89
+ SingleImageResponseModel,
90
+ StyleProfile,
91
+ StyleSample,
92
+ UserMessage,
93
+ UserMessagesResponse,
94
+ )
95
+ from project_config import (
96
+ load_config,
97
+ add_registered_project,
98
+ remove_registered_project,
99
+ toggle_favorite,
100
+ add_scan_directory,
101
+ remove_scan_directory,
102
+ )
103
+ from project_scanner import scan_projects
104
+ from websocket_manager import manager
105
+ import chat_service
106
+ from image_service import image_service, ImagePreset, SingleImageRequest
107
+ from security import PathValidator, get_cors_config, IS_PRODUCTION
108
+
109
+
110
+ class SecurityHeadersMiddleware(BaseHTTPMiddleware):
111
+ """Add security headers to all responses."""
112
+
113
+ async def dispatch(self, request: Request, call_next) -> Response:
114
+ response = await call_next(request)
115
+
116
+ # Prevent MIME type sniffing
117
+ response.headers["X-Content-Type-Options"] = "nosniff"
118
+
119
+ # Prevent clickjacking
120
+ response.headers["X-Frame-Options"] = "DENY"
121
+
122
+ # XSS protection (legacy browsers)
123
+ response.headers["X-XSS-Protection"] = "1; mode=block"
124
+
125
+ # Content Security Policy
126
+ response.headers["Content-Security-Policy"] = (
127
+ "default-src 'self'; "
128
+ "script-src 'self' 'unsafe-inline' 'unsafe-eval'; " # Vue needs these
129
+ "style-src 'self' 'unsafe-inline'; " # Tailwind needs inline
130
+ "img-src 'self' data: blob: https:; " # Allow AI-generated images
131
+ "connect-src 'self' ws: wss: https://generativelanguage.googleapis.com; "
132
+ "font-src 'self'; "
133
+ "frame-ancestors 'none';"
134
+ )
135
+
136
+ # HSTS (only in production with HTTPS)
137
+ if IS_PRODUCTION and os.getenv("SSL_CERTFILE"):
138
+ response.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
139
+
140
+ return response
141
+
142
+
143
+ def validate_project_path(project: str = Query(..., description="Path to the database file")) -> Path:
144
+ """Validate project database path - dependency for endpoints."""
145
+ try:
146
+ return PathValidator.validate_project_path(project)
147
+ except ValueError as e:
148
+ raise HTTPException(status_code=400, detail=str(e))
149
+
150
+
151
+ class DatabaseChangeHandler(FileSystemEventHandler):
152
+ """Handle database file changes for real-time updates."""
153
+
154
+ def __init__(self, ws_manager, loop):
155
+ self.ws_manager = ws_manager
156
+ self.loop = loop
157
+ self._debounce_task: Optional[asyncio.Task] = None
158
+ self._last_path: Optional[str] = None
159
+ self._last_activity_count: dict[str, int] = {}
160
+
161
+ def on_modified(self, event):
162
+ if event.src_path.endswith("cortex.db") or event.src_path.endswith("global.db"):
163
+ # Debounce rapid changes
164
+ self._last_path = event.src_path
165
+ if self._debounce_task is None or self._debounce_task.done():
166
+ self._debounce_task = asyncio.run_coroutine_threadsafe(
167
+ self._debounced_notify(), self.loop
168
+ )
169
+
170
+ async def _debounced_notify(self):
171
+ await asyncio.sleep(0.3) # Reduced from 0.5s for faster updates
172
+ if self._last_path:
173
+ db_path = self._last_path
174
+
175
+ # Broadcast general database change
176
+ await self.ws_manager.broadcast("database_changed", {"path": db_path})
177
+
178
+ # Fetch and broadcast latest activities (IndyDevDan pattern)
179
+ try:
180
+ # Get recent activities
181
+ recent = get_activities(db_path, limit=5, offset=0)
182
+ if recent:
183
+ # Broadcast each new activity
184
+ for activity in recent:
185
+ await self.ws_manager.broadcast_activity_logged(
186
+ db_path,
187
+ activity if isinstance(activity, dict) else activity.model_dump()
188
+ )
189
+
190
+ # Also broadcast session update
191
+ sessions = get_recent_sessions(db_path, limit=1)
192
+ if sessions:
193
+ session = sessions[0]
194
+ await self.ws_manager.broadcast_session_updated(
195
+ db_path,
196
+ session if isinstance(session, dict) else dict(session)
197
+ )
198
+ except Exception as e:
199
+ print(f"[WS] Error broadcasting activities: {e}")
200
+
201
+
202
+ # File watcher
203
+ observer: Optional[Observer] = None
204
+
205
+
206
+ @asynccontextmanager
207
+ async def lifespan(app: FastAPI):
208
+ """Manage file watcher lifecycle."""
209
+ global observer
210
+ loop = asyncio.get_event_loop()
211
+ handler = DatabaseChangeHandler(manager, loop)
212
+ observer = Observer()
213
+
214
+ # Watch common project directories
215
+ watch_paths = [
216
+ Path.home() / ".omni-cortex",
217
+ Path("D:/Projects"),
218
+ ]
219
+
220
+ for watch_path in watch_paths:
221
+ if watch_path.exists():
222
+ observer.schedule(handler, str(watch_path), recursive=True)
223
+ print(f"[Watcher] Monitoring: {watch_path}")
224
+
225
+ observer.start()
226
+ print("[Server] File watcher started")
227
+
228
+ yield
229
+
230
+ observer.stop()
231
+ observer.join()
232
+ print("[Server] File watcher stopped")
233
+
234
+
235
+ # FastAPI app
236
+ app = FastAPI(
237
+ title="Omni-Cortex Dashboard",
238
+ description="Web dashboard for viewing and managing Omni-Cortex memories",
239
+ version="0.1.0",
240
+ lifespan=lifespan,
241
+ )
242
+
243
+ # Add security headers middleware (MUST come before CORS)
244
+ app.add_middleware(SecurityHeadersMiddleware)
245
+
246
+ # Rate limiting (if available)
247
+ if RATE_LIMITING_AVAILABLE:
248
+ limiter = Limiter(key_func=get_remote_address)
249
+ app.state.limiter = limiter
250
+ app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
251
+ else:
252
+ limiter = None
253
+
254
+
255
+ def rate_limit(limit_string: str):
256
+ """Decorator for conditional rate limiting.
257
+
258
+ Returns the actual limiter decorator if available, otherwise a no-op.
259
+ Usage: @rate_limit("10/minute")
260
+ """
261
+ if limiter is not None:
262
+ return limiter.limit(limit_string)
263
+ # No-op decorator when rate limiting is not available
264
+ def noop_decorator(func):
265
+ return func
266
+ return noop_decorator
267
+
268
+ # CORS configuration (environment-aware)
269
+ cors_config = get_cors_config()
270
+ app.add_middleware(
271
+ CORSMiddleware,
272
+ allow_origins=cors_config["allow_origins"],
273
+ allow_credentials=True,
274
+ allow_methods=cors_config["allow_methods"],
275
+ allow_headers=cors_config["allow_headers"],
276
+ )
277
+
278
+ # Static files for production build
279
+ DASHBOARD_DIR = Path(__file__).parent.parent
280
+ DIST_DIR = DASHBOARD_DIR / "frontend" / "dist"
281
+
282
+
283
+ def setup_static_files():
284
+ """Mount static files if dist directory exists (production build)."""
285
+ if DIST_DIR.exists():
286
+ # Mount assets directory
287
+ assets_dir = DIST_DIR / "assets"
288
+ if assets_dir.exists():
289
+ app.mount("/assets", StaticFiles(directory=str(assets_dir)), name="assets")
290
+ print(f"[Static] Serving assets from: {assets_dir}")
291
+
292
+
293
+ # Call setup at module load
294
+ setup_static_files()
295
+
296
+
297
+ # --- REST Endpoints ---
298
+
299
+
300
+ @app.get("/api/projects", response_model=list[ProjectInfo])
301
+ async def list_projects():
302
+ """List all discovered omni-cortex project databases."""
303
+ return scan_projects()
304
+
305
+
306
+ # --- Project Management Endpoints ---
307
+
308
+
309
+ @app.get("/api/projects/config")
310
+ async def get_project_config():
311
+ """Get project configuration (scan dirs, counts)."""
312
+ config = load_config()
313
+ return {
314
+ "scan_directories": config.scan_directories,
315
+ "registered_count": len(config.registered_projects),
316
+ "favorites_count": len(config.favorites),
317
+ }
318
+
319
+
320
+ @app.post("/api/projects/register")
321
+ async def register_project(body: ProjectRegistration):
322
+ """Manually register a project by path."""
323
+ success = add_registered_project(body.path, body.display_name)
324
+ if not success:
325
+ raise HTTPException(400, "Invalid path or already registered")
326
+ return {"success": True}
327
+
328
+
329
+ @app.delete("/api/projects/register")
330
+ async def unregister_project(path: str = Query(..., description="Project path to unregister")):
331
+ """Remove a registered project."""
332
+ success = remove_registered_project(path)
333
+ if not success:
334
+ raise HTTPException(404, "Project not found")
335
+ return {"success": True}
336
+
337
+
338
+ @app.post("/api/projects/favorite")
339
+ async def toggle_project_favorite(path: str = Query(..., description="Project path to toggle favorite")):
340
+ """Toggle favorite status for a project."""
341
+ is_favorite = toggle_favorite(path)
342
+ return {"is_favorite": is_favorite}
343
+
344
+
345
+ @app.post("/api/projects/scan-directories")
346
+ async def add_scan_dir(directory: str = Query(..., description="Directory path to add")):
347
+ """Add a directory to auto-scan list."""
348
+ success = add_scan_directory(directory)
349
+ if not success:
350
+ raise HTTPException(400, "Invalid directory or already added")
351
+ return {"success": True}
352
+
353
+
354
+ @app.delete("/api/projects/scan-directories")
355
+ async def remove_scan_dir(directory: str = Query(..., description="Directory path to remove")):
356
+ """Remove a directory from auto-scan list."""
357
+ success = remove_scan_directory(directory)
358
+ if not success:
359
+ raise HTTPException(404, "Directory not found")
360
+ return {"success": True}
361
+
362
+
363
+ @app.post("/api/projects/refresh")
364
+ async def refresh_projects():
365
+ """Force rescan of all project directories."""
366
+ projects = scan_projects()
367
+ return {"count": len(projects)}
368
+
369
+
370
+ # --- Aggregate Multi-Project Endpoints ---
371
+
372
+
373
@app.post("/api/aggregate/memories")
@rate_limit("50/minute")
async def get_aggregate_memories(request: AggregateMemoryRequest):
    """Get memories from multiple projects with project attribution."""
    try:
        filters = request.filters or FilterParams()
        combined = []

        for db_path in request.projects:
            # Missing databases are skipped silently, failing ones are logged
            if not Path(db_path).exists():
                continue
            try:
                for memory in get_memories(db_path, filters):
                    record = memory.model_dump()
                    record['source_project'] = db_path
                    # Project name = directory containing the database file
                    record['source_project_name'] = Path(db_path).parent.name
                    combined.append(record)
            except Exception as e:
                log_error(f"/api/aggregate/memories (project: {db_path})", e)
                continue

        # Stringify timestamps so tz-aware and naive values compare safely
        combined.sort(
            key=lambda rec: str(rec.get('last_accessed') or rec.get('created_at') or ''),
            reverse=True,
        )

        # Paginate the merged result set
        return combined[filters.offset:filters.offset + filters.limit]
    except Exception as e:
        log_error("/api/aggregate/memories", e)
        raise HTTPException(status_code=500, detail=str(e))
412
+
413
+
414
@app.post("/api/aggregate/stats", response_model=AggregateStatsResponse)
@rate_limit("50/minute")
async def get_aggregate_stats(request: AggregateStatsRequest):
    """Get combined statistics across multiple projects."""
    try:
        total_count = 0
        total_access = 0
        importance_sum = 0
        by_type = {}
        by_status = {}

        for db_path in request.projects:
            if not Path(db_path).exists():
                continue
            try:
                stats = get_memory_stats(db_path)
                total_count += stats.total_count
                total_access += stats.total_access_count
                # avg_importance is per-project; weight it by memory count
                importance_sum += stats.avg_importance * stats.total_count

                # Merge per-project histograms into the combined ones
                for key, n in stats.by_type.items():
                    by_type[key] = by_type.get(key, 0) + n
                for key, n in stats.by_status.items():
                    by_status[key] = by_status.get(key, 0) + n
            except Exception as e:
                log_error(f"/api/aggregate/stats (project: {db_path})", e)
                continue

        combined_avg = round(importance_sum / total_count, 1) if total_count > 0 else 0
        return AggregateStatsResponse(
            total_count=total_count,
            total_access_count=total_access,
            avg_importance=combined_avg,
            by_type=by_type,
            by_status=by_status,
            project_count=len(request.projects),
        )
    except Exception as e:
        log_error("/api/aggregate/stats", e)
        raise HTTPException(status_code=500, detail=str(e))
461
+
462
+
463
@app.post("/api/aggregate/tags")
@rate_limit("50/minute")
async def get_aggregate_tags(request: AggregateStatsRequest):
    """Get combined tags across multiple projects."""
    try:
        counts = {}

        for db_path in request.projects:
            if not Path(db_path).exists():
                continue
            try:
                # Same tag name across projects accumulates into one entry
                for tag in get_all_tags(db_path):
                    counts[tag['name']] = counts.get(tag['name'], 0) + tag['count']
            except Exception as e:
                log_error(f"/api/aggregate/tags (project: {db_path})", e)
                continue

        # Return the merged tags ordered by descending count
        merged = [{'name': name, 'count': total} for name, total in counts.items()]
        merged.sort(key=lambda entry: entry['count'], reverse=True)
        return merged
    except Exception as e:
        log_error("/api/aggregate/tags", e)
        raise HTTPException(status_code=500, detail=str(e))
492
+
493
+
494
@app.post("/api/aggregate/chat", response_model=ChatResponse)
@rate_limit("10/minute")
async def chat_across_projects(request: AggregateChatRequest):
    """Ask AI about memories across multiple projects.

    Searches each requested project's database for memories relevant to the
    question, builds a context string with per-project attribution, and asks
    the chat service to answer from that context.

    Raises:
        HTTPException: 503 when the chat service is unconfigured,
            500 on any other unexpected failure.
    """
    try:
        if not chat_service.is_available():
            raise HTTPException(
                status_code=503,
                detail="Chat service not available. Set GEMINI_API_KEY environment variable."
            )

        all_sources = []

        # Gather relevant memories from each project
        for project_path in request.projects:
            # Skip missing databases rather than failing the whole request
            if not Path(project_path).exists():
                continue

            try:
                memories = search_memories(
                    project_path,
                    request.question,
                    limit=request.max_memories_per_project
                )

                for m in memories:
                    # Project name is the directory that contains the db file
                    project_dir = Path(project_path).parent
                    source = {
                        'id': m.id,
                        'type': m.memory_type,
                        'content_preview': m.content[:200],  # trimmed to keep the prompt small
                        'tags': m.tags,
                        'project_path': project_path,
                        'project_name': project_dir.name,
                    }
                    all_sources.append(source)
            except Exception as e:
                # One failing project is logged and skipped; others still contribute
                log_error(f"/api/aggregate/chat (project: {project_path})", e)
                continue

        if not all_sources:
            return ChatResponse(
                answer="No relevant memories found across the selected projects.",
                sources=[],
            )

        # Build context with project attribution
        context = "\n\n".join([
            f"[From: {s['project_name']}] {s['content_preview']}"
            for s in all_sources
        ])

        # Query AI with attributed context
        answer = await chat_service.generate_response(request.question, context)

        log_success("/api/aggregate/chat", projects=len(request.projects), sources=len(all_sources))

        return ChatResponse(
            answer=answer,
            sources=[ChatSource(**s) for s in all_sources],
        )
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/aggregate/chat", e)
        raise HTTPException(status_code=500, detail=str(e))
560
+
561
+
562
@app.get("/api/memories")
@rate_limit("100/minute")
async def list_memories(
    project: str = Query(..., description="Path to the database file"),
    memory_type: Optional[str] = Query(None, alias="type"),
    status: Optional[str] = None,
    tags: Optional[str] = None,
    search: Optional[str] = None,
    min_importance: Optional[int] = None,
    max_importance: Optional[int] = None,
    sort_by: str = "last_accessed",
    sort_order: str = "desc",
    limit: int = 50,
    offset: int = 0,
):
    """List memories for one project with filtering, sorting and pagination."""
    try:
        if not Path(project).exists():
            log_error("/api/memories", FileNotFoundError("Database not found"), project=project)
            raise HTTPException(status_code=404, detail="Database not found")

        # Tags arrive as a comma-separated query string
        tag_list = tags.split(",") if tags else None
        filters = FilterParams(
            memory_type=memory_type,
            status=status,
            tags=tag_list,
            search=search,
            min_importance=min_importance,
            max_importance=max_importance,
            sort_by=sort_by,
            sort_order=sort_order,
            limit=limit,
            offset=offset,
        )

        results = get_memories(project, filters)
        log_success("/api/memories", count=len(results), offset=offset, filters=bool(search or memory_type))
        return results
    except Exception as e:
        log_error("/api/memories", e, project=project)
        raise
602
+
603
+
604
@app.post("/api/memories")
@rate_limit("30/minute")
async def create_memory_endpoint(
    request: MemoryCreateRequest,
    project: str = Query(..., description="Path to the database file"),
):
    """Create a new memory.

    Persists the memory, re-reads the stored row so the response reflects
    DB-side defaults, and broadcasts a ``memory_created`` WebSocket event.

    Raises:
        HTTPException: 404 when the database file does not exist.
    """
    try:
        if not Path(project).exists():
            log_error("/api/memories POST", FileNotFoundError("Database not found"), project=project)
            raise HTTPException(status_code=404, detail="Database not found")

        # Create the memory
        memory_id = create_memory(
            db_path=project,
            content=request.content,
            memory_type=request.memory_type,
            context=request.context,
            tags=request.tags if request.tags else None,
            importance_score=request.importance_score,
        )

        # Fetch the created memory to return it
        created_memory = get_memory_by_id(project, memory_id)

        # Broadcast to WebSocket clients
        await manager.broadcast("memory_created", created_memory.model_dump(by_alias=True))

        log_success("/api/memories POST", memory_id=memory_id, type=request.memory_type)
        return created_memory
    except HTTPException:
        raise
    except Exception as e:
        # Fix: removed leftover debug print/traceback to stdout — the
        # structured log_error call already records the failure.
        log_error("/api/memories POST", e, project=project)
        raise
642
+
643
+
644
# NOTE: These routes MUST be defined before /api/memories/{memory_id} to avoid path conflicts
@app.get("/api/memories/needs-review")
async def get_memories_needing_review_endpoint(
    project: str = Query(..., description="Path to the database file"),
    days_threshold: int = 30,
    limit: int = 50,
):
    """List memories whose freshness should be reviewed."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_memories_needing_review(project, days_threshold, limit)
656
+
657
+
658
@app.post("/api/memories/bulk-update-status")
async def bulk_update_status_endpoint(
    project: str = Query(..., description="Path to the database file"),
    memory_ids: Optional[list[str]] = None,
    status: str = "fresh",
):
    """Update status for multiple memories at once.

    Fix: ``memory_ids`` previously defaulted to a mutable ``[]`` (shared
    default-argument pitfall). It now defaults to ``None`` and is
    normalized to an empty list, which preserves the original behavior
    (an omitted value updates nothing).

    Raises:
        HTTPException: 404 for a missing database, 400 for an unknown status.
    """
    if not Path(project).exists():
        raise HTTPException(status_code=404, detail="Database not found")

    valid_statuses = ["fresh", "needs_review", "outdated", "archived"]
    if status not in valid_statuses:
        raise HTTPException(status_code=400, detail=f"Invalid status. Must be one of: {valid_statuses}")

    count = bulk_update_memory_status(project, memory_ids or [], status)

    # Notify connected clients
    await manager.broadcast("memories_bulk_updated", {"count": count, "status": status})

    return {"updated_count": count, "status": status}
678
+
679
+
680
@app.get("/api/memories/{memory_id}")
async def get_memory(
    memory_id: str,
    project: str = Query(..., description="Path to the database file"),
):
    """Fetch one memory by its identifier."""
    if not Path(project).exists():
        raise HTTPException(status_code=404, detail="Database not found")

    record = get_memory_by_id(project, memory_id)
    if not record:
        raise HTTPException(status_code=404, detail="Memory not found")
    return record
693
+
694
+
695
@app.put("/api/memories/{memory_id}")
async def update_memory_endpoint(
    memory_id: str,
    updates: MemoryUpdate,
    project: str = Query(..., description="Path to the database file"),
):
    """Apply a partial update to a memory and broadcast the change."""
    try:
        if not Path(project).exists():
            log_error("/api/memories/update", FileNotFoundError("Database not found"), memory_id=memory_id)
            raise HTTPException(status_code=404, detail="Database not found")

        updated = update_memory(project, memory_id, updates)
        if not updated:
            log_error("/api/memories/update", ValueError("Memory not found"), memory_id=memory_id)
            raise HTTPException(status_code=404, detail="Memory not found")

        # Push the updated record to live dashboard clients
        await manager.broadcast("memory_updated", updated.model_dump(by_alias=True))
        changed = len(updates.model_dump(exclude_unset=True))
        log_success("/api/memories/update", memory_id=memory_id, fields_updated=changed)
        return updated
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/memories/update", e, memory_id=memory_id)
        raise
721
+
722
+
723
@app.delete("/api/memories/{memory_id}")
async def delete_memory_endpoint(
    memory_id: str,
    project: str = Query(..., description="Path to the database file"),
):
    """Remove a memory and notify connected clients."""
    try:
        if not Path(project).exists():
            log_error("/api/memories/delete", FileNotFoundError("Database not found"), memory_id=memory_id)
            raise HTTPException(status_code=404, detail="Database not found")

        if not delete_memory(project, memory_id):
            log_error("/api/memories/delete", ValueError("Memory not found"), memory_id=memory_id)
            raise HTTPException(status_code=404, detail="Memory not found")

        # Tell live clients the record is gone
        await manager.broadcast("memory_deleted", {"id": memory_id})
        log_success("/api/memories/delete", memory_id=memory_id)
        return {"message": "Memory deleted", "id": memory_id}
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/memories/delete", e, memory_id=memory_id)
        raise
748
+
749
+
750
@app.get("/api/memories/stats/summary")
async def memory_stats(
    project: str = Query(..., description="Path to the database file"),
):
    """Summary statistics for a project's memories."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_memory_stats(project)
759
+
760
+
761
@app.get("/api/search")
async def search(
    q: str = Query(..., min_length=1),
    project: str = Query(..., description="Path to the database file"),
    limit: int = 20,
):
    """Search memories by free-text query."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return search_memories(project, q, limit)
772
+
773
+
774
@app.get("/api/activities")
async def list_activities(
    project: str = Query(..., description="Path to the database file"),
    event_type: Optional[str] = None,
    tool_name: Optional[str] = None,
    limit: int = 100,
    offset: int = 0,
):
    """Fetch activity-log entries, optionally filtered by event or tool."""
    if not Path(project).exists():
        raise HTTPException(status_code=404, detail="Database not found")

    # Older databases may lack the summary columns; migrate on demand
    ensure_migrations(project)
    return get_activities(project, event_type, tool_name, limit, offset)
790
+
791
+
792
@app.get("/api/timeline")
async def get_timeline_view(
    project: str = Query(..., description="Path to the database file"),
    hours: int = 24,
    include_memories: bool = True,
    include_activities: bool = True,
):
    """Chronological view of recent memories and/or activities."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_timeline(project, hours, include_memories, include_activities)
804
+
805
+
806
@app.get("/api/tags")
async def list_tags(
    project: str = Query(..., description="Path to the database file"),
):
    """Every tag in the project together with its usage count."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_all_tags(project)
815
+
816
+
817
@app.get("/api/types")
async def list_types(
    project: str = Query(..., description="Path to the database file"),
):
    """Distribution of memories across memory types."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_type_distribution(project)
826
+
827
+
828
@app.get("/api/sessions")
async def list_sessions(
    project: str = Query(..., description="Path to the database file"),
    limit: int = 20,
):
    """Most recent sessions for the project."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_sessions(project, limit)
838
+
839
+
840
+ # --- Stats Endpoints for Charts ---
841
+
842
+
843
@app.get("/api/stats/activity-heatmap")
async def get_activity_heatmap_endpoint(
    project: str = Query(..., description="Path to the database file"),
    days: int = 90,
):
    """Per-day activity counts feeding the heatmap visualization."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_activity_heatmap(project, days)
853
+
854
+
855
@app.get("/api/stats/tool-usage")
async def get_tool_usage_endpoint(
    project: str = Query(..., description="Path to the database file"),
    limit: int = 10,
):
    """Usage counts for the most-used tools."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_tool_usage(project, limit)
865
+
866
+
867
@app.get("/api/stats/memory-growth")
async def get_memory_growth_endpoint(
    project: str = Query(..., description="Path to the database file"),
    days: int = 30,
):
    """Memory creation counts over the requested time window."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_memory_growth(project, days)
877
+
878
+
879
+ # --- Command Analytics Endpoints ---
880
+
881
+
882
@app.get("/api/stats/command-usage")
async def get_command_usage_endpoint(
    project: str = Query(..., description="Path to the database file"),
    scope: Optional[str] = Query(None, description="Filter by scope: 'universal' or 'project'"),
    days: int = Query(30, ge=1, le=365),
):
    """Usage statistics for slash commands."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_command_usage(project, scope, days)
893
+
894
+
895
@app.get("/api/stats/skill-usage")
async def get_skill_usage_endpoint(
    project: str = Query(..., description="Path to the database file"),
    scope: Optional[str] = Query(None, description="Filter by scope: 'universal' or 'project'"),
    days: int = Query(30, ge=1, le=365),
):
    """Usage statistics for skills."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_skill_usage(project, scope, days)
906
+
907
+
908
@app.get("/api/stats/mcp-usage")
async def get_mcp_usage_endpoint(
    project: str = Query(..., description="Path to the database file"),
    days: int = Query(30, ge=1, le=365),
):
    """Usage statistics for MCP servers."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_mcp_usage(project, days)
918
+
919
+
920
@app.get("/api/activities/{activity_id}")
async def get_activity_detail_endpoint(
    activity_id: str,
    project: str = Query(..., description="Path to the database file"),
):
    """Full activity record including the complete input/output payloads."""
    if not Path(project).exists():
        raise HTTPException(status_code=404, detail="Database not found")

    # Bring older databases up to the current schema first
    ensure_migrations(project)

    record = get_activity_detail(project, activity_id)
    if not record:
        raise HTTPException(status_code=404, detail="Activity not found")
    return record
937
+
938
+
939
@app.post("/api/activities/backfill-summaries")
async def backfill_activity_summaries_endpoint(
    project: str = Query(..., description="Path to the database file"),
):
    """Generate summaries for existing activities that lack them."""
    if not Path(project).exists():
        raise HTTPException(status_code=404, detail="Database not found")

    try:
        # Imported lazily: backfill is an occasional maintenance task
        from backfill_summaries import backfill_all
        counts = backfill_all(project)
        return {
            "success": True,
            "summaries_updated": counts["summaries"],
            "mcp_servers_updated": counts["mcp_servers"],
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Backfill failed: {str(e)}")
957
+
958
+
959
+ # --- Session Context Endpoints ---
960
+
961
+
962
@app.get("/api/sessions/recent")
async def get_recent_sessions_endpoint(
    project: str = Query(..., description="Path to the database file"),
    limit: int = 5,
):
    """Recent sessions with their summaries."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_recent_sessions(project, limit)
972
+
973
+
974
+ # --- Relationship Graph Endpoints ---
975
+
976
+
977
@app.get("/api/relationships")
async def get_relationships_endpoint(
    project: str = Query(..., description="Path to the database file"),
    memory_id: Optional[str] = None,
):
    """Memory relationships for graph visualization."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_relationships(project, memory_id)
987
+
988
+
989
@app.get("/api/relationships/graph")
async def get_relationship_graph_endpoint(
    project: str = Query(..., description="Path to the database file"),
    center_id: Optional[str] = None,
    depth: int = 2,
):
    """Graph data centered on a memory, expanded to the requested depth."""
    db_file = Path(project)
    if not db_file.exists():
        raise HTTPException(status_code=404, detail="Database not found")
    return get_relationship_graph(project, center_id, depth)
1000
+
1001
+
1002
+ # --- Chat Endpoint ---
1003
+
1004
+
1005
@app.get("/api/chat/status")
async def chat_status():
    """Report whether the AI chat backend is configured and usable."""
    available = chat_service.is_available()
    return {
        "available": available,
        "message": "Chat is available" if available else "Set GEMINI_API_KEY environment variable to enable chat",
    }
1012
+
1013
+
1014
@app.post("/api/chat", response_model=ChatResponse)
@rate_limit("10/minute")
async def chat_with_memories(
    request: ChatRequest,
    project: str = Query(..., description="Path to the database file"),
):
    """Answer a natural-language question using the project's memories."""
    try:
        if not Path(project).exists():
            log_error("/api/chat", FileNotFoundError("Database not found"), question=request.question[:50])
            raise HTTPException(status_code=404, detail="Database not found")

        # Optional style context: prefer the profile computed from raw
        # user_messages (richer), fall back to the stored profile, and
        # degrade gracefully when neither is available.
        style_context = None
        if request.use_style:
            try:
                style_context = compute_style_profile_from_messages(project) or get_style_profile(project)
            except Exception:
                pass

        result = await chat_service.ask_about_memories(
            project,
            request.question,
            request.max_memories,
            style_context,
        )

        log_success("/api/chat", question_len=len(request.question), sources=len(result.get("sources", [])))
        return ChatResponse(**result)
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/chat", e, question=request.question[:50])
        raise
1052
+
1053
+
1054
@app.get("/api/chat/stream")
@rate_limit("10/minute")
async def stream_chat(
    project: str = Query(..., description="Path to the database file"),
    question: str = Query(..., description="The question to ask"),
    max_memories: int = Query(10, ge=1, le=50),
    use_style: bool = Query(False, description="Use user's communication style"),
):
    """Stream a chat answer to the client as server-sent events."""
    from fastapi.responses import StreamingResponse

    if not Path(project).exists():
        raise HTTPException(status_code=404, detail="Database not found")

    # Optional style context: computed profile first (richer data),
    # stored profile as fallback, silence errors if neither exists.
    style_context = None
    if use_style:
        try:
            style_context = compute_style_profile_from_messages(project) or get_style_profile(project)
        except Exception:
            pass

    async def event_stream():
        try:
            async for event in chat_service.stream_ask_about_memories(project, question, max_memories, style_context):
                yield f"data: {json.dumps(event)}\n\n"
        except Exception as e:
            # Surface streaming failures as an SSE error event
            yield f"data: {json.dumps({'type': 'error', 'data': str(e)})}\n\n"

    # Headers disable response caching and proxy buffering for SSE
    return StreamingResponse(
        event_stream(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",
        }
    )
1096
+
1097
+
1098
@app.post("/api/chat/save", response_model=ConversationSaveResponse)
async def save_chat_conversation(
    request: ConversationSaveRequest,
    project: str = Query(..., description="Path to the database file"),
):
    """Persist a chat conversation as a memory."""
    try:
        if not Path(project).exists():
            log_error("/api/chat/save", FileNotFoundError("Database not found"))
            raise HTTPException(status_code=404, detail="Database not found")

        payload = [msg.model_dump() for msg in request.messages]
        result = await chat_service.save_conversation(
            project,
            payload,
            request.referenced_memory_ids,
            request.importance or 60,  # default importance when omitted
        )

        log_success("/api/chat/save", memory_id=result["memory_id"], messages=len(request.messages))
        return ConversationSaveResponse(**result)
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/chat/save", e)
        raise
1123
+
1124
+
1125
@app.post("/api/compose-response", response_model=ComposeResponse)
@rate_limit("10/minute")
async def compose_response_endpoint(
    request: ComposeRequest,
    project: str = Query(..., description="Path to the database file"),
):
    """Compose a response to an incoming message in the user's style.

    Builds a style profile from stored user messages, then asks the chat
    service to draft a reply honoring the requested template and tone.

    Raises:
        HTTPException: 404 for a missing database, 500 on service errors.
    """
    try:
        if not Path(project).exists():
            log_error("/api/compose-response", FileNotFoundError("Database not found"))
            raise HTTPException(status_code=404, detail="Database not found")

        # Get style profile
        style_profile = compute_style_profile_from_messages(project)

        # Compose the response
        result = await chat_service.compose_response(
            db_path=project,
            incoming_message=request.incoming_message,
            context_type=request.context_type,
            template=request.template,
            tone_level=request.tone_level,
            include_memories=request.include_memories,
            style_profile=style_profile,
        )

        # The service reports failures via an "error" key rather than raising
        if result.get("error"):
            log_error("/api/compose-response", Exception(result["error"]))
            raise HTTPException(status_code=500, detail=result["error"])

        # Build response model
        import uuid
        from datetime import datetime
        response = ComposeResponse(
            id=str(uuid.uuid4()),
            response=result["response"],
            sources=result["sources"],
            # Style counts as "applied" only if the profile saw >= 1 message
            style_applied=bool(style_profile and style_profile.get("total_messages", 0) > 0),
            tone_level=request.tone_level,
            template_used=request.template,
            incoming_message=request.incoming_message,
            context_type=request.context_type,
            created_at=datetime.now().isoformat(),  # NOTE(review): naive local time — confirm UTC is not expected
        )

        log_success("/api/compose-response", context=request.context_type, tone=request.tone_level)
        return response
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/compose-response", e)
        raise HTTPException(status_code=500, detail=str(e))
1177
+
1178
+
1179
+ # --- Image Generation Endpoints ---
1180
+
1181
+
1182
@app.get("/api/image/status")
async def get_image_status():
    """Report whether image generation is configured and usable."""
    ready = image_service.is_available()
    return {
        "available": ready,
        "message": "Image generation ready" if ready
        else "Configure GEMINI_API_KEY and install google-genai for image generation",
    }
1190
+
1191
+
1192
@app.get("/api/image/presets")
async def get_image_presets():
    """List the preset templates the image service supports."""
    return {"presets": image_service.get_presets()}
1196
+
1197
+
1198
@app.post("/api/image/generate-batch", response_model=BatchImageGenerationResponse)
@rate_limit("5/minute")
async def generate_images_batch(
    request: BatchImageGenerationRequest,
    db_path: str = Query(..., alias="project", description="Path to the database file"),
):
    """Generate multiple images with different presets/prompts.

    Accepts exactly 1, 2, or 4 image requests, optionally enriched with
    memory and chat context, and delegates generation to the image service.
    """
    # Validate image count (counts other than 1/2/4 are rejected up front)
    if len(request.images) not in [1, 2, 4]:
        return BatchImageGenerationResponse(
            success=False,
            errors=["Must request 1, 2, or 4 images"]
        )

    # Build memory context (only when specific memories were referenced)
    memory_context = ""
    if request.memory_ids:
        memory_context = image_service.build_memory_context(db_path, request.memory_ids)

    # Build chat context
    chat_context = image_service.build_chat_context(request.chat_messages)

    # Convert request models to internal format
    image_requests = [
        SingleImageRequest(
            preset=ImagePreset(img.preset),
            custom_prompt=img.custom_prompt,
            aspect_ratio=img.aspect_ratio,
            image_size=img.image_size
        )
        for img in request.images
    ]

    result = await image_service.generate_batch(
        requests=image_requests,
        memory_context=memory_context,
        chat_context=chat_context,
        use_search_grounding=request.use_search_grounding
    )

    # Mirror the service result back out through the API response models
    return BatchImageGenerationResponse(
        success=result.success,
        images=[
            SingleImageResponseModel(
                success=img.success,
                image_data=img.image_data,
                text_response=img.text_response,
                thought_signature=img.thought_signature,
                image_id=img.image_id,
                error=img.error,
                index=img.index
            )
            for img in result.images
        ],
        errors=result.errors
    )
1254
+
1255
+
1256
@app.post("/api/image/refine", response_model=SingleImageResponseModel)
@rate_limit("5/minute")
async def refine_image(request: ImageRefineRequest):
    """Refine a previously generated image with an additional prompt."""
    outcome = await image_service.refine_image(
        image_id=request.image_id,
        refinement_prompt=request.refinement_prompt,
        aspect_ratio=request.aspect_ratio,
        image_size=request.image_size
    )

    # Translate the service result into the API response model
    return SingleImageResponseModel(
        success=outcome.success,
        image_data=outcome.image_data,
        text_response=outcome.text_response,
        thought_signature=outcome.thought_signature,
        image_id=outcome.image_id,
        error=outcome.error
    )
1275
+
1276
+
1277
@app.post("/api/image/clear-conversation")
async def clear_image_conversation(image_id: Optional[str] = None):
    """Reset image conversation history, optionally for a single image only."""
    image_service.clear_conversation(image_id)
    return {"status": "cleared", "image_id": image_id}
1282
+
1283
+
1284
+ # --- User Messages & Style Profile Endpoints ---
1285
+
1286
+
1287
@app.get("/api/user-messages", response_model=UserMessagesResponse)
@rate_limit("100/minute")
async def list_user_messages(
    project: str = Query(..., description="Path to the database file"),
    session_id: Optional[str] = None,
    search: Optional[str] = None,
    has_code_blocks: Optional[bool] = None,
    has_questions: Optional[bool] = None,
    has_commands: Optional[bool] = None,
    tone_filter: Optional[str] = None,
    sort_by: str = "timestamp",
    sort_order: str = "desc",
    limit: int = Query(50, ge=1, le=500),
    offset: int = Query(0, ge=0),
):
    """Get user messages with filtering and pagination.

    Filter options:
    - session_id: Filter by session
    - search: Search in message content
    - has_code_blocks: Filter by presence of code blocks
    - has_questions: Filter by presence of questions
    - has_commands: Filter by slash commands
    - tone_filter: Filter by tone indicator (polite, urgent, technical, casual, direct, inquisitive)
    """
    try:
        if not Path(project).exists():
            raise HTTPException(status_code=404, detail="Database not found")

        messages = get_user_messages(
            project,
            session_id=session_id,
            search=search,
            has_code_blocks=has_code_blocks,
            has_questions=has_questions,
            has_commands=has_commands,
            tone_filter=tone_filter,
            sort_by=sort_by,
            sort_order=sort_order,
            limit=limit,
            offset=offset,
        )

        # NOTE(review): total_count only honors session_id, not the other
        # filters above, so has_more may overestimate when filters are set
        # — confirm whether the count helper should take the same filters.
        total_count = get_user_message_count(project, session_id=session_id)
        has_more = (offset + len(messages)) < total_count

        log_success("/api/user-messages", count=len(messages), total=total_count)
        return UserMessagesResponse(
            messages=[UserMessage(**m) for m in messages],
            total_count=total_count,
            limit=limit,
            offset=offset,
            has_more=has_more,
        )
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/user-messages", e)
        raise HTTPException(status_code=500, detail=str(e))
1346
+
1347
+
1348
@app.delete("/api/user-messages/{message_id}")
async def delete_single_user_message(
    message_id: str,
    project: str = Query(..., description="Path to the database file"),
):
    """Delete one user message identified by its ID.

    Raises 404 when the database file or the message does not exist.
    """
    try:
        if not Path(project).exists():
            raise HTTPException(status_code=404, detail="Database not found")

        # delete_user_message reports whether a row was actually removed.
        if not delete_user_message(project, message_id):
            raise HTTPException(status_code=404, detail="Message not found")

        log_success("/api/user-messages/delete", message_id=message_id)
        return {"message": "Message deleted", "id": message_id}
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/user-messages/delete", e)
        raise HTTPException(status_code=500, detail=str(e))
1369
+
1370
+
1371
@app.post("/api/user-messages/bulk-delete")
async def delete_user_messages_bulk_endpoint(
    request: BulkDeleteRequest,
    project: str = Query(..., description="Path to the database file"),
):
    """Delete a batch of user messages in a single request."""
    try:
        if not Path(project).exists():
            raise HTTPException(status_code=404, detail="Database not found")

        # The DB helper returns how many rows it removed.
        count = delete_user_messages_bulk(project, request.message_ids)
        log_success("/api/user-messages/bulk-delete", deleted_count=count)
        return {"message": f"Deleted {count} messages", "deleted_count": count}
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/user-messages/bulk-delete", e)
        raise HTTPException(status_code=500, detail=str(e))
1390
+
1391
+
1392
@app.get("/api/style/profile")
async def get_style_profile_endpoint(
    project: str = Query(..., description="Path to the database file"),
    project_path: Optional[str] = Query(None, description="Project-specific profile path, or None for global"),
):
    """Return the user's writing-style profile in the frontend's shape.

    The response carries camelCase keys: totalMessages, avgWordCount,
    primaryTone, questionPercentage, toneDistribution, styleMarkers.
    A pre-computed profile from user_style_profiles is preferred; when
    absent, one is computed from user_messages on the fly.
    """
    try:
        if not Path(project).exists():
            raise HTTPException(status_code=404, detail="Database not found")

        # Stored profile first, falling back to an on-the-fly computation.
        profile = get_style_profile(project, project_path=project_path)
        if not profile:
            profile = compute_style_profile_from_messages(project)

        # No data at all: hand back an empty skeleton the UI can render.
        if not profile:
            return {
                "totalMessages": 0,
                "avgWordCount": 0,
                "primaryTone": "direct",
                "questionPercentage": 0,
                "toneDistribution": {},
                "styleMarkers": ["No data available yet"],
            }

        # A profile keyed with "totalMessages" is already camelCase; one
        # keyed with "id" came from the user_style_profiles table and must
        # be converted (it lacks tone data, so derive that from messages).
        if "totalMessages" not in profile and "id" in profile:
            computed = compute_style_profile_from_messages(project) or {}
            tone_dist = computed.get("toneDistribution", {})
            primary_tone = computed.get("primaryTone", "direct")
            style_markers = computed.get("styleMarkers", [])

            profile = {
                "totalMessages": profile.get("total_messages", 0),
                "avgWordCount": profile.get("avg_word_count", 0) or 0,
                "primaryTone": primary_tone,
                "questionPercentage": (profile.get("question_frequency", 0) or 0) * 100,
                "toneDistribution": tone_dist,
                "styleMarkers": style_markers or profile.get("greeting_patterns", []) or [],
            }

        log_success("/api/style/profile", has_profile=True, total_messages=profile.get("totalMessages", 0))
        return profile
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/style/profile", e)
        raise HTTPException(status_code=500, detail=str(e))
1462
+
1463
+
1464
@app.get("/api/style/samples")
async def get_style_samples_endpoint(
    project: str = Query(..., description="Path to the database file"),
    samples_per_tone: int = Query(3, ge=1, le=10, description="Max samples per tone category"),
):
    """Return sample user messages for the style-analysis preview.

    Samples are grouped by style category (professional, casual,
    technical, creative), capped at samples_per_tone per group.
    """
    try:
        if not Path(project).exists():
            raise HTTPException(status_code=404, detail="Database not found")

        grouped = get_style_samples_by_category(project, samples_per_tone=samples_per_tone)
        log_success("/api/style/samples", count=sum(len(msgs) for msgs in grouped.values()))
        return grouped
    except HTTPException:
        raise
    except Exception as e:
        log_error("/api/style/samples", e)
        raise HTTPException(status_code=500, detail=str(e))
1487
+
1488
+
1489
+ # --- WebSocket Endpoint ---
1490
+
1491
+
1492
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for real-time updates.

    Registers the socket with the connection manager, confirms the
    connection by sending the assigned client_id, then loops servicing
    keepalive "ping" frames with "pong" events until the client
    disconnects.
    """
    client_id = await manager.connect(websocket)
    try:
        # Send initial connection confirmation so the client learns its id.
        await manager.send_to_client(client_id, "connected", {"client_id": client_id})

        # Keep the connection alive and handle incoming frames.
        while True:
            data = await websocket.receive_text()
            # Answer keepalive pings; other frames are ignored.
            if data == "ping":
                await manager.send_to_client(client_id, "pong", {})
    except WebSocketDisconnect:
        await manager.disconnect(client_id)
    except Exception as e:
        # Consistency fix: use the shared log_error helper (as every HTTP
        # endpoint does) instead of a bare print().
        log_error("/ws", e)
        await manager.disconnect(client_id)
1511
+
1512
+
1513
+ # --- Export Endpoints ---
1514
+
1515
+
1516
@app.get("/api/export")
async def export_memories(
    project: str = Query(..., description="Path to the database file"),
    format: str = Query("json", description="Export format: json, markdown, csv"),
    memory_ids: Optional[str] = Query(None, description="Comma-separated memory IDs to export, or all if empty"),
    include_relationships: bool = Query(True, description="Include memory relationships"),
):
    """Export memories as a downloadable JSON, Markdown, or CSV attachment.

    When memory_ids is given, only those memories (IDs that resolve) are
    exported; otherwise up to 1000 memories sorted by created_at desc are
    included. Relationships are embedded in the JSON payload when
    include_relationships is true (Markdown/CSV ignore them).

    Raises 404 if the database file is missing and 400 for an unknown
    format.
    """
    from fastapi.responses import Response
    import csv
    import io

    if not Path(project).exists():
        raise HTTPException(status_code=404, detail="Database not found")

    # Fix: validate the format up front so a bad request fails before any
    # (potentially expensive) database reads. Error response is identical
    # to the one previously raised at the end.
    if format not in ("json", "markdown", "csv"):
        raise HTTPException(status_code=400, detail=f"Unsupported format: {format}. Use json, markdown, or csv.")

    # Resolve the set of memories to export.
    if memory_ids:
        ids = memory_ids.split(",")
        memories = [get_memory_by_id(project, mid) for mid in ids if mid.strip()]
        # Silently drop IDs that did not resolve.
        memories = [m for m in memories if m is not None]
    else:
        from models import FilterParams
        filters = FilterParams(limit=1000, offset=0, sort_by="created_at", sort_order="desc")
        memories = get_memories(project, filters)

    # Relationships only matter for the JSON format below.
    relationships = []
    if include_relationships:
        relationships = get_relationships(project)

    if format == "json":
        export_data = {
            "exported_at": datetime.now().isoformat(),
            "project": project,
            "memory_count": len(memories),
            "memories": [m.model_dump(by_alias=True) for m in memories],
            "relationships": relationships if include_relationships else [],
        }
        return Response(
            content=json.dumps(export_data, indent=2, default=str),
            media_type="application/json",
            headers={"Content-Disposition": f"attachment; filename=memories_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"}
        )

    elif format == "markdown":
        md_lines = [
            f"# Omni-Cortex Memory Export",
            f"",
            f"**Exported:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"**Total Memories:** {len(memories)}",
            f"",
            "---",
            "",
        ]
        for m in memories:
            md_lines.extend([
                f"## {m.type.title()}: {m.content[:50]}{'...' if len(m.content) > 50 else ''}",
                f"",
                f"**ID:** `{m.id}`",
                f"**Type:** {m.type}",
                f"**Status:** {m.status}",
                f"**Importance:** {m.importance_score}",
                f"**Created:** {m.created_at}",
                f"**Tags:** {', '.join(m.tags) if m.tags else 'None'}",
                f"",
                "### Content",
                f"",
                m.content,
                f"",
                "### Context",
                f"",
                m.context or "_No context_",
                f"",
                "---",
                "",
            ])
        return Response(
            content="\n".join(md_lines),
            media_type="text/markdown",
            headers={"Content-Disposition": f"attachment; filename=memories_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md"}
        )

    else:  # format == "csv" (guaranteed by the validation above)
        output = io.StringIO()
        writer = csv.writer(output)
        writer.writerow(["id", "type", "status", "importance", "content", "context", "tags", "created_at", "last_accessed"])
        for m in memories:
            writer.writerow([
                m.id,
                m.type,
                m.status,
                m.importance_score,
                m.content,
                m.context or "",
                ",".join(m.tags) if m.tags else "",
                m.created_at,
                m.last_accessed or "",
            ])
        return Response(
            content=output.getvalue(),
            media_type="text/csv",
            headers={"Content-Disposition": f"attachment; filename=memories_export_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"}
        )
1622
+
1623
+
1624
+ # --- Health Check ---
1625
+
1626
+
1627
@app.get("/health")
async def health_check():
    """Liveness probe: report server status and open WebSocket count."""
    payload = {
        "status": "healthy",
        "websocket_connections": manager.connection_count,
    }
    return payload
1634
+
1635
+
1636
+ # --- Static File Serving (SPA) ---
1637
+ # These routes must come AFTER all API routes
1638
+
1639
+
1640
@app.get("/")
async def serve_root():
    """Serve the built frontend's index.html, or an API banner when no
    frontend build is present."""
    index_file = DIST_DIR / "index.html"
    if not index_file.exists():
        return {"message": "Omni-Cortex Dashboard API", "docs": "/docs"}
    return FileResponse(str(index_file))
1647
+
1648
+
1649
@app.get("/{path:path}")
async def serve_spa(path: str):
    """Catch-all SPA route with path-traversal protection.

    Reserved API/infra prefixes yield 404; validated static files are
    served directly; anything else falls back to index.html so the SPA
    can do client-side routing.
    """
    reserved_prefixes = ("api/", "ws", "health", "docs", "openapi", "redoc")
    if path.startswith(reserved_prefixes):
        raise HTTPException(status_code=404, detail="Not found")

    # Only serve files whose resolved path stays inside DIST_DIR.
    resolved = PathValidator.is_safe_static_path(DIST_DIR, path)
    if resolved:
        return FileResponse(str(resolved))

    # Fall back to the SPA shell for client-side routes.
    fallback = DIST_DIR / "index.html"
    if fallback.exists():
        return FileResponse(str(fallback))

    raise HTTPException(status_code=404, detail="Not found")
1667
+
1668
+
1669
def run():
    """Run the dashboard server under uvicorn with auto-reload.

    NOTE(review): binds 0.0.0.0 (all interfaces) with reload enabled —
    confirm this is intended outside local development.
    """
    server_options = {
        "host": "0.0.0.0",
        "port": 8765,
        "reload": True,
        "reload_dirs": [str(Path(__file__).parent)],
    }
    uvicorn.run("main:app", **server_options)


if __name__ == "__main__":
    run()