omni-cortex 1.3.0__py3-none-any.whl → 1.11.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/.env.example +12 -0
  2. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +280 -0
  3. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +19 -10
  4. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/database.py +97 -18
  5. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/image_service.py +21 -12
  6. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +34 -4
  7. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/main.py +390 -13
  8. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/models.py +64 -12
  9. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/prompt_security.py +111 -0
  10. omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/security.py +104 -0
  11. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +24 -2
  12. omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/post_tool_use.py +429 -0
  13. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/pre_tool_use.py +52 -2
  14. omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/session_utils.py +186 -0
  15. {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/METADATA +237 -8
  16. omni_cortex-1.11.3.dist-info/RECORD +25 -0
  17. omni_cortex-1.3.0.data/data/share/omni-cortex/hooks/post_tool_use.py +0 -160
  18. omni_cortex-1.3.0.dist-info/RECORD +0 -20
  19. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
  20. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
  21. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
  22. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/uv.lock +0 -0
  23. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/stop.py +0 -0
  24. {omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
  25. {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/WHEEL +0 -0
  26. {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/entry_points.txt +0 -0
  27. {omni_cortex-1.3.0.dist-info → omni_cortex-1.11.3.dist-info}/licenses/LICENSE +0 -0

{omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/main.py

@@ -3,6 +3,7 @@
 
 import asyncio
 import json
+import os
 import traceback
 from contextlib import asynccontextmanager
 from datetime import datetime
@@ -10,16 +11,29 @@ from pathlib import Path
 from typing import Optional
 
 import uvicorn
-from fastapi import FastAPI, HTTPException, Query, WebSocket, WebSocketDisconnect
+from fastapi import FastAPI, HTTPException, Query, WebSocket, WebSocketDisconnect, Request, Depends
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
-from fastapi.responses import FileResponse
+from fastapi.responses import FileResponse, Response
+from starlette.middleware.base import BaseHTTPMiddleware
 from watchdog.events import FileSystemEventHandler
 from watchdog.observers import Observer
 
+# Rate limiting imports (optional - graceful degradation if not installed)
+try:
+    from slowapi import Limiter, _rate_limit_exceeded_handler
+    from slowapi.util import get_remote_address
+    from slowapi.errors import RateLimitExceeded
+    RATE_LIMITING_AVAILABLE = True
+except ImportError:
+    RATE_LIMITING_AVAILABLE = False
+    Limiter = None
+
 from database import (
     bulk_update_memory_status,
+    create_memory,
     delete_memory,
+    ensure_migrations,
     get_activities,
     get_activity_detail,
     get_activity_heatmap,
@@ -44,11 +58,16 @@ from database import (
 )
 from logging_config import log_success, log_error
 from models import (
+    AggregateChatRequest,
+    AggregateMemoryRequest,
+    AggregateStatsRequest,
+    AggregateStatsResponse,
     ChatRequest,
     ChatResponse,
     ConversationSaveRequest,
     ConversationSaveResponse,
     FilterParams,
+    MemoryCreateRequest,
     MemoryUpdate,
     ProjectInfo,
     ProjectRegistration,
@@ -70,6 +89,48 @@ from project_scanner import scan_projects
 from websocket_manager import manager
 import chat_service
 from image_service import image_service, ImagePreset, SingleImageRequest
+from security import PathValidator, get_cors_config, IS_PRODUCTION
+
+
+class SecurityHeadersMiddleware(BaseHTTPMiddleware):
+    """Add security headers to all responses."""
+
+    async def dispatch(self, request: Request, call_next) -> Response:
+        response = await call_next(request)
+
+        # Prevent MIME type sniffing
+        response.headers["X-Content-Type-Options"] = "nosniff"
+
+        # Prevent clickjacking
+        response.headers["X-Frame-Options"] = "DENY"
+
+        # XSS protection (legacy browsers)
+        response.headers["X-XSS-Protection"] = "1; mode=block"
+
+        # Content Security Policy
+        response.headers["Content-Security-Policy"] = (
+            "default-src 'self'; "
+            "script-src 'self' 'unsafe-inline' 'unsafe-eval'; "  # Vue needs these
+            "style-src 'self' 'unsafe-inline'; "  # Tailwind needs inline
+            "img-src 'self' data: blob: https:; "  # Allow AI-generated images
+            "connect-src 'self' ws: wss: https://generativelanguage.googleapis.com; "
+            "font-src 'self'; "
+            "frame-ancestors 'none';"
+        )
+
+        # HSTS (only in production with HTTPS)
+        if IS_PRODUCTION and os.getenv("SSL_CERTFILE"):
+            response.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
+
+        return response
+
+
+def validate_project_path(project: str = Query(..., description="Path to the database file")) -> Path:
+    """Validate project database path - dependency for endpoints."""
+    try:
+        return PathValidator.validate_project_path(project)
+    except ValueError as e:
+        raise HTTPException(status_code=400, detail=str(e))
 
 
 class DatabaseChangeHandler(FileSystemEventHandler):
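
Note: security.py (new in 1.11.3, listed above at +104 lines) is not included in this diff, so only the imported names PathValidator, get_cors_config, and IS_PRODUCTION are visible. A minimal sketch of what such helpers could look like, assuming an ALLOWED_ORIGINS environment variable and the cortex.db / global.db naming the file watcher reacts to; the shipped module may well differ:

# Hypothetical sketch only -- the real security.py is not part of this diff.
import os
from pathlib import Path

IS_PRODUCTION = os.getenv("ENVIRONMENT", "development") == "production"  # assumed env var name


class PathValidator:
    ALLOWED_DB_NAMES = {"cortex.db", "global.db"}  # same names the file watcher checks

    @staticmethod
    def validate_project_path(project: str) -> Path:
        """Raise ValueError unless the path points at an existing, expected database file."""
        path = Path(project).expanduser().resolve()
        if path.name not in PathValidator.ALLOWED_DB_NAMES:
            raise ValueError(f"Unexpected database file: {path.name}")
        if not path.is_file():
            raise ValueError(f"Database not found: {path}")
        return path


def get_cors_config() -> dict:
    """Environment-aware CORS: explicit origins in production, Vite dev servers otherwise."""
    if IS_PRODUCTION:
        origins = [o for o in os.getenv("ALLOWED_ORIGINS", "").split(",") if o]  # assumed env var
    else:
        origins = ["http://localhost:5173", "http://127.0.0.1:5173"]  # the old hard-coded dev list
    return {
        "allow_origins": origins,
        "allow_methods": ["GET", "POST", "PUT", "DELETE", "OPTIONS"],
        "allow_headers": ["*"],
    }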
@@ -80,6 +141,7 @@ class DatabaseChangeHandler(FileSystemEventHandler):
         self.loop = loop
         self._debounce_task: Optional[asyncio.Task] = None
         self._last_path: Optional[str] = None
+        self._last_activity_count: dict[str, int] = {}
 
     def on_modified(self, event):
         if event.src_path.endswith("cortex.db") or event.src_path.endswith("global.db"):
@@ -91,9 +153,35 @@ class DatabaseChangeHandler(FileSystemEventHandler):
             )
 
     async def _debounced_notify(self):
-        await asyncio.sleep(0.5)  # Wait for rapid changes to settle
+        await asyncio.sleep(0.3)  # Reduced from 0.5s for faster updates
         if self._last_path:
-            await self.ws_manager.broadcast("database_changed", {"path": self._last_path})
+            db_path = self._last_path
+
+            # Broadcast general database change
+            await self.ws_manager.broadcast("database_changed", {"path": db_path})
+
+            # Fetch and broadcast latest activities (IndyDevDan pattern)
+            try:
+                # Get recent activities
+                recent = get_activities(db_path, limit=5, offset=0)
+                if recent:
+                    # Broadcast each new activity
+                    for activity in recent:
+                        await self.ws_manager.broadcast_activity_logged(
+                            db_path,
+                            activity if isinstance(activity, dict) else activity.model_dump()
+                        )
+
+                # Also broadcast session update
+                sessions = get_recent_sessions(db_path, limit=1)
+                if sessions:
+                    session = sessions[0]
+                    await self.ws_manager.broadcast_session_updated(
+                        db_path,
+                        session if isinstance(session, dict) else dict(session)
+                    )
+            except Exception as e:
+                print(f"[WS] Error broadcasting activities: {e}")
 
 
 # File watcher
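
The body of on_modified sits mostly outside these hunks; the stored loop, _debounce_task, and _last_path imply the usual watchdog-to-asyncio debounce: cancel any pending notification and reschedule _debounced_notify on the server's event loop from watchdog's worker thread. A rough sketch of that pattern (not the shipped code):

# Sketch of the implied debounce scheduling; watchdog invokes this off the event-loop thread.
import asyncio

def on_modified(self, event):
    if event.src_path.endswith("cortex.db") or event.src_path.endswith("global.db"):
        self._last_path = event.src_path
        # Collapse bursts of writes: cancel the pending broadcast before scheduling a new one.
        if self._debounce_task and not self._debounce_task.done():
            self._debounce_task.cancel()
        # Hand the coroutine to the FastAPI event loop from the watchdog thread.
        self._debounce_task = asyncio.run_coroutine_threadsafe(
            self._debounced_notify(), self.loop
        )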
@@ -137,13 +225,39 @@ app = FastAPI(
     lifespan=lifespan,
 )
 
-# CORS for frontend dev server
+# Add security headers middleware (MUST come before CORS)
+app.add_middleware(SecurityHeadersMiddleware)
+
+# Rate limiting (if available)
+if RATE_LIMITING_AVAILABLE:
+    limiter = Limiter(key_func=get_remote_address)
+    app.state.limiter = limiter
+    app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
+else:
+    limiter = None
+
+
+def rate_limit(limit_string: str):
+    """Decorator for conditional rate limiting.
+
+    Returns the actual limiter decorator if available, otherwise a no-op.
+    Usage: @rate_limit("10/minute")
+    """
+    if limiter is not None:
+        return limiter.limit(limit_string)
+    # No-op decorator when rate limiting is not available
+    def noop_decorator(func):
+        return func
+    return noop_decorator
+
+# CORS configuration (environment-aware)
+cors_config = get_cors_config()
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["http://localhost:5173", "http://127.0.0.1:5173"],
+    allow_origins=cors_config["allow_origins"],
     allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
+    allow_methods=cors_config["allow_methods"],
+    allow_headers=cors_config["allow_headers"],
 )
 
 # Static files for production build
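
The conditional decorator is what lets every route below carry a limit annotation while keeping slowapi an optional dependency: if the import at the top of the file fails, rate_limit(...) returns an identity decorator and the endpoints behave exactly as before; with slowapi installed, requests beyond the stated budget are answered by its registered RateLimitExceeded handler (HTTP 429). A compressed, standalone restatement of the same pattern:

# Standalone restatement of the optional-dependency decorator pattern used above.
try:
    from slowapi import Limiter
    from slowapi.util import get_remote_address
    limiter = Limiter(key_func=get_remote_address)
except ImportError:
    limiter = None  # slowapi not installed: limits silently become no-ops


def rate_limit(limit_string: str):
    """Return slowapi's decorator when available, otherwise leave the function untouched."""
    if limiter is not None:
        return limiter.limit(limit_string)
    return lambda func: func


@rate_limit("10/minute")  # enforced only when slowapi is importable; otherwise a plain coroutine
async def ping():
    return {"ok": True}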
@@ -238,7 +352,200 @@ async def refresh_projects():
     return {"count": len(projects)}
 
 
+# --- Aggregate Multi-Project Endpoints ---
+
+
+@app.post("/api/aggregate/memories")
+@rate_limit("50/minute")
+async def get_aggregate_memories(request: AggregateMemoryRequest):
+    """Get memories from multiple projects with project attribution."""
+    try:
+        all_memories = []
+        filters = request.filters or FilterParams()
+
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                memories = get_memories(project_path, filters)
+                # Add project attribution to each memory
+                for m in memories:
+                    m_dict = m.model_dump()
+                    m_dict['source_project'] = project_path
+                    # Extract project name from path
+                    project_dir = Path(project_path).parent
+                    m_dict['source_project_name'] = project_dir.name
+                    all_memories.append(m_dict)
+            except Exception as e:
+                log_error(f"/api/aggregate/memories (project: {project_path})", e)
+                continue
+
+        # Sort by last_accessed or created_at (convert to str to handle mixed tz-aware/naive)
+        all_memories.sort(
+            key=lambda x: str(x.get('last_accessed') or x.get('created_at') or ''),
+            reverse=True
+        )
+
+        # Apply pagination
+        start = filters.offset
+        end = start + filters.limit
+        return all_memories[start:end]
+    except Exception as e:
+        log_error("/api/aggregate/memories", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post("/api/aggregate/stats", response_model=AggregateStatsResponse)
+@rate_limit("50/minute")
+async def get_aggregate_stats(request: AggregateStatsRequest):
+    """Get combined statistics across multiple projects."""
+    try:
+        total_count = 0
+        total_access = 0
+        importance_sum = 0
+        by_type = {}
+        by_status = {}
+
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                stats = get_memory_stats(project_path)
+                total_count += stats.total_count
+                total_access += stats.total_access_count
+
+                # Weighted average for importance
+                project_count = stats.total_count
+                project_avg_importance = stats.avg_importance
+                importance_sum += project_avg_importance * project_count
+
+                # Aggregate by_type
+                for type_name, count in stats.by_type.items():
+                    by_type[type_name] = by_type.get(type_name, 0) + count
+
+                # Aggregate by_status
+                for status, count in stats.by_status.items():
+                    by_status[status] = by_status.get(status, 0) + count
+            except Exception as e:
+                log_error(f"/api/aggregate/stats (project: {project_path})", e)
+                continue
+
+        return AggregateStatsResponse(
+            total_count=total_count,
+            total_access_count=total_access,
+            avg_importance=round(importance_sum / total_count, 1) if total_count > 0 else 0,
+            by_type=by_type,
+            by_status=by_status,
+            project_count=len(request.projects),
+        )
+    except Exception as e:
+        log_error("/api/aggregate/stats", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post("/api/aggregate/tags")
+@rate_limit("50/minute")
+async def get_aggregate_tags(request: AggregateStatsRequest):
+    """Get combined tags across multiple projects."""
+    try:
+        tag_counts = {}
+
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                tags = get_all_tags(project_path)
+                for tag in tags:
+                    tag_name = tag['name']
+                    tag_counts[tag_name] = tag_counts.get(tag_name, 0) + tag['count']
+            except Exception as e:
+                log_error(f"/api/aggregate/tags (project: {project_path})", e)
+                continue
+
+        # Return sorted by count
+        return sorted(
+            [{'name': k, 'count': v} for k, v in tag_counts.items()],
+            key=lambda x: x['count'],
+            reverse=True
+        )
+    except Exception as e:
+        log_error("/api/aggregate/tags", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post("/api/aggregate/chat", response_model=ChatResponse)
+@rate_limit("10/minute")
+async def chat_across_projects(request: AggregateChatRequest):
+    """Ask AI about memories across multiple projects."""
+    try:
+        if not chat_service.is_available():
+            raise HTTPException(
+                status_code=503,
+                detail="Chat service not available. Set GEMINI_API_KEY environment variable."
+            )
+
+        all_sources = []
+
+        # Gather relevant memories from each project
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                memories = search_memories(
+                    project_path,
+                    request.question,
+                    limit=request.max_memories_per_project
+                )
+
+                for m in memories:
+                    project_dir = Path(project_path).parent
+                    source = {
+                        'id': m.id,
+                        'type': m.memory_type,
+                        'content_preview': m.content[:200],
+                        'tags': m.tags,
+                        'project_path': project_path,
+                        'project_name': project_dir.name,
+                    }
+                    all_sources.append(source)
+            except Exception as e:
+                log_error(f"/api/aggregate/chat (project: {project_path})", e)
+                continue
+
+        if not all_sources:
+            return ChatResponse(
+                answer="No relevant memories found across the selected projects.",
+                sources=[],
+            )
+
+        # Build context with project attribution
+        context = "\n\n".join([
+            f"[From: {s['project_name']}] {s['content_preview']}"
+            for s in all_sources
+        ])
+
+        # Query AI with attributed context
+        answer = await chat_service.generate_response(request.question, context)
+
+        log_success("/api/aggregate/chat", projects=len(request.projects), sources=len(all_sources))
+
+        return ChatResponse(
+            answer=answer,
+            sources=[ChatSource(**s) for s in all_sources],
+        )
+    except HTTPException:
+        raise
+    except Exception as e:
+        log_error("/api/aggregate/chat", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
 @app.get("/api/memories")
+@rate_limit("100/minute")
 async def list_memories(
     project: str = Query(..., description="Path to the database file"),
     memory_type: Optional[str] = Query(None, alias="type"),
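
The four aggregate endpoints above take a JSON body rather than the usual ?project= query parameter, since each call fans out over several databases; avg_importance in the stats response is a count-weighted average across projects. An illustrative client call, with placeholder host, port, and database paths:

# Illustrative only: placeholder host and database paths.
import httpx

projects = [
    "/home/me/project-a/cortex.db",
    "/home/me/project-b/cortex.db",
]

stats = httpx.post("http://localhost:8000/api/aggregate/stats", json={"projects": projects}).json()
print(stats["total_count"], stats["avg_importance"], stats["by_type"])

memories = httpx.post(
    "http://localhost:8000/api/aggregate/memories",
    json={"projects": projects, "filters": {"search": "auth", "limit": 20}},
).json()
for m in memories:
    print(m["source_project_name"], m["id"])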
@@ -279,6 +586,46 @@ async def list_memories(
         raise
 
 
+@app.post("/api/memories")
+@rate_limit("30/minute")
+async def create_memory_endpoint(
+    request: MemoryCreateRequest,
+    project: str = Query(..., description="Path to the database file"),
+):
+    """Create a new memory."""
+    try:
+        if not Path(project).exists():
+            log_error("/api/memories POST", FileNotFoundError("Database not found"), project=project)
+            raise HTTPException(status_code=404, detail="Database not found")
+
+        # Create the memory
+        memory_id = create_memory(
+            db_path=project,
+            content=request.content,
+            memory_type=request.memory_type,
+            context=request.context,
+            tags=request.tags if request.tags else None,
+            importance_score=request.importance_score,
+        )
+
+        # Fetch the created memory to return it
+        created_memory = get_memory_by_id(project, memory_id)
+
+        # Broadcast to WebSocket clients
+        await manager.broadcast("memory_created", created_memory.model_dump(by_alias=True))
+
+        log_success("/api/memories POST", memory_id=memory_id, type=request.memory_type)
+        return created_memory
+    except HTTPException:
+        raise
+    except Exception as e:
+        import traceback
+        print(f"[DEBUG] create_memory_endpoint error: {type(e).__name__}: {e}")
+        traceback.print_exc()
+        log_error("/api/memories POST", e, project=project)
+        raise
+
+
 # NOTE: These routes MUST be defined before /api/memories/{memory_id} to avoid path conflicts
 @app.get("/api/memories/needs-review")
 async def get_memories_needing_review_endpoint(
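
POST /api/memories keeps the existing convention of passing the database path as a ?project= query parameter, with the body validated against MemoryCreateRequest (shown in the models.py hunks further down). An illustrative call, again with placeholder values:

# Illustrative only: placeholder host, path, and field values.
import httpx

resp = httpx.post(
    "http://localhost:8000/api/memories",
    params={"project": "/home/me/project-a/cortex.db"},
    json={
        "content": "Switched the hook writers to SQLite WAL mode.",
        "memory_type": "decision",        # optional; defaults to "general"
        "importance_score": 70,           # validated to the 1-100 range
        "tags": ["sqlite", "hooks"],
    },
)
resp.raise_for_status()
print(resp.json())  # the created memory, as broadcast to WebSocket clients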
@@ -421,6 +768,9 @@ async def list_activities(
     if not Path(project).exists():
         raise HTTPException(status_code=404, detail="Database not found")
 
+    # Ensure migrations are applied (adds summary columns if missing)
+    ensure_migrations(project)
+
     return get_activities(project, event_type, tool_name, limit, offset)
 
 
@@ -561,6 +911,9 @@ async def get_activity_detail_endpoint(
     if not Path(project).exists():
         raise HTTPException(status_code=404, detail="Database not found")
 
+    # Ensure migrations are applied
+    ensure_migrations(project)
+
     activity = get_activity_detail(project, activity_id)
     if not activity:
         raise HTTPException(status_code=404, detail="Activity not found")
@@ -568,6 +921,26 @@ async def get_activity_detail_endpoint(
     return activity
 
 
+@app.post("/api/activities/backfill-summaries")
+async def backfill_activity_summaries_endpoint(
+    project: str = Query(..., description="Path to the database file"),
+):
+    """Generate summaries for existing activities that don't have them."""
+    if not Path(project).exists():
+        raise HTTPException(status_code=404, detail="Database not found")
+
+    try:
+        from backfill_summaries import backfill_all
+        results = backfill_all(project)
+        return {
+            "success": True,
+            "summaries_updated": results["summaries"],
+            "mcp_servers_updated": results["mcp_servers"],
+        }
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Backfill failed: {str(e)}")
+
+
 # --- Session Context Endpoints ---
 
 
@@ -624,6 +997,7 @@ async def chat_status():
 
 
 @app.post("/api/chat", response_model=ChatResponse)
+@rate_limit("10/minute")
 async def chat_with_memories(
     request: ChatRequest,
     project: str = Query(..., description="Path to the database file"),
@@ -650,6 +1024,7 @@ async def chat_with_memories(
 
 
 @app.get("/api/chat/stream")
+@rate_limit("10/minute")
 async def stream_chat(
     project: str = Query(..., description="Path to the database file"),
     question: str = Query(..., description="The question to ask"),
@@ -726,6 +1101,7 @@ async def get_image_presets():
 
 
 @app.post("/api/image/generate-batch", response_model=BatchImageGenerationResponse)
+@rate_limit("5/minute")
 async def generate_images_batch(
     request: BatchImageGenerationRequest,
     db_path: str = Query(..., alias="project", description="Path to the database file"),
@@ -783,6 +1159,7 @@ async def generate_images_batch(
 
 
 @app.post("/api/image/refine", response_model=SingleImageResponseModel)
+@rate_limit("5/minute")
 async def refine_image(request: ImageRefineRequest):
     """Refine an existing generated image with a new prompt."""
     result = await image_service.refine_image(
@@ -971,15 +1348,15 @@ async def serve_root():
 
 @app.get("/{path:path}")
 async def serve_spa(path: str):
-    """Catch-all route to serve SPA for client-side routing."""
+    """Catch-all route to serve SPA for client-side routing with path traversal protection."""
     # Skip API routes and known paths
     if path.startswith(("api/", "ws", "health", "docs", "openapi", "redoc")):
         raise HTTPException(status_code=404, detail="Not found")
 
-    # Check if it's a static file
-    file_path = DIST_DIR / path
-    if file_path.exists() and file_path.is_file():
-        return FileResponse(str(file_path))
+    # Check if it's a static file (with path traversal protection)
+    safe_path = PathValidator.is_safe_static_path(DIST_DIR, path)
+    if safe_path:
+        return FileResponse(str(safe_path))
 
     # Otherwise serve index.html for SPA routing
     index_file = DIST_DIR / "index.html"
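
PathValidator.is_safe_static_path also lives in the unshown security.py. Its use here (a truthy return value is handed straight to FileResponse) suggests it resolves the requested path against DIST_DIR and refuses anything that escapes it; a sketch under that assumption:

# Hypothetical sketch; the shipped implementation may differ.
from pathlib import Path
from typing import Optional


def is_safe_static_path(base_dir: Path, requested: str) -> Optional[Path]:
    """Return the resolved file inside base_dir, or None for traversal attempts and misses."""
    base = base_dir.resolve()
    candidate = (base / requested).resolve()
    if not candidate.is_relative_to(base):  # Python 3.9+; rejects ../ escapes and absolute paths
        return None
    return candidate if candidate.is_file() else None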

{omni_cortex-1.3.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/models.py

@@ -70,6 +70,53 @@ class MemoryStats(BaseModel):
     tags: list[dict[str, int | str]]
 
 
+class FilterParams(BaseModel):
+    """Query filter parameters."""
+
+    memory_type: Optional[str] = None
+    status: Optional[str] = None
+    tags: Optional[list[str]] = None
+    search: Optional[str] = None
+    min_importance: Optional[int] = None
+    max_importance: Optional[int] = None
+    sort_by: str = "last_accessed"
+    sort_order: str = "desc"
+    limit: int = 50
+    offset: int = 0
+
+
+class AggregateMemoryRequest(BaseModel):
+    """Request for aggregate memory data across projects."""
+
+    projects: list[str] = Field(..., description="List of project db paths")
+    filters: Optional[FilterParams] = None
+
+
+class AggregateStatsRequest(BaseModel):
+    """Request for aggregate statistics."""
+
+    projects: list[str] = Field(..., description="List of project db paths")
+
+
+class AggregateStatsResponse(BaseModel):
+    """Aggregate statistics across multiple projects."""
+
+    total_count: int
+    total_access_count: int
+    avg_importance: float
+    by_type: dict[str, int]
+    by_status: dict[str, int]
+    project_count: int
+
+
+class AggregateChatRequest(BaseModel):
+    """Request for chat across multiple projects."""
+
+    projects: list[str] = Field(..., description="List of project db paths")
+    question: str = Field(..., min_length=1, max_length=2000)
+    max_memories_per_project: int = Field(default=5, ge=1, le=20)
+
+
 class Activity(BaseModel):
     """Activity log record."""
 
@@ -84,6 +131,14 @@ class Activity(BaseModel):
     duration_ms: Optional[int] = None
     file_path: Optional[str] = None
     timestamp: datetime
+    # Command analytics fields
+    command_name: Optional[str] = None
+    command_scope: Optional[str] = None
+    mcp_server: Optional[str] = None
+    skill_name: Optional[str] = None
+    # Natural language summary fields
+    summary: Optional[str] = None
+    summary_detail: Optional[str] = None
 
 
 class Session(BaseModel):
@@ -105,19 +160,14 @@ class TimelineEntry(BaseModel):
     data: dict
 
 
-class FilterParams(BaseModel):
-    """Query filter parameters."""
+class MemoryCreateRequest(BaseModel):
+    """Create request for a new memory."""
 
-    memory_type: Optional[str] = None
-    status: Optional[str] = None
-    tags: Optional[list[str]] = None
-    search: Optional[str] = None
-    min_importance: Optional[int] = None
-    max_importance: Optional[int] = None
-    sort_by: str = "last_accessed"
-    sort_order: str = "desc"
-    limit: int = 50
-    offset: int = 0
+    content: str = Field(..., min_length=1, max_length=50000)
+    memory_type: str = Field(default="general")
+    context: Optional[str] = None
+    importance_score: int = Field(default=50, ge=1, le=100)
+    tags: list[str] = Field(default_factory=list)
 
 
 class MemoryUpdate(BaseModel):
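
The Field constraints on these request models are what enforce input limits for the new endpoints before any route code runs (the codebase is Pydantic v2, as the model_dump() calls in main.py show). A quick illustration, assuming the models are importable from this module:

# Illustration of the declared constraints (assumes `from models import ...` resolves in this package).
from pydantic import ValidationError
from models import AggregateChatRequest, MemoryCreateRequest

MemoryCreateRequest(content="Rotate the Gemini API key monthly")  # defaults: type "general", importance 50

try:
    MemoryCreateRequest(content="x", importance_score=500)  # outside the ge=1, le=100 range
except ValidationError as exc:
    print(exc.error_count(), "validation error")

AggregateChatRequest(projects=["/home/me/project-a/cortex.db"], question="What changed last week?")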
@@ -155,6 +205,8 @@ class ChatSource(BaseModel):
     type: str
     content_preview: str
     tags: list[str]
+    project_path: Optional[str] = None
+    project_name: Optional[str] = None
 
 
 class ChatResponse(BaseModel):