omni-cortex 1.2.0__py3-none-any.whl → 1.11.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/.env.example +12 -0
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/backfill_summaries.py +280 -0
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/chat_service.py +59 -32
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/database.py +305 -18
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/image_service.py +35 -16
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/logging_config.py +34 -4
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/main.py +451 -13
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/models.py +64 -12
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/prompt_security.py +111 -0
- omni_cortex-1.11.3.data/data/share/omni-cortex/dashboard/backend/security.py +104 -0
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/uv.lock +414 -1
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/websocket_manager.py +24 -2
- omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/post_tool_use.py +429 -0
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/pre_tool_use.py +52 -2
- omni_cortex-1.11.3.data/data/share/omni-cortex/hooks/session_utils.py +186 -0
- {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/METADATA +237 -8
- omni_cortex-1.11.3.dist-info/RECORD +25 -0
- omni_cortex-1.2.0.data/data/share/omni-cortex/hooks/post_tool_use.py +0 -160
- omni_cortex-1.2.0.dist-info/RECORD +0 -20
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_config.py +0 -0
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/project_scanner.py +0 -0
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/pyproject.toml +0 -0
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/stop.py +0 -0
- {omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/hooks/subagent_stop.py +0 -0
- {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/WHEEL +0 -0
- {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/entry_points.txt +0 -0
- {omni_cortex-1.2.0.dist-info → omni_cortex-1.11.3.dist-info}/licenses/LICENSE +0 -0
{omni_cortex-1.2.0.data → omni_cortex-1.11.3.data}/data/share/omni-cortex/dashboard/backend/main.py
RENAMED
@@ -3,6 +3,7 @@
 
 import asyncio
 import json
+import os
 import traceback
 from contextlib import asynccontextmanager
 from datetime import datetime
@@ -10,19 +11,35 @@ from pathlib import Path
 from typing import Optional
 
 import uvicorn
-from fastapi import FastAPI, HTTPException, Query, WebSocket, WebSocketDisconnect
+from fastapi import FastAPI, HTTPException, Query, WebSocket, WebSocketDisconnect, Request, Depends
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.staticfiles import StaticFiles
-from fastapi.responses import FileResponse
+from fastapi.responses import FileResponse, Response
+from starlette.middleware.base import BaseHTTPMiddleware
 from watchdog.events import FileSystemEventHandler
 from watchdog.observers import Observer
 
+# Rate limiting imports (optional - graceful degradation if not installed)
+try:
+    from slowapi import Limiter, _rate_limit_exceeded_handler
+    from slowapi.util import get_remote_address
+    from slowapi.errors import RateLimitExceeded
+    RATE_LIMITING_AVAILABLE = True
+except ImportError:
+    RATE_LIMITING_AVAILABLE = False
+    Limiter = None
+
 from database import (
     bulk_update_memory_status,
+    create_memory,
     delete_memory,
+    ensure_migrations,
     get_activities,
+    get_activity_detail,
     get_activity_heatmap,
     get_all_tags,
+    get_command_usage,
+    get_mcp_usage,
     get_memories,
     get_memories_needing_review,
     get_memory_by_id,
@@ -32,6 +49,7 @@ from database import (
     get_relationship_graph,
     get_relationships,
     get_sessions,
+    get_skill_usage,
     get_timeline,
     get_tool_usage,
     get_type_distribution,
@@ -40,11 +58,16 @@ from database import (
 )
 from logging_config import log_success, log_error
 from models import (
+    AggregateChatRequest,
+    AggregateMemoryRequest,
+    AggregateStatsRequest,
+    AggregateStatsResponse,
     ChatRequest,
     ChatResponse,
     ConversationSaveRequest,
     ConversationSaveResponse,
     FilterParams,
+    MemoryCreateRequest,
     MemoryUpdate,
     ProjectInfo,
     ProjectRegistration,
@@ -66,6 +89,48 @@ from project_scanner import scan_projects
 from websocket_manager import manager
 import chat_service
 from image_service import image_service, ImagePreset, SingleImageRequest
+from security import PathValidator, get_cors_config, IS_PRODUCTION
+
+
+class SecurityHeadersMiddleware(BaseHTTPMiddleware):
+    """Add security headers to all responses."""
+
+    async def dispatch(self, request: Request, call_next) -> Response:
+        response = await call_next(request)
+
+        # Prevent MIME type sniffing
+        response.headers["X-Content-Type-Options"] = "nosniff"
+
+        # Prevent clickjacking
+        response.headers["X-Frame-Options"] = "DENY"
+
+        # XSS protection (legacy browsers)
+        response.headers["X-XSS-Protection"] = "1; mode=block"
+
+        # Content Security Policy
+        response.headers["Content-Security-Policy"] = (
+            "default-src 'self'; "
+            "script-src 'self' 'unsafe-inline' 'unsafe-eval'; "  # Vue needs these
+            "style-src 'self' 'unsafe-inline'; "  # Tailwind needs inline
+            "img-src 'self' data: blob: https:; "  # Allow AI-generated images
+            "connect-src 'self' ws: wss: https://generativelanguage.googleapis.com; "
+            "font-src 'self'; "
+            "frame-ancestors 'none';"
+        )
+
+        # HSTS (only in production with HTTPS)
+        if IS_PRODUCTION and os.getenv("SSL_CERTFILE"):
+            response.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
+
+        return response
+
+
+def validate_project_path(project: str = Query(..., description="Path to the database file")) -> Path:
+    """Validate project database path - dependency for endpoints."""
+    try:
+        return PathValidator.validate_project_path(project)
+    except ValueError as e:
+        raise HTTPException(status_code=400, detail=str(e))
 
 
 class DatabaseChangeHandler(FileSystemEventHandler):
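Note on the SecurityHeadersMiddleware hunk above: it is a standard Starlette BaseHTTPMiddleware that stamps headers on every response after the handler runs. A minimal, self-contained sketch of the same pattern, not package code (it assumes fastapi and httpx are installed for the test client, and uses an illustrative class name):

import os

from fastapi import FastAPI
from fastapi.testclient import TestClient
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response


class SketchHeadersMiddleware(BaseHTTPMiddleware):
    """Illustrative: add a couple of the headers set in the diff above."""

    async def dispatch(self, request: Request, call_next) -> Response:
        response = await call_next(request)
        response.headers["X-Content-Type-Options"] = "nosniff"
        response.headers["X-Frame-Options"] = "DENY"
        return response


app = FastAPI()
app.add_middleware(SketchHeadersMiddleware)


@app.get("/ping")
def ping():
    return {"ok": True}


# Verify the headers land on every response, even for a trivial route.
client = TestClient(app)
assert client.get("/ping").headers["X-Frame-Options"] == "DENY"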
@@ -76,6 +141,7 @@ class DatabaseChangeHandler(FileSystemEventHandler):
         self.loop = loop
         self._debounce_task: Optional[asyncio.Task] = None
         self._last_path: Optional[str] = None
+        self._last_activity_count: dict[str, int] = {}
 
     def on_modified(self, event):
         if event.src_path.endswith("cortex.db") or event.src_path.endswith("global.db"):
@@ -87,9 +153,35 @@
         )
 
     async def _debounced_notify(self):
-        await asyncio.sleep(0.5)
+        await asyncio.sleep(0.3)  # Reduced from 0.5s for faster updates
         if self._last_path:
-
+            db_path = self._last_path
+
+            # Broadcast general database change
+            await self.ws_manager.broadcast("database_changed", {"path": db_path})
+
+            # Fetch and broadcast latest activities (IndyDevDan pattern)
+            try:
+                # Get recent activities
+                recent = get_activities(db_path, limit=5, offset=0)
+                if recent:
+                    # Broadcast each new activity
+                    for activity in recent:
+                        await self.ws_manager.broadcast_activity_logged(
+                            db_path,
+                            activity if isinstance(activity, dict) else activity.model_dump()
+                        )
+
+                # Also broadcast session update
+                sessions = get_recent_sessions(db_path, limit=1)
+                if sessions:
+                    session = sessions[0]
+                    await self.ws_manager.broadcast_session_updated(
+                        db_path,
+                        session if isinstance(session, dict) else dict(session)
+                    )
+            except Exception as e:
+                print(f"[WS] Error broadcasting activities: {e}")
 
 
 # File watcher
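The _debounced_notify change above relies on the usual asyncio debounce pattern: each filesystem event cancels the pending task and restarts a short timer, so a burst of writes produces a single broadcast. A standalone sketch of that pattern (illustrative names, not the package's):

import asyncio
from typing import Optional


class Debouncer:
    def __init__(self, delay: float = 0.3):
        self.delay = delay
        self._task: Optional[asyncio.Task] = None

    def trigger(self, callback):
        # Cancel any pending notification and restart the timer.
        if self._task and not self._task.done():
            self._task.cancel()
        self._task = asyncio.ensure_future(self._run(callback))

    async def _run(self, callback):
        try:
            await asyncio.sleep(self.delay)
            await callback()
        except asyncio.CancelledError:
            pass  # superseded by a newer event


async def main():
    d = Debouncer()

    async def notify():
        print("broadcast once, after the burst settles")

    for _ in range(5):        # simulate a burst of file-change events
        d.trigger(notify)
        await asyncio.sleep(0.05)
    await asyncio.sleep(0.5)  # let the final timer fire

asyncio.run(main())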
@@ -133,13 +225,39 @@ app = FastAPI(
     lifespan=lifespan,
 )
 
-#
+# Add security headers middleware (MUST come before CORS)
+app.add_middleware(SecurityHeadersMiddleware)
+
+# Rate limiting (if available)
+if RATE_LIMITING_AVAILABLE:
+    limiter = Limiter(key_func=get_remote_address)
+    app.state.limiter = limiter
+    app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
+else:
+    limiter = None
+
+
+def rate_limit(limit_string: str):
+    """Decorator for conditional rate limiting.
+
+    Returns the actual limiter decorator if available, otherwise a no-op.
+    Usage: @rate_limit("10/minute")
+    """
+    if limiter is not None:
+        return limiter.limit(limit_string)
+    # No-op decorator when rate limiting is not available
+    def noop_decorator(func):
+        return func
+    return noop_decorator
+
+# CORS configuration (environment-aware)
+cors_config = get_cors_config()
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["
+    allow_origins=cors_config["allow_origins"],
     allow_credentials=True,
-    allow_methods=["
-    allow_headers=["
+    allow_methods=cors_config["allow_methods"],
+    allow_headers=cors_config["allow_headers"],
 )
 
 # Static files for production build
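The rate_limit helper above is a decorator factory: the slowapi availability check happens once, and endpoint definitions stay identical whether or not the optional dependency is installed. A minimal sketch of the degraded path (standalone, not package code):

# Simulate the fallback branch: slowapi absent, so limiter is None.
limiter = None


def rate_limit(limit_string: str):
    """Return the real slowapi decorator when available, else a no-op."""
    if limiter is not None:
        return limiter.limit(limit_string)

    def noop_decorator(func):
        return func

    return noop_decorator


@rate_limit("10/minute")
def endpoint():
    return "ok"


# With limiter=None the decorator is a pure pass-through.
assert endpoint() == "ok"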
@@ -234,7 +352,200 @@ async def refresh_projects():
     return {"count": len(projects)}
 
 
+# --- Aggregate Multi-Project Endpoints ---
+
+
+@app.post("/api/aggregate/memories")
+@rate_limit("50/minute")
+async def get_aggregate_memories(request: AggregateMemoryRequest):
+    """Get memories from multiple projects with project attribution."""
+    try:
+        all_memories = []
+        filters = request.filters or FilterParams()
+
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                memories = get_memories(project_path, filters)
+                # Add project attribution to each memory
+                for m in memories:
+                    m_dict = m.model_dump()
+                    m_dict['source_project'] = project_path
+                    # Extract project name from path
+                    project_dir = Path(project_path).parent
+                    m_dict['source_project_name'] = project_dir.name
+                    all_memories.append(m_dict)
+            except Exception as e:
+                log_error(f"/api/aggregate/memories (project: {project_path})", e)
+                continue
+
+        # Sort by last_accessed or created_at (convert to str to handle mixed tz-aware/naive)
+        all_memories.sort(
+            key=lambda x: str(x.get('last_accessed') or x.get('created_at') or ''),
+            reverse=True
+        )
+
+        # Apply pagination
+        start = filters.offset
+        end = start + filters.limit
+        return all_memories[start:end]
+    except Exception as e:
+        log_error("/api/aggregate/memories", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post("/api/aggregate/stats", response_model=AggregateStatsResponse)
+@rate_limit("50/minute")
+async def get_aggregate_stats(request: AggregateStatsRequest):
+    """Get combined statistics across multiple projects."""
+    try:
+        total_count = 0
+        total_access = 0
+        importance_sum = 0
+        by_type = {}
+        by_status = {}
+
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                stats = get_memory_stats(project_path)
+                total_count += stats.total_count
+                total_access += stats.total_access_count
+
+                # Weighted average for importance
+                project_count = stats.total_count
+                project_avg_importance = stats.avg_importance
+                importance_sum += project_avg_importance * project_count
+
+                # Aggregate by_type
+                for type_name, count in stats.by_type.items():
+                    by_type[type_name] = by_type.get(type_name, 0) + count
+
+                # Aggregate by_status
+                for status, count in stats.by_status.items():
+                    by_status[status] = by_status.get(status, 0) + count
+            except Exception as e:
+                log_error(f"/api/aggregate/stats (project: {project_path})", e)
+                continue
+
+        return AggregateStatsResponse(
+            total_count=total_count,
+            total_access_count=total_access,
+            avg_importance=round(importance_sum / total_count, 1) if total_count > 0 else 0,
+            by_type=by_type,
+            by_status=by_status,
+            project_count=len(request.projects),
+        )
+    except Exception as e:
+        log_error("/api/aggregate/stats", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post("/api/aggregate/tags")
+@rate_limit("50/minute")
+async def get_aggregate_tags(request: AggregateStatsRequest):
+    """Get combined tags across multiple projects."""
+    try:
+        tag_counts = {}
+
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                tags = get_all_tags(project_path)
+                for tag in tags:
+                    tag_name = tag['name']
+                    tag_counts[tag_name] = tag_counts.get(tag_name, 0) + tag['count']
+            except Exception as e:
+                log_error(f"/api/aggregate/tags (project: {project_path})", e)
+                continue
+
+        # Return sorted by count
+        return sorted(
+            [{'name': k, 'count': v} for k, v in tag_counts.items()],
+            key=lambda x: x['count'],
+            reverse=True
+        )
+    except Exception as e:
+        log_error("/api/aggregate/tags", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post("/api/aggregate/chat", response_model=ChatResponse)
+@rate_limit("10/minute")
+async def chat_across_projects(request: AggregateChatRequest):
+    """Ask AI about memories across multiple projects."""
+    try:
+        if not chat_service.is_available():
+            raise HTTPException(
+                status_code=503,
+                detail="Chat service not available. Set GEMINI_API_KEY environment variable."
+            )
+
+        all_sources = []
+
+        # Gather relevant memories from each project
+        for project_path in request.projects:
+            if not Path(project_path).exists():
+                continue
+
+            try:
+                memories = search_memories(
+                    project_path,
+                    request.question,
+                    limit=request.max_memories_per_project
+                )
+
+                for m in memories:
+                    project_dir = Path(project_path).parent
+                    source = {
+                        'id': m.id,
+                        'type': m.memory_type,
+                        'content_preview': m.content[:200],
+                        'tags': m.tags,
+                        'project_path': project_path,
+                        'project_name': project_dir.name,
+                    }
+                    all_sources.append(source)
+            except Exception as e:
+                log_error(f"/api/aggregate/chat (project: {project_path})", e)
+                continue
+
+        if not all_sources:
+            return ChatResponse(
+                answer="No relevant memories found across the selected projects.",
+                sources=[],
+            )
+
+        # Build context with project attribution
+        context = "\n\n".join([
+            f"[From: {s['project_name']}] {s['content_preview']}"
+            for s in all_sources
+        ])
+
+        # Query AI with attributed context
+        answer = await chat_service.generate_response(request.question, context)
+
+        log_success("/api/aggregate/chat", projects=len(request.projects), sources=len(all_sources))
+
+        return ChatResponse(
+            answer=answer,
+            sources=[ChatSource(**s) for s in all_sources],
+        )
+    except HTTPException:
+        raise
+    except Exception as e:
+        log_error("/api/aggregate/chat", e)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
 @app.get("/api/memories")
+@rate_limit("100/minute")
 async def list_memories(
     project: str = Query(..., description="Path to the database file"),
     memory_type: Optional[str] = Query(None, alias="type"),
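A worked example of the weighted-average arithmetic in /api/aggregate/stats above: per-project avg_importance values cannot simply be averaged, they must be weighted by each project's memory count, which is why importance_sum accumulates avg * count rather than the averages themselves. Made-up numbers:

# Two hypothetical projects with very different sizes.
projects = [
    {"count": 10, "avg_importance": 8.0},  # contributes 80 total importance
    {"count": 40, "avg_importance": 5.0},  # contributes 200 total importance
]

total_count = sum(p["count"] for p in projects)  # 50
importance_sum = sum(p["avg_importance"] * p["count"] for p in projects)  # 280.0

combined = round(importance_sum / total_count, 1) if total_count > 0 else 0
assert combined == 5.6  # not the naive (8.0 + 5.0) / 2 == 6.5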
@@ -275,6 +586,46 @@ async def list_memories(
         raise
 
 
+@app.post("/api/memories")
+@rate_limit("30/minute")
+async def create_memory_endpoint(
+    request: MemoryCreateRequest,
+    project: str = Query(..., description="Path to the database file"),
+):
+    """Create a new memory."""
+    try:
+        if not Path(project).exists():
+            log_error("/api/memories POST", FileNotFoundError("Database not found"), project=project)
+            raise HTTPException(status_code=404, detail="Database not found")
+
+        # Create the memory
+        memory_id = create_memory(
+            db_path=project,
+            content=request.content,
+            memory_type=request.memory_type,
+            context=request.context,
+            tags=request.tags if request.tags else None,
+            importance_score=request.importance_score,
+        )
+
+        # Fetch the created memory to return it
+        created_memory = get_memory_by_id(project, memory_id)
+
+        # Broadcast to WebSocket clients
+        await manager.broadcast("memory_created", created_memory.model_dump(by_alias=True))
+
+        log_success("/api/memories POST", memory_id=memory_id, type=request.memory_type)
+        return created_memory
+    except HTTPException:
+        raise
+    except Exception as e:
+        import traceback
+        print(f"[DEBUG] create_memory_endpoint error: {type(e).__name__}: {e}")
+        traceback.print_exc()
+        log_error("/api/memories POST", e, project=project)
+        raise
+
+
 # NOTE: These routes MUST be defined before /api/memories/{memory_id} to avoid path conflicts
 @app.get("/api/memories/needs-review")
 async def get_memories_needing_review_endpoint(
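For reference, a hypothetical client call against the new POST /api/memories route above. The host, port, and exact MemoryCreateRequest field names are assumptions inferred from the handler, not confirmed by this diff; the database path goes in the query string and the memory fields in the JSON body:

import httpx

resp = httpx.post(
    "http://localhost:8000/api/memories",          # assumed dashboard address
    params={"project": "/path/to/cortex.db"},       # query param, as in the handler
    json={
        "content": "Prefer uv over pip for this repo",
        "memory_type": "decision",
        "context": "tooling discussion",
        "tags": ["tooling", "uv"],
        "importance_score": 7,
    },
)
resp.raise_for_status()
print(resp.json())  # the created memory, echoed back by the endpoint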
@@ -417,6 +768,9 @@ async def list_activities(
     if not Path(project).exists():
         raise HTTPException(status_code=404, detail="Database not found")
 
+    # Ensure migrations are applied (adds summary columns if missing)
+    ensure_migrations(project)
+
     return get_activities(project, event_type, tool_name, limit, offset)
 
 
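ensure_migrations itself lives in database.py and is not shown in this diff; given the comment about adding summary columns, a plausible shape is an idempotent, on-read SQLite migration. A sketch under that assumption (the table and column names are guesses):

import sqlite3


def ensure_migrations(db_path: str) -> None:
    """Add newer columns to the activities table if they are missing."""
    conn = sqlite3.connect(db_path)
    try:
        # PRAGMA table_info returns one row per column; index 1 is the name.
        cols = {row[1] for row in conn.execute("PRAGMA table_info(activities)")}
        if "summary" not in cols:  # hypothetical column name
            conn.execute("ALTER TABLE activities ADD COLUMN summary TEXT")
            conn.commit()
    finally:
        conn.close()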
@@ -507,6 +861,86 @@ async def get_memory_growth_endpoint(
     return get_memory_growth(project, days)
 
 
+# --- Command Analytics Endpoints ---
+
+
+@app.get("/api/stats/command-usage")
+async def get_command_usage_endpoint(
+    project: str = Query(..., description="Path to the database file"),
+    scope: Optional[str] = Query(None, description="Filter by scope: 'universal' or 'project'"),
+    days: int = Query(30, ge=1, le=365),
+):
+    """Get slash command usage statistics."""
+    if not Path(project).exists():
+        raise HTTPException(status_code=404, detail="Database not found")
+
+    return get_command_usage(project, scope, days)
+
+
+@app.get("/api/stats/skill-usage")
+async def get_skill_usage_endpoint(
+    project: str = Query(..., description="Path to the database file"),
+    scope: Optional[str] = Query(None, description="Filter by scope: 'universal' or 'project'"),
+    days: int = Query(30, ge=1, le=365),
+):
+    """Get skill usage statistics."""
+    if not Path(project).exists():
+        raise HTTPException(status_code=404, detail="Database not found")
+
+    return get_skill_usage(project, scope, days)
+
+
+@app.get("/api/stats/mcp-usage")
+async def get_mcp_usage_endpoint(
+    project: str = Query(..., description="Path to the database file"),
+    days: int = Query(30, ge=1, le=365),
+):
+    """Get MCP server usage statistics."""
+    if not Path(project).exists():
+        raise HTTPException(status_code=404, detail="Database not found")
+
+    return get_mcp_usage(project, days)
+
+
+@app.get("/api/activities/{activity_id}")
+async def get_activity_detail_endpoint(
+    activity_id: str,
+    project: str = Query(..., description="Path to the database file"),
+):
+    """Get full activity details including complete input/output."""
+    if not Path(project).exists():
+        raise HTTPException(status_code=404, detail="Database not found")
+
+    # Ensure migrations are applied
+    ensure_migrations(project)
+
+    activity = get_activity_detail(project, activity_id)
+    if not activity:
+        raise HTTPException(status_code=404, detail="Activity not found")
+
+    return activity
+
+
+@app.post("/api/activities/backfill-summaries")
+async def backfill_activity_summaries_endpoint(
+    project: str = Query(..., description="Path to the database file"),
+):
+    """Generate summaries for existing activities that don't have them."""
+    if not Path(project).exists():
+        raise HTTPException(status_code=404, detail="Database not found")
+
+    try:
+        from backfill_summaries import backfill_all
+        results = backfill_all(project)
+        return {
+            "success": True,
+            "summaries_updated": results["summaries"],
+            "mcp_servers_updated": results["mcp_servers"],
+        }
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Backfill failed: {str(e)}")
+
+
 # --- Session Context Endpoints ---
 
 
@@ -563,6 +997,7 @@ async def chat_status():
 
 
 @app.post("/api/chat", response_model=ChatResponse)
+@rate_limit("10/minute")
 async def chat_with_memories(
     request: ChatRequest,
     project: str = Query(..., description="Path to the database file"),
@@ -589,6 +1024,7 @@ async def chat_with_memories(
 
 
 @app.get("/api/chat/stream")
+@rate_limit("10/minute")
 async def stream_chat(
     project: str = Query(..., description="Path to the database file"),
     question: str = Query(..., description="The question to ask"),
@@ -665,6 +1101,7 @@ async def get_image_presets():
 
 
 @app.post("/api/image/generate-batch", response_model=BatchImageGenerationResponse)
+@rate_limit("5/minute")
 async def generate_images_batch(
     request: BatchImageGenerationRequest,
     db_path: str = Query(..., alias="project", description="Path to the database file"),
@@ -722,6 +1159,7 @@ async def generate_images_batch(
 
 
 @app.post("/api/image/refine", response_model=SingleImageResponseModel)
+@rate_limit("5/minute")
 async def refine_image(request: ImageRefineRequest):
     """Refine an existing generated image with a new prompt."""
     result = await image_service.refine_image(
@@ -910,15 +1348,15 @@ async def serve_root():
 
 @app.get("/{path:path}")
 async def serve_spa(path: str):
-    """Catch-all route to serve SPA for client-side routing."""
+    """Catch-all route to serve SPA for client-side routing with path traversal protection."""
     # Skip API routes and known paths
     if path.startswith(("api/", "ws", "health", "docs", "openapi", "redoc")):
         raise HTTPException(status_code=404, detail="Not found")
 
-    # Check if it's a static file
-
-    if
-        return FileResponse(str(
+    # Check if it's a static file (with path traversal protection)
+    safe_path = PathValidator.is_safe_static_path(DIST_DIR, path)
+    if safe_path:
+        return FileResponse(str(safe_path))
 
     # Otherwise serve index.html for SPA routing
     index_file = DIST_DIR / "index.html"