supervaizer 0.10.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supervaizer/__init__.py +97 -0
- supervaizer/__version__.py +10 -0
- supervaizer/account.py +308 -0
- supervaizer/account_service.py +93 -0
- supervaizer/admin/routes.py +1293 -0
- supervaizer/admin/static/js/job-start-form.js +373 -0
- supervaizer/admin/templates/agent_detail.html +145 -0
- supervaizer/admin/templates/agents.html +249 -0
- supervaizer/admin/templates/agents_grid.html +82 -0
- supervaizer/admin/templates/base.html +233 -0
- supervaizer/admin/templates/case_detail.html +230 -0
- supervaizer/admin/templates/cases_list.html +182 -0
- supervaizer/admin/templates/cases_table.html +134 -0
- supervaizer/admin/templates/console.html +389 -0
- supervaizer/admin/templates/dashboard.html +153 -0
- supervaizer/admin/templates/job_detail.html +192 -0
- supervaizer/admin/templates/job_start_test.html +109 -0
- supervaizer/admin/templates/jobs_list.html +180 -0
- supervaizer/admin/templates/jobs_table.html +122 -0
- supervaizer/admin/templates/navigation.html +163 -0
- supervaizer/admin/templates/recent_activity.html +81 -0
- supervaizer/admin/templates/server.html +105 -0
- supervaizer/admin/templates/server_status_cards.html +121 -0
- supervaizer/admin/templates/supervaize_instructions.html +212 -0
- supervaizer/agent.py +956 -0
- supervaizer/case.py +432 -0
- supervaizer/cli.py +395 -0
- supervaizer/common.py +324 -0
- supervaizer/deploy/__init__.py +16 -0
- supervaizer/deploy/cli.py +305 -0
- supervaizer/deploy/commands/__init__.py +9 -0
- supervaizer/deploy/commands/clean.py +294 -0
- supervaizer/deploy/commands/down.py +119 -0
- supervaizer/deploy/commands/local.py +460 -0
- supervaizer/deploy/commands/plan.py +167 -0
- supervaizer/deploy/commands/status.py +169 -0
- supervaizer/deploy/commands/up.py +281 -0
- supervaizer/deploy/docker.py +377 -0
- supervaizer/deploy/driver_factory.py +42 -0
- supervaizer/deploy/drivers/__init__.py +39 -0
- supervaizer/deploy/drivers/aws_app_runner.py +607 -0
- supervaizer/deploy/drivers/base.py +196 -0
- supervaizer/deploy/drivers/cloud_run.py +570 -0
- supervaizer/deploy/drivers/do_app_platform.py +504 -0
- supervaizer/deploy/health.py +404 -0
- supervaizer/deploy/state.py +210 -0
- supervaizer/deploy/templates/Dockerfile.template +44 -0
- supervaizer/deploy/templates/debug_env.py +69 -0
- supervaizer/deploy/templates/docker-compose.yml.template +37 -0
- supervaizer/deploy/templates/dockerignore.template +66 -0
- supervaizer/deploy/templates/entrypoint.sh +20 -0
- supervaizer/deploy/utils.py +52 -0
- supervaizer/event.py +181 -0
- supervaizer/examples/controller_template.py +196 -0
- supervaizer/instructions.py +145 -0
- supervaizer/job.py +392 -0
- supervaizer/job_service.py +156 -0
- supervaizer/lifecycle.py +417 -0
- supervaizer/parameter.py +233 -0
- supervaizer/protocol/__init__.py +11 -0
- supervaizer/protocol/a2a/__init__.py +21 -0
- supervaizer/protocol/a2a/model.py +227 -0
- supervaizer/protocol/a2a/routes.py +99 -0
- supervaizer/py.typed +1 -0
- supervaizer/routes.py +917 -0
- supervaizer/server.py +553 -0
- supervaizer/server_utils.py +54 -0
- supervaizer/storage.py +462 -0
- supervaizer/telemetry.py +81 -0
- supervaizer/utils/__init__.py +16 -0
- supervaizer/utils/version_check.py +56 -0
- supervaizer-0.10.5.dist-info/METADATA +317 -0
- supervaizer-0.10.5.dist-info/RECORD +76 -0
- supervaizer-0.10.5.dist-info/WHEEL +4 -0
- supervaizer-0.10.5.dist-info/entry_points.txt +2 -0
- supervaizer-0.10.5.dist-info/licenses/LICENSE.md +346 -0
@@ -0,0 +1,1293 @@
# Copyright (c) 2024-2025 Alain Prasquier - Supervaize.com. All rights reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, you can obtain one at
# https://mozilla.org/MPL/2.0/.

# Copyright (c) 2024-2025 Alain Prasquier - Supervaize.com. All rights reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# https://mozilla.org/MPL/2.0/.

import asyncio
import json
import os
import secrets
import time
from datetime import datetime
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, List, Optional

import psutil
from fastapi import APIRouter, HTTPException, Query, Request, Security
from fastapi.responses import HTMLResponse, Response
from fastapi.security import APIKeyHeader
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse

from supervaizer.__version__ import API_VERSION, VERSION
from supervaizer.common import log
from supervaizer.lifecycle import EntityStatus
from supervaizer.storage import (
    StorageManager,
    create_case_repository,
    create_job_repository,
)

# Global log queue for streaming
log_queue: asyncio.Queue[Dict[str, str]] = asyncio.Queue()

# Server start time for uptime calculation
# This will be set when the server actually starts
SERVER_START_TIME = time.time()

# Console token storage (in production, use Redis or database)
_console_tokens: Dict[str, float] = {}  # token -> expiry_timestamp


def set_server_start_time(start_time: float) -> None:
    """Set the server start time for uptime calculation."""
    global SERVER_START_TIME
    SERVER_START_TIME = start_time


def add_log_to_queue(timestamp: str, level: str, message: str) -> None:
    """Add a log message to the streaming queue."""
    try:
        log_data = {"timestamp": timestamp, "level": level, "message": message}
        # Non-blocking put - if queue is full, skip the message
        try:
            log_queue.put_nowait(log_data)
        except asyncio.QueueFull:
            pass  # Skip if queue is full
    except Exception:
        pass  # Silently ignore errors to avoid breaking logging

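# Editor's note (illustrative sketch, not part of the released routes.py): the
# queue above only receives what is pushed through add_log_to_queue(). Assuming
# `log` from supervaizer.common is a loguru logger, a sink along these lines
# could forward every log record to the /log-stream SSE endpoint defined below.
# The name `_forward_logs_to_queue` is hypothetical.
def _forward_logs_to_queue() -> None:
    """Register a loguru sink that mirrors records into log_queue (sketch)."""

    def _sink(message: Any) -> None:
        record = message.record
        add_log_to_queue(
            timestamp=record["time"].isoformat(),
            level=record["level"].name,
            message=record["message"],
        )

    log.add(_sink)
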
# Initialize templates
templates = Jinja2Templates(directory=str(Path(__file__).parent / "templates"))

# API key authentication
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)


class AdminStats(BaseModel):
    """Statistics for admin dashboard."""

    jobs: Dict[str, int]
    cases: Dict[str, int]
    collections: int


class ServerStatus(BaseModel):
    """Server status and metrics."""

    status: str
    uptime: str
    uptime_seconds: int
    memory_usage: str
    memory_usage_mb: float
    memory_percent: float
    cpu_percent: float
    active_connections: int
    agents_count: int
    host: str
    port: int
    environment: str
    database_type: str
    storage_path: str


class ServerConfiguration(BaseModel):
    """Server configuration details."""

    host: str
    port: int
    api_version: str
    environment: str
    database_type: str
    storage_path: str
    agents: List[Dict[str, str]]


class EntityFilter(BaseModel):
    """Filter parameters for entity queries."""

    status: Optional[str] = None
    agent_name: Optional[str] = None
    search: Optional[str] = None
    sort: str = "-created_at"
    limit: int = 50
    skip: int = 0


async def verify_admin_access(
    request: Request,
    api_key: Optional[str] = Security(api_key_header),
    key: Optional[str] = Query(None),
) -> bool:
    """Verify admin access via API key in header or query parameter."""
    # First try header authentication
    if api_key:
        expected_key = os.getenv("SUPERVAIZER_API_KEY")
        if expected_key is None:
            expected_key = "admin-secret-key-123"

        if api_key == expected_key:
            return True

    # For browser access, try query parameter
    if key:
        expected_key = os.getenv("SUPERVAIZER_API_KEY")
        if expected_key is None:
            expected_key = "admin-secret-key-123"

        if key == expected_key:
            return True

    raise HTTPException(
        status_code=403,
        detail="Invalid API key. Provide via X-API-Key header or ?key=<api_key> parameter",
        headers={"WWW-Authenticate": "APIKey"},
    )

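# Editor's note (illustrative sketch, not part of the released routes.py):
# verify_admin_access() accepts the key either as an X-API-Key header or as a
# ?key= query parameter. The snippet below shows both styles with httpx; the
# base URL, the "/admin" mount prefix, and the assumption that the server wires
# verify_admin_access in as a dependency when including this router are not
# taken from this file.
def _admin_auth_example(base_url: str = "http://localhost:8000") -> None:
    """Call an admin page with header-based and query-based API keys (sketch)."""
    import httpx

    api_key = os.getenv("SUPERVAIZER_API_KEY", "admin-secret-key-123")
    # Header style, e.g. for scripted clients
    httpx.get(f"{base_url}/admin/", headers={"X-API-Key": api_key}).raise_for_status()
    # Query-parameter style, e.g. for plain browser access
    httpx.get(f"{base_url}/admin/", params={"key": api_key}).raise_for_status()
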
def format_uptime(seconds: int) -> str:
    """Format uptime seconds into human readable string."""
    days = seconds // 86400
    hours = (seconds % 86400) // 3600
    minutes = (seconds % 3600) // 60

    if days > 0:
        return f"{days}d {hours}h {minutes}m"
    elif hours > 0:
        return f"{hours}h {minutes}m"
    else:
        return f"{minutes}m"

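# Editor's note: for reference, format_uptime above gives format_uptime(90061) -> "1d 1h 1m",
# format_uptime(3700) -> "1h 1m", and format_uptime(59) -> "0m" (sub-minute uptimes
# round down to zero minutes).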
def get_server_status() -> ServerStatus:
    """Get current server status and metrics."""
    # Get server info from storage - required, no fallback
    from supervaizer.server import get_server_info_from_storage

    server_info = get_server_info_from_storage()
    if not server_info:
        raise HTTPException(
            status_code=503,
            detail="Server information not available in storage. Server may not be properly initialized.",
        )

    # Calculate uptime from stored start time
    uptime_seconds = int(time.time() - server_info.start_time)
    uptime_str = format_uptime(uptime_seconds)

    # Get memory usage
    memory = psutil.virtual_memory()
    process = psutil.Process()
    process_memory = process.memory_info().rss / 1024 / 1024  # MB

    # Get CPU usage
    cpu_percent = psutil.cpu_percent(interval=0.1)

    # Get network connections (approximate active connections)
    try:
        connections = len(psutil.net_connections(kind="inet"))
    except (psutil.AccessDenied, OSError):
        # This is a system limitation, not a missing data issue
        connections = 0

    return ServerStatus(
        status="online",
        uptime=uptime_str,
        uptime_seconds=uptime_seconds,
        memory_usage=f"{process_memory:.1f} MB",
        memory_usage_mb=process_memory,
        memory_percent=memory.percent,
        cpu_percent=cpu_percent,
        active_connections=connections,
        agents_count=len(server_info.agents),
        host=server_info.host,
        port=server_info.port,
        environment=server_info.environment,
        database_type="TinyDB",
        storage_path=os.getenv("DATA_STORAGE_PATH", "./data"),
    )


def get_server_configuration(storage: StorageManager) -> ServerConfiguration:
    """Get server configuration details."""
    # Get server info from storage - required, no fallback
    from supervaizer.server import get_server_info_from_storage

    server_info = get_server_info_from_storage()
    if not server_info:
        raise HTTPException(
            status_code=503,
            detail="Server configuration not available in storage. Server may not be properly initialized.",
        )

    return ServerConfiguration(
        host=server_info.host,
        port=server_info.port,
        api_version=server_info.api_version,
        environment=server_info.environment,
        database_type="TinyDB",
        storage_path=str(storage.db_path.absolute()),
        agents=server_info.agents,
    )


def create_admin_routes() -> APIRouter:
    """Create and configure admin routes."""
    router = APIRouter(tags=["admin"])

    # Initialize storage manager
    storage = StorageManager()
    _job_repo = create_job_repository()
    _case_repo = create_case_repository()

    @router.get("/", response_class=HTMLResponse)
    async def admin_dashboard(request: Request) -> Response:
        """Admin dashboard page."""
        try:
            # Get stats
            stats = get_dashboard_stats(storage)

            return templates.TemplateResponse(
                request,
                "dashboard.html",
                {
                    "request": request,
                    "api_version": VERSION,
                    "stats": stats,
                    "system_status": "Online",
                    "db_name": "TinyDB",
                    "data_storage_path": str(storage.db_path.absolute()),
                    "api_key": os.getenv("SUPERVAIZER_API_KEY"),
                },
            )
        except Exception as e:
            log.error(f"Admin dashboard error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/jobs", response_class=HTMLResponse)
    async def admin_jobs_page(request: Request) -> Response:
        """Jobs management page."""
        return templates.TemplateResponse(
            request,
            "jobs_list.html",
            {
                "request": request,
                "api_version": VERSION,
                "api_key": os.getenv("SUPERVAIZER_API_KEY"),
            },
        )

    @router.get("/cases", response_class=HTMLResponse)
    async def admin_cases_page(request: Request) -> Response:
        """Cases management page."""
        return templates.TemplateResponse(
            request,
            "cases_list.html",
            {
                "request": request,
                "api_version": VERSION,
                "api_key": os.getenv("SUPERVAIZER_API_KEY"),
            },
        )

    @router.get("/server", response_class=HTMLResponse)
    async def admin_server_page(request: Request) -> Response:
        """Server status and configuration page."""
        try:
            # Get initial server data
            server_status = get_server_status()
            server_config = get_server_configuration(storage)

            return templates.TemplateResponse(
                request,
                "server.html",
                {
                    "request": request,
                    "api_version": VERSION,
                    "server_status": server_status,
                    "server_config": server_config,
                    "api_key": os.getenv("SUPERVAIZER_API_KEY"),
                },
            )
        except Exception as e:
            log.error(f"Admin server page error: {e}")
            raise HTTPException(status_code=500, detail=str(e)) from e

    @router.get("/agents", response_class=HTMLResponse)
    async def admin_agents_page(request: Request) -> Response:
        """Agents management page."""
        try:
            from supervaizer.server import get_server_info_from_storage

            server_info = get_server_info_from_storage()
            if not server_info:
                raise HTTPException(
                    status_code=503, detail="Server information not available"
                )

            return templates.TemplateResponse(
                request,
                "agents.html",
                {
                    "request": request,
                    "api_version": VERSION,
                    "agents": server_info.agents,
                    "api_key": os.getenv("SUPERVAIZER_API_KEY"),
                },
            )
        except Exception as e:
            log.error(f"Admin agents page error: {e}")
            raise HTTPException(
                status_code=503, detail="Server information unavailable"
            ) from e

    @router.get("/job-start-test", response_class=HTMLResponse)
    async def admin_job_start_test_page(request: Request) -> Response:
        """Job start form test page."""
        return templates.TemplateResponse(
            request,
            "job_start_test.html",
            {
                "request": request,
                "api_version": VERSION,
                "api_key": os.getenv("SUPERVAIZER_API_KEY"),
            },
        )

    @router.get("/static/js/job-start-form.js")
    async def serve_job_start_form_js() -> Response:
        """Serve the JobStartForm JavaScript file."""
        js_file_path = Path(__file__).parent / "static" / "js" / "job-start-form.js"
        if js_file_path.exists():
            with open(js_file_path, "r") as f:
                content = f.read()
            return Response(content=content, media_type="application/javascript")
        else:
            raise HTTPException(status_code=404, detail="JavaScript file not found")

    @router.get("/console", response_class=HTMLResponse)
    async def admin_console_page(request: Request) -> Response:
        """Interactive console page - publicly accessible, authentication handled by frontend."""
        # Clean up expired tokens
        cleanup_expired_tokens()

        # Generate a secure token for this console session
        console_token = generate_console_token()

        return templates.TemplateResponse(
            request,
            "console.html",
            {"request": request, "console_token": console_token},
        )

    # API Routes
    @router.get("/api/stats")
    async def get_stats() -> AdminStats:
        """Get system statistics."""
        return get_dashboard_stats(storage)

    @router.get("/api/server/status")
    async def get_server_status_api(request: Request) -> Response:
        """Get current server status for HTMX refresh."""
        try:
            server_status = get_server_status()

            return templates.TemplateResponse(
                request,
                "server_status_cards.html",
                {
                    "request": request,
                    "server_status": server_status,
                },
            )
        except Exception as e:
            log.error(f"Get server status API error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/api/agents")
    async def get_agents_api(
        request: Request,
        status: Optional[str] = Query(None),
        agent_type: Optional[str] = Query(None),
        search: Optional[str] = Query(None),
        sort: str = Query("-created_at"),
    ) -> Response:
        """Get agents with filtering for HTMX refresh."""
        try:
            from supervaizer.server import get_server_info_from_storage

            server_info = get_server_info_from_storage()
            if not server_info:
                raise HTTPException(
                    status_code=503, detail="Server information not available"
                )

            agents = server_info.agents

            # Apply filters
            filtered_agents = []
            for agent in agents:
                # Status filter (we'll add this to agent data later)
                if status and status != "all":
                    # For now, assume all agents are active since we don't have status
                    if status != "active":
                        continue

                # Agent type filter
                if agent_type and agent_type != "":
                    # Default to "conversational" if no type specified
                    agent_agent_type = agent.get("type", "conversational")
                    if agent_type.lower() != agent_agent_type.lower():
                        continue

                # Search filter
                if search:
                    search_lower = search.lower()
                    if not (
                        search_lower in agent.get("name", "").lower()
                        or search_lower in agent.get("description", "").lower()
                    ):
                        continue

                filtered_agents.append(agent)

            # Sort agents
            if sort.startswith("-"):
                reverse = True
                sort_key = sort[1:]
            else:
                reverse = False
                sort_key = sort

            if sort_key == "name":
                filtered_agents.sort(key=lambda x: x.get("name", ""), reverse=reverse)
            elif sort_key == "created_at":
                # For now, maintain original order since we don't have created_at
                pass

            return templates.TemplateResponse(
                request,
                "agents_grid.html",
                {
                    "request": request,
                    "agents": filtered_agents,
                },
            )

        except Exception as e:
            log.error(f"Get agents API error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/api/agents/{agent_slug}")
    async def get_agent_details(
        request: Request,
        agent_slug: str,
    ) -> Response:
        """Get detailed agent information."""
        try:
            from supervaizer.server import get_server_info_from_storage

            server_info = get_server_info_from_storage()
            if not server_info:
                raise HTTPException(
                    status_code=503, detail="Server information not available"
                )

            # Find the agent by slug
            agent = None
            for a in server_info.agents:
                if a.get("slug") == agent_slug:
                    agent = a
                    break

            if not agent:
                raise HTTPException(status_code=404, detail="Agent not found")

            return templates.TemplateResponse(
                request,
                "agent_detail.html",
                {
                    "request": request,
                    "agent": agent,
                },
            )

        except HTTPException:
            raise
        except Exception as e:
            log.error(f"Get agent details error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/api/jobs")
    async def get_jobs_api(
        request: Request,
        status: Optional[str] = Query(None),
        agent_name: Optional[str] = Query(None),
        search: Optional[str] = Query(None),
        sort: str = Query("-created_at"),
        limit: int = Query(50, le=100),
        skip: int = Query(0, ge=0),
    ) -> Response:
        """Get jobs with filtering and pagination."""
        try:
            # Get all jobs
            jobs_data = storage.get_objects("Job")

            # Apply filters
            filtered_jobs = []
            for job_data in jobs_data:
                # Status filter
                if status and job_data.get("status") != status:
                    continue

                # Agent name filter
                if (
                    agent_name
                    and agent_name.lower() not in job_data.get("agent_name", "").lower()
                ):
                    continue

                # Search filter
                if search:
                    search_lower = search.lower()
                    if not (
                        search_lower in job_data.get("name", "").lower()
                        or search_lower in job_data.get("id", "").lower()
                    ):
                        continue

                filtered_jobs.append(job_data)

            # Sort jobs
            if sort.startswith("-"):
                reverse = True
                sort_key = sort[1:]
            else:
                reverse = False
                sort_key = sort

            if sort_key in ["created_at", "name", "status"]:
                filtered_jobs.sort(key=lambda x: x.get(sort_key, ""), reverse=reverse)

            # Apply pagination
            total = len(filtered_jobs)
            jobs_page = filtered_jobs[skip : skip + limit]

            # Format for display
            jobs = []
            for job_data in jobs_page:
                job = {
                    "id": job_data.get("id", ""),
                    "name": job_data.get("name", ""),
                    "agent_name": job_data.get("agent_name", ""),
                    "status": job_data.get("status", ""),
                    "created_at": job_data.get("created_at", ""),
                    "finished_at": job_data.get("finished_at"),
                    "case_count": len(job_data.get("case_ids", [])),
                }
                jobs.append(job)

            return templates.TemplateResponse(
                request,
                "jobs_table.html",
                {
                    "request": request,
                    "jobs": jobs,
                    "total": total,
                    "limit": limit,
                    "skip": skip,
                    "has_next": skip + limit < total,
                    "has_prev": skip > 0,
                },
            )

        except Exception as e:
            log.error(f"Get jobs API error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

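    # Editor's note (illustrative sketch, not part of the released routes.py):
    # the /api/jobs endpoint above returns a rendered jobs_table.html fragment
    # intended for HTMX swaps, filtered and paginated via query parameters.
    # The base URL and "/admin" mount prefix below are assumptions.
    def _list_jobs_fragment_example(base_url: str = "http://localhost:8000") -> str:
        """Fetch the second page of completed jobs as an HTML fragment (sketch)."""
        import httpx

        response = httpx.get(
            f"{base_url}/admin/api/jobs",
            params={"status": "completed", "sort": "-created_at", "limit": 50, "skip": 50},
        )
        response.raise_for_status()
        return response.text  # HTML rendered from jobs_table.html
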
    @router.get("/api/jobs/{job_id}")
    async def get_job_details(request: Request, job_id: str) -> Response:
        """Get detailed job information."""
        try:
            job_data = storage.get_object_by_id("Job", job_id)
            if not job_data:
                raise HTTPException(status_code=404, detail="Job not found")

            # Get related cases
            cases_data = storage.get_cases_for_job(job_id)

            return templates.TemplateResponse(
                request,
                "job_detail.html",
                {
                    "request": request,
                    "job": job_data,
                    "cases": cases_data,
                },
            )

        except HTTPException:
            raise
        except Exception as e:
            log.error(f"Get job details error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/api/cases")
    async def get_cases_api(
        request: Request,
        status: Optional[str] = Query(None),
        job_id: Optional[str] = Query(None),
        search: Optional[str] = Query(None),
        sort: str = Query("-created_at"),
        limit: int = Query(50, le=100),
        skip: int = Query(0, ge=0),
    ) -> Response:
        """Get cases with filtering and pagination."""
        try:
            # Get all cases
            cases_data = storage.get_objects("Case")

            # Apply filters
            filtered_cases = []
            for case_data in cases_data:
                # Status filter
                if status and case_data.get("status") != status:
                    continue

                # Job ID filter
                if job_id and case_data.get("job_id") != job_id:
                    continue

                # Search filter
                if search:
                    search_lower = search.lower()
                    if not (
                        search_lower in case_data.get("name", "").lower()
                        or search_lower in case_data.get("id", "").lower()
                        or search_lower in case_data.get("description", "").lower()
                    ):
                        continue

                filtered_cases.append(case_data)

            # Sort cases
            if sort.startswith("-"):
                reverse = True
                sort_key = sort[1:]
            else:
                reverse = False
                sort_key = sort

            if sort_key in ["created_at", "name", "status", "total_cost"]:
                if sort_key == "total_cost":
                    filtered_cases.sort(
                        key=lambda x: x.get(sort_key, 0), reverse=reverse
                    )
                else:
                    filtered_cases.sort(
                        key=lambda x: x.get(sort_key, ""), reverse=reverse
                    )

            # Apply pagination
            total = len(filtered_cases)
            cases_page = filtered_cases[skip : skip + limit]

            # Format for display
            cases = []
            for case_data in cases_page:
                case = {
                    "id": case_data.get("id", ""),
                    "name": case_data.get("name", ""),
                    "description": case_data.get("description", ""),
                    "status": case_data.get("status", ""),
                    "job_id": case_data.get("job_id", ""),
                    "created_at": case_data.get("created_at", ""),
                    "finished_at": case_data.get("finished_at"),
                    "total_cost": case_data.get("total_cost", 0.0),
                }
                cases.append(case)

            return templates.TemplateResponse(
                request,
                "cases_table.html",
                {
                    "request": request,
                    "cases": cases,
                    "total": total,
                    "limit": limit,
                    "skip": skip,
                    "has_next": skip + limit < total,
                    "has_prev": skip > 0,
                },
            )

        except Exception as e:
            log.error(f"Get cases API error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/api/cases/{case_id}")
    async def get_case_details(request: Request, case_id: str) -> Response:
        """Get detailed case information."""
        try:
            case_data = storage.get_object_by_id("Case", case_id)
            if not case_data:
                raise HTTPException(status_code=404, detail="Case not found")

            # Get parent job if exists
            job_data = None
            if case_data.get("job_id"):
                job_data = storage.get_object_by_id("Job", case_data["job_id"])

            return templates.TemplateResponse(
                request,
                "case_detail.html",
                {
                    "request": request,
                    "case": case_data,
                    "job": job_data,
                },
            )

        except HTTPException:
            raise
        except Exception as e:
            log.error(f"Get case details error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.post("/api/jobs/{job_id}/status")
    async def update_job_status(
        job_id: str,
        status_data: Dict[str, str],
    ) -> Dict[str, str]:
        """Update job status."""
        try:
            new_status = status_data.get("status")
            if not new_status or new_status not in [s.value for s in EntityStatus]:
                raise HTTPException(status_code=400, detail="Invalid status")

            job_data = storage.get_object_by_id("Job", job_id)
            if not job_data:
                raise HTTPException(status_code=404, detail="Job not found")

            # Update job status
            job_data["status"] = new_status
            if new_status in ["completed", "failed", "cancelled"]:
                job_data["finished_at"] = datetime.now().isoformat()

            storage.save_object("Job", job_data)

            return {"message": "Job status updated successfully"}

        except HTTPException:
            raise
        except Exception as e:
            log.error(f"Update job status error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.post("/api/cases/{case_id}/status")
    async def update_case_status(
        case_id: str,
        status_data: Dict[str, str],
    ) -> Dict[str, str]:
        """Update case status."""
        try:
            new_status = status_data.get("status")
            if not new_status or new_status not in [s.value for s in EntityStatus]:
                raise HTTPException(status_code=400, detail="Invalid status")

            case_data = storage.get_object_by_id("Case", case_id)
            if not case_data:
                raise HTTPException(status_code=404, detail="Case not found")

            # Update case status
            case_data["status"] = new_status
            if new_status in ["completed", "failed", "cancelled"]:
                case_data["finished_at"] = datetime.now().isoformat()

            storage.save_object("Case", case_data)

            return {"message": "Case status updated successfully"}

        except HTTPException:
            raise
        except Exception as e:
            log.error(f"Update case status error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.delete("/api/jobs/{job_id}")
    async def delete_job(job_id: str) -> Dict[str, str]:
        """Delete a job and its related cases."""
        try:
            # Delete related cases first
            cases_data = storage.get_cases_for_job(job_id)
            for case_data in cases_data:
                storage.delete_object("Case", case_data["id"])

            # Delete the job
            deleted = storage.delete_object("Job", job_id)
            if not deleted:
                raise HTTPException(status_code=404, detail="Job not found")

            return {"message": "Job and related cases deleted successfully"}

        except HTTPException:
            raise
        except Exception as e:
            log.error(f"Delete job error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.delete("/api/cases/{case_id}")
    async def delete_case(case_id: str) -> Dict[str, str]:
        """Delete a case."""
        try:
            deleted = storage.delete_object("Case", case_id)
            if not deleted:
                raise HTTPException(status_code=404, detail="Case not found")

            return {"message": "Case deleted successfully"}

        except HTTPException:
            raise
        except Exception as e:
            log.error(f"Delete case error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/api/recent-activity")
    async def get_recent_activity(request: Request) -> Response:
        """Get recent entity activity."""
        try:
            # Get recent jobs and cases
            recent_jobs = storage.get_objects("Job")[-5:]  # Last 5 jobs
            recent_cases = storage.get_objects("Case")[-5:]  # Last 5 cases

            # Combine and sort by created_at
            activities = []
            for job in recent_jobs:
                activities.append(
                    {
                        "type": "job",
                        "id": job.get("id"),
                        "name": job.get("name"),
                        "status": job.get("status"),
                        "created_at": job.get("created_at"),
                        "agent_name": job.get("agent_name"),
                    }
                )

            for case in recent_cases:
                activities.append(
                    {
                        "type": "case",
                        "id": case.get("id"),
                        "name": case.get("name"),
                        "status": case.get("status"),
                        "created_at": case.get("created_at"),
                        "job_id": case.get("job_id"),
                    }
                )

            # Sort by created_at descending
            activities.sort(key=lambda x: str(x.get("created_at", "")), reverse=True)
            activities = activities[:10]  # Top 10 recent activities

            return templates.TemplateResponse(
                request,
                "recent_activity.html",
                {
                    "request": request,
                    "activities": activities,
                },
            )

        except Exception as e:
            log.error(f"Get recent activity error: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    @router.get("/log-stream")
    async def log_stream(
        token: Optional[str] = Query(None, alias="token"),
        key: Optional[str] = Query(None, alias="key"),
    ) -> EventSourceResponse:
        """Stream log messages via Server-Sent Events."""

        # Support both console token and API key authentication
        auth_valid = False
        auth_method = None

        if token:
            auth_valid = validate_console_token(token)
            auth_method = "console_token"
            # If token validation fails, fall back to admin console mode
            if not auth_valid:
                auth_valid = True
                auth_method = "admin_console_fallback"
        elif key:
            # Use API key validation
            try:
                from supervaizer.server import get_server_info_from_storage

                server_info = get_server_info_from_storage()
                if (
                    server_info
                    and hasattr(server_info, "api_key")
                    and key == server_info.api_key
                ):
                    auth_valid = True
                    auth_method = "api_key"
            except Exception:
                # Fallback: just check if key is provided for now
                if key:
                    auth_valid = True
                    auth_method = "api_key_fallback"
        else:
            # Allow access without authentication for admin interface live console
            # In a production environment, you might want to add additional security
            auth_valid = True
            auth_method = "admin_console"

        if not auth_valid:
            raise HTTPException(
                status_code=403,
                detail=f"Invalid or expired authentication token (method: {auth_method or 'none'})",
            )

        async def generate_log_events() -> AsyncGenerator[str, None]:
            try:
                # Send connection message immediately
                test_message = {
                    "timestamp": datetime.now().isoformat(),
                    "level": "INFO",
                    "message": f"Log stream connected using {auth_method}",
                }
                yield f"data: {json.dumps(test_message, ensure_ascii=False)}\n\n"

                # Send any existing messages in the queue
                while not log_queue.empty():
                    try:
                        log_message = log_queue.get_nowait()
                        if isinstance(log_message, dict):
                            event_data = json.dumps(log_message, ensure_ascii=False)
                            yield f"data: {event_data}\n\n"
                        else:
                            fallback_message = {
                                "timestamp": datetime.now().isoformat(),
                                "level": "INFO",
                                "message": str(log_message),
                            }
                            event_data = json.dumps(
                                fallback_message, ensure_ascii=False
                            )
                            yield f"data: {event_data}\n\n"
                    except Exception:  # QueueEmpty or any other exception
                        break

                # Keep alive and wait for new messages
                while True:
                    try:
                        # Wait for a log message with timeout to send keep-alive
                        log_message = await asyncio.wait_for(
                            log_queue.get(), timeout=30.0
                        )

                        if isinstance(log_message, dict):
                            event_data = json.dumps(log_message, ensure_ascii=False)
                            yield f"data: {event_data}\n\n"
                        else:
                            fallback_message = {
                                "timestamp": datetime.now().isoformat(),
                                "level": "INFO",
                                "message": str(log_message),
                            }
                            event_data = json.dumps(
                                fallback_message, ensure_ascii=False
                            )
                            yield f"data: {event_data}\n\n"
                    except asyncio.TimeoutError:
                        # Send keep-alive message
                        keepalive_message = {
                            "timestamp": datetime.now().isoformat(),
                            "level": "SYSTEM",
                            "message": "keepalive",
                        }
                        yield f"data: {json.dumps(keepalive_message, ensure_ascii=False)}\n\n"

            except asyncio.CancelledError:
                # Client disconnected
                pass
            except Exception as e:
                # Send error and close
                try:
                    error_data = json.dumps(
                        {
                            "timestamp": datetime.now().isoformat(),
                            "level": "ERROR",
                            "message": f"Log stream error: {str(e)}",
                        },
                        ensure_ascii=False,
                    )
                    yield f"data: {error_data}\n\n"
                except Exception:
                    # If even error formatting fails, just close
                    pass

        return EventSourceResponse(generate_log_events())

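    # Editor's note (illustrative sketch, not part of the released routes.py):
    # one way to consume the /log-stream endpoint above from Python. Each event
    # is a "data: {...}" line carrying the JSON payload built in
    # generate_log_events(); SYSTEM/keepalive events arrive roughly every 30
    # seconds when the queue is idle. Base URL, mount prefix and token value
    # are assumptions.
    def _tail_log_stream_example(base_url: str = "http://localhost:8000", token: str = "") -> None:
        """Print timestamp, level and message for each streamed log event (sketch)."""
        import httpx

        params = {"token": token} if token else {}
        with httpx.stream("GET", f"{base_url}/admin/log-stream", params=params, timeout=None) as response:
            for line in response.iter_lines():
                if line.startswith("data: "):
                    event = json.loads(line[len("data: "):])
                    print(event["timestamp"], event["level"], event["message"])
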
    @router.get("/test-log")
    async def test_log() -> Dict[str, str]:
        """Test endpoint to generate a log message."""
        test_message = f"Test log message generated at {datetime.now().isoformat()}"
        add_log_to_queue(
            timestamp=datetime.now().isoformat(), level="INFO", message=test_message
        )

        # Also test the loguru logger directly
        log.info(f"Direct loguru test message: {test_message}")

        return {"message": "Test log added to queue"}

    @router.get("/debug-tokens")
    async def debug_tokens() -> Dict[str, Any]:
        """Debug endpoint to see current tokens."""
        cleanup_expired_tokens()
        return {
            "current_tokens": [
                {
                    "token": token[:10] + "...",
                    "expires_at": expiry,
                    "expires_in": expiry - time.time(),
                    "is_valid": expiry > time.time(),
                }
                for token, expiry in _console_tokens.items()
            ],
            "token_count": len(_console_tokens),
            "current_time": time.time(),
        }

    @router.get("/test-loguru")
    async def test_loguru() -> Dict[str, str]:
        """Test endpoint to generate loguru messages."""
        log.info("Testing loguru INFO message")
        log.warning("Testing loguru WARNING message")
        log.error("Testing loguru ERROR message")
        return {"message": "Loguru test messages sent"}

    @router.get("/debug-queue")
    async def debug_queue() -> Dict[str, Any]:
        """Debug endpoint to check log queue status."""
        queue_size = log_queue.qsize()

        # Add a test message directly to queue
        add_log_to_queue(
            timestamp=datetime.now().isoformat(),
            level="DEBUG",
            message="Direct queue test message",
        )

        return {
            "queue_size_before": queue_size,
            "queue_size_after": log_queue.qsize(),
            "message": "Test message added to queue",
        }

    @router.post("/api/console/execute")
    async def execute_console_command(
        request: Request,
        command_data: Dict[str, str],
        token: Optional[str] = Query(None, alias="token"),
    ) -> Dict[str, str]:
        """Execute a console command and add output to log stream."""
        # Validate console token
        if not validate_console_token(token):
            raise HTTPException(
                status_code=401, detail="Invalid or expired console token"
            )

        command = command_data.get("command", "").strip()
        if not command:
            return {"status": "error", "message": "No command provided"}

        # Add command to log stream
        add_log_to_queue(
            timestamp=datetime.now().isoformat(), level="USER", message=f"$ {command}"
        )

        # Process the command
        try:
            result = await process_console_command(command)
            # Add result to log stream
            add_log_to_queue(
                timestamp=datetime.now().isoformat(),
                level=result.get("level", "INFO"),
                message=result.get("message", "Command executed"),
            )
            return {"status": "success", "message": "Command executed"}
        except Exception as e:
            add_log_to_queue(
                timestamp=datetime.now().isoformat(),
                level="ERROR",
                message=f"Command execution failed: {str(e)}",
            )
            return {"status": "error", "message": str(e)}

    return router


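# Editor's note (illustrative sketch, not part of the released routes.py): how
# the router produced by create_admin_routes() could be mounted on a FastAPI
# application. In the package this wiring lives elsewhere (supervaizer/server.py);
# the "/admin" prefix and the use of verify_admin_access as a router-level
# dependency are assumptions, not taken from this file.
def _build_admin_app_example() -> Any:
    """Return a FastAPI app exposing the admin routes under /admin (sketch)."""
    from fastapi import Depends, FastAPI

    app = FastAPI()
    app.include_router(
        create_admin_routes(),
        prefix="/admin",
        dependencies=[Depends(verify_admin_access)],
    )
    return app

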
def get_dashboard_stats(storage: StorageManager) -> AdminStats:
    """Get statistics for dashboard."""
    try:
        # Get all jobs and cases
        all_jobs = storage.get_objects("Job")
        all_cases = storage.get_objects("Case")

        # Calculate job stats
        job_total = len(all_jobs)
        job_running = len(
            [j for j in all_jobs if j.get("status") in ["in_progress", "awaiting"]]
        )
        job_completed = len([j for j in all_jobs if j.get("status") == "completed"])
        job_failed = len(
            [j for j in all_jobs if j.get("status") in ["failed", "cancelled"]]
        )

        # Calculate case stats
        case_total = len(all_cases)
        case_running = len(
            [c for c in all_cases if c.get("status") in ["in_progress", "awaiting"]]
        )
        case_completed = len([c for c in all_cases if c.get("status") == "completed"])
        case_failed = len(
            [c for c in all_cases if c.get("status") in ["failed", "cancelled"]]
        )

        # TinyDB collections count (tables)
        collections_count = len(storage._db.tables())

        return AdminStats(
            jobs={
                "total": job_total,
                "running": job_running,
                "completed": job_completed,
                "failed": job_failed,
            },
            cases={
                "total": case_total,
                "running": case_running,
                "completed": case_completed,
                "failed": case_failed,
            },
            collections=collections_count,
        )

    except Exception as e:
        log.error(f"Get dashboard stats error: {e}")
        return AdminStats(
            jobs={"total": 0, "running": 0, "completed": 0, "failed": 0},
            cases={"total": 0, "running": 0, "completed": 0, "failed": 0},
            collections=0,
        )


async def process_console_command(command: str) -> Dict[str, str]:
    """Process a console command and return the result."""
    cmd = command.lower().strip()

    try:
        if cmd == "help":
            return {
                "level": "INFO",
                "message": "Available commands: status, help, clear, reconnect, debug, server-info, memory, uptime",
            }

        elif cmd == "status":
            return {
                "level": "INFO",
                "message": "Server is running and log stream is active",
            }

        elif cmd == "server-info":
            server_status = get_server_status()
            return {
                "level": "INFO",
                "message": f"Server: {server_status.status} | Uptime: {server_status.uptime} | CPU: {server_status.cpu_percent:.1f}% | Memory: {server_status.memory_usage}",
            }

        elif cmd == "memory":
            server_status = get_server_status()
            return {
                "level": "INFO",
                "message": f"Memory Usage: {server_status.memory_usage} ({server_status.memory_percent:.1f}%)",
            }

        elif cmd == "uptime":
            server_status = get_server_status()
            return {
                "level": "INFO",
                "message": f"Server uptime: {server_status.uptime} ({server_status.uptime_seconds} seconds)",
            }

        elif cmd == "debug":
            return {
                "level": "DEBUG",
                "message": f"Environment: {os.getenv('SUPERVAIZER_ENVIRONMENT', 'dev')} | API Version: {API_VERSION}",
            }

        elif cmd == "clear":
            return {"level": "SYSTEM", "message": "Console cleared"}

        elif cmd == "test-log":
            # Add a test log message
            add_log_to_queue(
                timestamp=datetime.now().isoformat(),
                level="INFO",
                message="This is a test log message from console command",
            )
            return {"level": "SUCCESS", "message": "Test log message sent"}

        else:
            return {
                "level": "ERROR",
                "message": f"Unknown command: {command}. Type 'help' for available commands.",
            }

    except Exception as e:
        return {"level": "ERROR", "message": f"Command processing error: {str(e)}"}


def generate_console_token() -> str:
    """Generate a temporary token for console access."""
    token = secrets.token_urlsafe(32)
    # Token expires in 1 hour
    _console_tokens[token] = time.time() + 3600
    return token


def validate_console_token(token: Optional[str]) -> bool:
    """Validate a console token."""
    if not token or token not in _console_tokens:
        return False

    # Check if token is expired
    if time.time() > _console_tokens[token]:
        del _console_tokens[token]
        return False

    return True


def cleanup_expired_tokens() -> None:
    """Clean up expired tokens."""
    current_time = time.time()
    expired_tokens = [
        token for token, expiry in _console_tokens.items() if current_time > expiry
    ]
    for token in expired_tokens:
        del _console_tokens[token]
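
# Editor's note (illustrative sketch, not part of the released routes.py): the
# console token helpers above exercised end to end. /console issues a token via
# generate_console_token(), /api/console/execute checks it with
# validate_console_token(), and cleanup_expired_tokens() drops stale entries.
def _console_token_roundtrip_example() -> None:
    """Issue, validate, expire and clean up a console token (sketch)."""
    token = generate_console_token()
    assert validate_console_token(token)  # fresh token: accepted for one hour

    _console_tokens[token] = time.time() - 1  # force expiry for the demo
    assert not validate_console_token(token)  # stale token: rejected and removed
    cleanup_expired_tokens()  # no-op here, the stale entry is already gone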