lumen-app 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56):
  1. lumen_app/__init__.py +7 -0
  2. lumen_app/core/__init__.py +0 -0
  3. lumen_app/core/config.py +661 -0
  4. lumen_app/core/installer.py +274 -0
  5. lumen_app/core/loader.py +45 -0
  6. lumen_app/core/router.py +87 -0
  7. lumen_app/core/server.py +389 -0
  8. lumen_app/core/service.py +49 -0
  9. lumen_app/core/tests/__init__.py +1 -0
  10. lumen_app/core/tests/test_core_integration.py +561 -0
  11. lumen_app/core/tests/test_env_checker.py +487 -0
  12. lumen_app/proto/README.md +12 -0
  13. lumen_app/proto/ml_service.proto +88 -0
  14. lumen_app/proto/ml_service_pb2.py +66 -0
  15. lumen_app/proto/ml_service_pb2.pyi +136 -0
  16. lumen_app/proto/ml_service_pb2_grpc.py +251 -0
  17. lumen_app/server.py +362 -0
  18. lumen_app/utils/env_checker.py +752 -0
  19. lumen_app/utils/installation/__init__.py +25 -0
  20. lumen_app/utils/installation/env_manager.py +152 -0
  21. lumen_app/utils/installation/micromamba_installer.py +459 -0
  22. lumen_app/utils/installation/package_installer.py +149 -0
  23. lumen_app/utils/installation/verifier.py +95 -0
  24. lumen_app/utils/logger.py +181 -0
  25. lumen_app/utils/mamba/cuda.yaml +12 -0
  26. lumen_app/utils/mamba/default.yaml +6 -0
  27. lumen_app/utils/mamba/openvino.yaml +7 -0
  28. lumen_app/utils/mamba/tensorrt.yaml +13 -0
  29. lumen_app/utils/package_resolver.py +309 -0
  30. lumen_app/utils/preset_registry.py +219 -0
  31. lumen_app/web/__init__.py +3 -0
  32. lumen_app/web/api/__init__.py +1 -0
  33. lumen_app/web/api/config.py +229 -0
  34. lumen_app/web/api/hardware.py +201 -0
  35. lumen_app/web/api/install.py +608 -0
  36. lumen_app/web/api/server.py +253 -0
  37. lumen_app/web/core/__init__.py +1 -0
  38. lumen_app/web/core/server_manager.py +348 -0
  39. lumen_app/web/core/state.py +264 -0
  40. lumen_app/web/main.py +145 -0
  41. lumen_app/web/models/__init__.py +28 -0
  42. lumen_app/web/models/config.py +63 -0
  43. lumen_app/web/models/hardware.py +64 -0
  44. lumen_app/web/models/install.py +134 -0
  45. lumen_app/web/models/server.py +95 -0
  46. lumen_app/web/static/assets/index-CGuhGHC9.css +1 -0
  47. lumen_app/web/static/assets/index-DN6HmxWS.js +56 -0
  48. lumen_app/web/static/index.html +14 -0
  49. lumen_app/web/static/vite.svg +1 -0
  50. lumen_app/web/websockets/__init__.py +1 -0
  51. lumen_app/web/websockets/logs.py +159 -0
  52. lumen_app-0.4.2.dist-info/METADATA +23 -0
  53. lumen_app-0.4.2.dist-info/RECORD +56 -0
  54. lumen_app-0.4.2.dist-info/WHEEL +5 -0
  55. lumen_app-0.4.2.dist-info/entry_points.txt +3 -0
  56. lumen_app-0.4.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,264 @@
1
+ """Application state management for Lumen Web API."""
2
+
3
from __future__ import annotations

import asyncio
import time
from dataclasses import dataclass, field
from typing import Any

from lumen_app.core.config import Config, DeviceConfig
from lumen_app.utils.env_checker import EnvironmentReport
from lumen_app.utils.logger import get_logger
from lumen_app.web.core.server_manager import ServerManager

# Optional import - AppService may not be available if gRPC is not installed
try:
    from lumen_app.core.service import AppService
except ImportError:
    AppService = None  # type: ignore[misc,assignment]
19
+
20
+ logger = get_logger("lumen.web.state")
21
+
22
+
23
@dataclass
class ServerStatus:
    """Server process status.

    Snapshot of the managed ML server process: whether it is running, its
    OS pid, the port it listens on, the config file it was launched with,
    and a buffer of structured log entries.
    """

    running: bool = False  # True while the server process is considered alive
    pid: int | None = None  # OS process id; None when not running
    port: int = 50051  # gRPC listen port (default matches ConfigRequest.port)
    config_path: str | None = None  # path of the config used to start the server
    logs: list[dict] = field(default_factory=list)  # buffered log entries (dicts)
32
+
33
+
34
@dataclass
class InstallationTask:
    """Installation task status (legacy task format).

    Represents one background installation job tracked by ``AppState``.
    """

    id: str
    type: str  # "micromamba", "environment", "drivers"
    status: str  # "pending", "running", "completed", "failed"
    progress: int = 0  # completion percentage, 0-100
    message: str = ""  # human-readable status detail
    # Creation timestamp on the monotonic clock. time.monotonic() is the same
    # clock asyncio event loops use, but unlike the previous
    # asyncio.get_event_loop().time() it never creates a loop as a side
    # effect, is not deprecated, and works from threads without a loop.
    created_at: float = field(default_factory=time.monotonic)
44
+
45
+
46
class AppState:
    """Global application state.

    Holds the active configuration, environment report, server status,
    installation-task bookkeeping, and the fan-out queues used to stream log
    entries to websocket subscribers. Each mutable collection is guarded by
    its own :class:`asyncio.Lock`, so the async methods must be awaited from
    a running event loop.
    """

    def __init__(self):
        self._initialized = False
        self.current_config: Config | None = None
        self.device_config: DeviceConfig | None = None
        self.environment_report: EnvironmentReport | None = None
        self.server_status = ServerStatus()
        self.app_service: AppService | None = None

        # Server manager for gRPC ML server
        self.server_manager = ServerManager()

        # Installation tasks (old format - to be deprecated)
        self._tasks: dict[str, InstallationTask] = {}
        self._task_lock = asyncio.Lock()

        # New installation tasks
        self._install_tasks: dict[str, Any] = {}  # task_id -> InstallTaskResponse
        self._install_logs: dict[str, list[str]] = {}  # task_id -> logs
        self._install_lock = asyncio.Lock()

        # Log subscribers
        self._log_queues: list[asyncio.Queue] = []
        self._log_lock = asyncio.Lock()

    async def initialize(self):
        """Initialize application state (idempotent)."""
        if self._initialized:
            return

        logger.info("Initializing application state")

        # Load default configuration
        try:
            # Start with CPU preset as default
            self.device_config = DeviceConfig.cpu()
            logger.info("Loaded default CPU configuration")
        except Exception as e:
            # Best-effort: the web UI can still come up without a default.
            logger.error(f"Failed to load default configuration: {e}")

        self._initialized = True
        logger.info("Application state initialized")

    async def cleanup(self):
        """Cleanup application state: stop the server and drop subscribers."""
        logger.info("Cleaning up application state")

        # Stop server if running
        if self.server_status.running:
            await self.stop_server()

        # Drain every subscriber queue, then forget them all.
        async with self._log_lock:
            for queue in self._log_queues:
                while not queue.empty():
                    try:
                        queue.get_nowait()
                    except asyncio.QueueEmpty:
                        break
            self._log_queues.clear()

        self._initialized = False
        logger.info("Application state cleaned up")

    # Configuration methods
    def set_config(self, config: Config, device_config: DeviceConfig):
        """Set current configuration."""
        self.current_config = config
        self.device_config = device_config
        logger.info(f"Configuration updated: {device_config}")

    def get_config(self) -> tuple[Config | None, DeviceConfig | None]:
        """Get current configuration as (config, device_config)."""
        return self.current_config, self.device_config

    # Task management (legacy format)
    async def create_task(self, task_type: str) -> InstallationTask:
        """Create and register a new installation task with a fresh UUID."""
        import uuid  # local import: only needed on this path

        task_id = str(uuid.uuid4())
        task = InstallationTask(
            id=task_id,
            type=task_type,
            status="pending",
        )

        async with self._task_lock:
            self._tasks[task_id] = task

        logger.info(f"Created task {task_id} of type {task_type}")
        return task

    async def update_task(
        self,
        task_id: str,
        status: str | None = None,
        progress: int | None = None,
        message: str | None = None,
    ):
        """Update task fields; unknown task_ids are ignored silently."""
        async with self._task_lock:
            task = self._tasks.get(task_id)
            if task:
                if status:
                    task.status = status
                # progress=0 is a valid value, so test against None explicitly
                if progress is not None:
                    task.progress = progress
                if message:
                    task.message = message

    async def get_task(self, task_id: str) -> InstallationTask | None:
        """Get task by ID, or None if unknown."""
        async with self._task_lock:
            return self._tasks.get(task_id)

    async def get_all_tasks(self) -> list[InstallationTask]:
        """Get a snapshot list of all tasks."""
        async with self._task_lock:
            return list(self._tasks.values())

    # New install task management
    async def store_install_task(self, task_id: str, task: Any):
        """Store or update an installation task and ensure its log buffer."""
        async with self._install_lock:
            self._install_tasks[task_id] = task
            if task_id not in self._install_logs:
                self._install_logs[task_id] = []

    async def get_install_task(self, task_id: str) -> Any | None:
        """Get installation task by ID, or None if unknown."""
        async with self._install_lock:
            return self._install_tasks.get(task_id)

    async def get_all_install_tasks(self) -> list[Any]:
        """Get a snapshot list of all installation tasks."""
        async with self._install_lock:
            return list(self._install_tasks.values())

    async def append_install_log(self, task_id: str, log_line: str):
        """Append a log line to an installation task's buffer."""
        async with self._install_lock:
            self._install_logs.setdefault(task_id, []).append(log_line)

    async def get_install_task_logs(self, task_id: str) -> list[str]:
        """Get installation task logs.

        Returns a copy so callers cannot mutate the internal buffer
        (previously the internal list itself was returned).
        """
        async with self._install_lock:
            return list(self._install_logs.get(task_id, []))

    # Log streaming
    async def subscribe_logs(self) -> asyncio.Queue:
        """Subscribe to the log stream; returns the new bounded queue."""
        queue = asyncio.Queue(maxsize=1000)
        async with self._log_lock:
            self._log_queues.append(queue)
            logger.debug(f"New log subscriber, total: {len(self._log_queues)}")
        return queue

    async def unsubscribe_logs(self, queue: asyncio.Queue):
        """Unsubscribe a queue from the log stream (no-op if unknown)."""
        async with self._log_lock:
            if queue in self._log_queues:
                self._log_queues.remove(queue)
            logger.debug(f"Log subscriber removed, remaining: {len(self._log_queues)}")

    async def broadcast_log(self, log_entry: dict):
        """Broadcast a log entry to all subscriber queues.

        Full queues drop their oldest entry to make room; queues that raise
        anything else are treated as dead and removed.
        """
        async with self._log_lock:
            dead_queues = []
            for queue in self._log_queues:
                try:
                    queue.put_nowait(log_entry)
                except asyncio.QueueFull:
                    # Remove oldest log if queue is full
                    try:
                        queue.get_nowait()
                        queue.put_nowait(log_entry)
                    except asyncio.QueueEmpty:
                        pass
                except Exception:
                    dead_queues.append(queue)

            # Remove dead queues
            for queue in dead_queues:
                if queue in self._log_queues:
                    self._log_queues.remove(queue)

    # Server management
    async def start_server(self, config_path: str | None = None) -> bool:
        """Start the ML server. Returns False if already running."""
        if self.server_status.running:
            logger.warning("Server is already running")
            return False

        logger.info("Starting ML server")
        # TODO: Implement actual server startup logic
        self.server_status.running = True
        self.server_status.pid = 0  # Placeholder
        return True

    async def stop_server(self) -> bool:
        """Stop the ML server. Returns False if not running."""
        if not self.server_status.running:
            logger.warning("Server is not running")
            return False

        logger.info("Stopping ML server")
        # TODO: Implement actual server shutdown logic
        self.server_status.running = False
        self.server_status.pid = None
        return True
261
+
262
+
263
# Global application state instance, shared by all API routers and websockets.
app_state = AppState()
lumen_app/web/main.py ADDED
@@ -0,0 +1,145 @@
1
+ """FastAPI application entry point for Lumen Web."""
2
+
3
+ from contextlib import asynccontextmanager
4
+ from pathlib import Path
5
+
6
+ import uvicorn
7
+ from fastapi import FastAPI
8
+ from fastapi.middleware.cors import CORSMiddleware
9
+ from fastapi.responses import FileResponse
10
+ from fastapi.staticfiles import StaticFiles
11
+
12
+ from lumen_app.utils.logger import get_logger
13
+
14
+ from .api.config import router as config_router
15
+ from .api.hardware import router as hardware_router
16
+ from .api.install import router as install_router
17
+ from .api.server import router as server_router
18
+ from .core.state import app_state
19
+ from .websockets.logs import router as logs_ws_router
20
+
21
+ logger = get_logger("lumen.web")
22
+
23
+
24
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager.

    Initializes the shared ``app_state`` before the server accepts requests
    and cleans it up on shutdown. Note there is no try/finally around the
    yield, so cleanup runs only on a normal shutdown resume.
    """
    logger.info("Starting Lumen Web API")
    await app_state.initialize()
    yield
    logger.info("Shutting down Lumen Web API")
    await app_state.cleanup()
32
+
33
+
34
def get_webui_dist_paths() -> tuple[Path, Path, Path]:
    """Locate candidate Web UI build directories.

    Returns ``(selected, packaged, repo)``: the ``static`` directory shipped
    inside the installed package, the repository ``web-ui/dist`` build tree,
    and whichever of the two to use (the packaged copy wins when it exists).
    """
    here = Path(__file__).resolve().parent
    packaged = here / "static"
    repo = here.parents[3] / "web-ui" / "dist"
    if packaged.exists():
        chosen = packaged
    else:
        chosen = repo
    return chosen, packaged, repo
41
+
42
+
43
def create_app() -> FastAPI:
    """Create and configure the FastAPI application.

    Wires CORS, the versioned API routers, the websocket log stream, a
    health endpoint, and static hosting for the built Web UI. Raises
    RuntimeError at creation time if no Web UI build directory is found.
    """
    app = FastAPI(
        title="Lumen Web API",
        description="Web API for managing Lumen AI services",
        version="0.1.0",
        lifespan=lifespan,
    )

    # CORS middleware
    # NOTE(review): allow_origins=["*"] combined with allow_credentials=True
    # is wide open — tighten before production deployment.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # Configure for production
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Include API routers
    app.include_router(config_router, prefix="/api/v1/config", tags=["config"])
    app.include_router(hardware_router, prefix="/api/v1/hardware", tags=["hardware"])
    app.include_router(install_router, prefix="/api/v1/install", tags=["install"])
    app.include_router(server_router, prefix="/api/v1/server", tags=["server"])
    app.include_router(logs_ws_router, prefix="/ws", tags=["websocket"])

    @app.get("/health")
    async def health_check():
        """Health check endpoint."""
        return {"status": "ok", "version": "0.1.0"}

    # Static files for Web UI (production build)
    webui_dist, packaged_dist, repo_dist = get_webui_dist_paths()
    if webui_dist.exists():
        # Mount static assets
        # NOTE(review): StaticFiles raises if webui_dist/"assets" is missing
        # even when webui_dist itself exists — confirm the build layout.
        app.mount(
            "/assets", StaticFiles(directory=str(webui_dist / "assets")), name="assets"
        )

        # Catch-all route for SPA - serve index.html for all frontend routes.
        # Registered after the API routers so /api/* and /ws keep priority.
        @app.get("/{full_path:path}")
        async def serve_spa(full_path: str):
            """Serve index.html for all SPA routes."""
            file_path = webui_dist / full_path
            # Serve file if it exists (e.g., favicon.ico, robots.txt)
            if file_path.is_file():
                return FileResponse(file_path)
            # Otherwise serve index.html for SPA routing
            return FileResponse(webui_dist / "index.html")
    else:
        raise RuntimeError(
            "Web UI build not found. Looked in:\n"
            f" - packaged: {packaged_dist}\n"
            f" - repo: {repo_dist}\n"
            "Run the web-ui build to enable static hosting."
        )

    return app
100
+
101
+
102
def start_server(
    host: str = "0.0.0.0",
    port: int = 6658,
    reload: bool = False,
    workers: int = 1,
):
    """Start the uvicorn server; blocks until it exits.

    Args:
        host: Interface to bind. "0.0.0.0" exposes the API on all interfaces.
        port: TCP port. Default here is 6658, while the CLI wrapper
            ``start_webui`` defaults to 8000 — an existing inconsistency.
        reload: Enable auto-reload for development.
        workers: Worker process count.
            NOTE(review): uvicorn ignores ``workers`` when ``reload`` is set;
            the two options are mutually exclusive.
    """
    logger.info(f"Starting server on {host}:{port}")
    uvicorn.run(
        "lumen_app.web.main:create_app",
        host=host,
        port=port,
        reload=reload,
        workers=workers,
        factory=True,  # the import string points at an app factory, not an app
    )
118
+
119
+
120
def start_webui():
    """Console-script entry point for the ``lumen-webui`` command.

    Parses command-line options and forwards them to :func:`start_server`.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Lumen Web UI Server")
    parser.add_argument("--host", default="0.0.0.0", help="Host to bind to")
    parser.add_argument("--port", type=int, default=8000, help="Port to bind to")
    parser.add_argument("--reload", action="store_true", help="Enable auto-reload")
    parser.add_argument("--workers", type=int, default=1, help="Number of workers")

    # The namespace attributes (host, port, reload, workers) map 1:1 onto
    # start_server's keyword parameters.
    start_server(**vars(parser.parse_args()))
137
+
138
+
139
def start_app():
    """Legacy entry point (kept for compatibility).

    Older installs registered this name as the console script; it simply
    delegates to :func:`start_webui`.
    """
    start_webui()
142
+
143
+
144
# Allow running this module directly: ``python -m lumen_app.web.main``.
if __name__ == "__main__":
    start_webui()
@@ -0,0 +1,28 @@
1
+ """Pydantic models for API requests and responses."""
2
+
3
+ from .config import ConfigRequest, ConfigResponse
4
+ from .hardware import DriverCheckResponse, HardwareInfoResponse, HardwarePresetResponse
5
+ from .install import (
6
+ InstallLogsResponse,
7
+ InstallSetupRequest,
8
+ InstallStatusResponse,
9
+ InstallTaskListResponse,
10
+ InstallTaskResponse,
11
+ )
12
+ from .server import ServerConfig, ServerLogs, ServerStatus
13
+
14
# Explicit public API of lumen_app.web.models; star imports and doc tools
# pick up exactly these re-exported names.
__all__ = [
    "ConfigRequest",
    "ConfigResponse",
    "DriverCheckResponse",
    "HardwareInfoResponse",
    "HardwarePresetResponse",
    "InstallLogsResponse",
    "InstallSetupRequest",
    "InstallStatusResponse",
    "InstallTaskListResponse",
    "InstallTaskResponse",
    "ServerConfig",
    "ServerLogs",
    "ServerStatus",
]
@@ -0,0 +1,63 @@
1
+ """Configuration models."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Literal, Union
6
+
7
+ from lumen_resources.lumen_config import Region
8
+ from pydantic import BaseModel, Field
9
+
10
+
11
class ConfigRequest(BaseModel):
    """Request to generate configuration.

    This matches the Config class constructor parameters:
    - cache_dir: str
    - device_config: DeviceConfig (created from preset)
    - region: Region
    - service_name: str
    - port: int | None

    Plus config generation method selection:
    - config_type: Literal["minimal", "light_weight", "basic", "brave"]
    - clip_model: Optional clip model for light_weight and basic configs
    """

    # Config constructor parameters
    cache_dir: str = "~/.lumen"
    preset: str  # Used to create device_config via PresetRegistry
    region: Region = Region.other
    service_name: str = "lumen-ai"
    port: int | None = 50051  # None presumably means "pick a port" — confirm

    # Config generation method selection
    config_type: Literal["minimal", "light_weight", "basic", "brave"] = "minimal"
    # Which CLIP variant is valid depends on config_type; the union below is
    # not cross-validated here — NOTE(review): confirm a validator elsewhere.
    clip_model: Union[
        Literal["MobileCLIP2-S2", "CN-CLIP_ViT-B-16"],  # For light_weight
        Literal["MobileCLIP2-S4", "CN-CLIP_ViT-L-14"],  # For basic
        None,
    ] = None

    class Config:
        # NOTE(review): inner ``class Config`` is pydantic v1 style; v2
        # prefers ``model_config = ConfigDict(...)`` — confirm pydantic version.
        json_schema_extra = {
            "example": {
                "cache_dir": "~/.lumen",
                "preset": "nvidia_gpu",
                "region": "other",
                "service_name": "lumen-ai",
                "port": 50051,
                "config_type": "light_weight",
                "clip_model": "MobileCLIP2-S2",
            }
        }
53
+
54
+
55
class ConfigResponse(BaseModel):
    """Generated configuration response."""

    success: bool  # whether config generation succeeded
    preset: str  # preset the configuration was generated for
    config_path: str | None = None  # where the config file was written, if any
    config_content: dict | None = None  # generated config as a dict, if returned
    message: str = ""  # human-readable status or error detail
    warnings: list[str] = Field(default_factory=list)  # non-fatal issues
@@ -0,0 +1,64 @@
1
+ """Hardware detection and driver response models."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Literal
6
+
7
+ from pydantic import BaseModel, Field
8
+
9
+
10
class DriverCheckResponse(BaseModel):
    """Driver check result for API responses.

    Note: This is different from the internal DriverStatus enum in env_checker.py
    """

    name: str  # driver name, e.g. "cuda"
    status: Literal["available", "missing", "incompatible"] = "missing"
    details: str = ""  # free-form detail (version found, failure reason, ...)
    installable_via_mamba: bool = False  # True if a mamba env file can provide it
    mamba_config_path: str | None = None  # path to that yaml, when installable
21
+
22
+
23
class HardwarePresetResponse(BaseModel):
    """Hardware preset information for API responses."""

    name: str  # preset identifier, e.g. "nvidia_gpu"
    description: str  # human-readable summary
    requires_drivers: bool = True  # whether extra drivers must be present
    runtime: str  # inference runtime the preset targets
    providers: list[str] = Field(default_factory=list)  # execution providers
31
+
32
+
33
class HardwareInfoResponse(BaseModel):
    """Complete hardware detection report for API responses."""

    # System information (platform module style strings)
    platform: str
    machine: str
    processor: str
    python_version: str

    # Detected hardware presets
    presets: list[HardwarePresetResponse] = Field(default_factory=list)
    recommended_preset: str | None = None  # None when nothing suitable was found

    # Driver status for recommended preset
    drivers: list[DriverCheckResponse] = Field(default_factory=list)
    all_drivers_available: bool = False
    missing_installable: list[str] = Field(default_factory=list)  # fixable via mamba

    class Config:
        # NOTE(review): pydantic v1-style config block — see ConfigRequest.
        json_schema_extra = {
            "example": {
                "platform": "Linux",
                "machine": "x86_64",
                "processor": "x86_64",
                "python_version": "3.11.0",
                "presets": [],
                "recommended_preset": "nvidia_gpu",
                "drivers": [],
                "all_drivers_available": False,
                "missing_installable": [],
            }
        }
@@ -0,0 +1,134 @@
1
+ """Installation models - simplified one-click setup."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Literal
6
+
7
+ from pydantic import BaseModel, Field
8
+
9
+
10
class InstallSetupRequest(BaseModel):
    """Request to start a complete installation setup.

    This will automatically install all required components for the selected preset:
    - micromamba (if not present)
    - conda environment (if not exists)
    - required drivers for the preset
    """

    preset: str  # hardware preset to set up, e.g. "nvidia_gpu"
    cache_dir: str = "~/.lumen"  # root directory for downloads and envs
    environment_name: str = "lumen_env"  # conda environment name to create
    force_reinstall: bool = False  # reinstall components even if present

    class Config:
        json_schema_extra = {
            "example": {
                "preset": "nvidia_gpu",
                "cache_dir": "~/.lumen",
                "environment_name": "lumen_env",
                "force_reinstall": False,
            }
        }
33
+
34
+
35
class InstallStep(BaseModel):
    """A single step in the installation process."""

    name: str  # step label shown to the user
    status: Literal["pending", "running", "completed", "failed", "skipped"] = "pending"
    progress: int = Field(0, ge=0, le=100)  # per-step percentage
    message: str = ""  # latest status/progress detail
    started_at: float | None = None  # timestamp; None until the step starts
    completed_at: float | None = None  # timestamp; None until the step ends
44
+
45
+
46
class InstallTaskResponse(BaseModel):
    """Installation task status and progress.

    Serialized view of one background installation run, including the
    per-step breakdown and overall progress percentage.
    """

    task_id: str  # unique id assigned at task creation
    preset: str  # hardware preset being installed for
    status: Literal["pending", "running", "completed", "failed"] = "pending"
    progress: int = Field(0, ge=0, le=100)  # overall percentage
    current_step: str = ""  # name of the step currently in progress
    steps: list[InstallStep] = Field(default_factory=list)
    # NOTE(review): timestamp clock (epoch vs monotonic) is set by the
    # producer of these values — confirm against the install API.
    created_at: float
    updated_at: float
    completed_at: float | None = None  # set once status is terminal
    error: str | None = None  # failure detail when status == "failed"

    class Config:
        json_schema_extra = {
            "example": {
                "task_id": "abc-123",
                "preset": "nvidia_gpu",
                "status": "running",
                "progress": 45,
                "current_step": "Installing CUDA drivers",
                "steps": [
                    {
                        "name": "Check micromamba",
                        "status": "completed",
                        "progress": 100,
                        "message": "micromamba already installed",
                    },
                    {
                        "name": "Create environment",
                        "status": "running",
                        "progress": 60,
                        "message": "Creating lumen_env...",
                    },
                ],
                "created_at": 1234567890.0,
                "updated_at": 1234567895.0,
                "completed_at": None,
                "error": None,
            }
        }
88
+
89
+
90
class InstallTaskListResponse(BaseModel):
    """List of installation tasks."""

    tasks: list[InstallTaskResponse]
    total: int  # count of tasks; kept separate for paginated clients
95
+
96
+
97
class InstallStatusResponse(BaseModel):
    """Current installation status of the system."""

    micromamba_installed: bool
    micromamba_path: str | None = None  # resolved binary path when installed
    environment_exists: bool
    environment_name: str | None = None
    environment_path: str | None = None
    drivers_checked: bool = False  # False when no driver probe has run yet
    drivers: dict[str, str] = Field(default_factory=dict)  # driver_name -> status
    ready_for_preset: str | None = None  # preset the system can run as-is
    missing_components: list[str] = Field(default_factory=list)

    class Config:
        json_schema_extra = {
            "example": {
                "micromamba_installed": True,
                "micromamba_path": "/usr/local/bin/micromamba",
                "environment_exists": True,
                "environment_name": "lumen_env",
                "environment_path": "~/.lumen/envs/lumen_env",
                "drivers_checked": True,
                "drivers": {
                    "cuda": "available",
                    "cudnn": "missing",
                },
                "ready_for_preset": "nvidia_gpu",
                "missing_components": ["cudnn"],
            }
        }
127
+
128
+
129
class InstallLogsResponse(BaseModel):
    """Installation task logs."""

    task_id: str
    logs: list[str] = Field(default_factory=list)  # possibly a window of lines
    total_lines: int = 0  # total lines available, may exceed len(logs)