llama-deploy-appserver 0.2.7a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,433 @@
1
+ import asyncio
2
+ import json
3
+ import logging
4
+ from typing import Annotated, AsyncGenerator, List, Optional
5
+
6
+ import httpx
7
+ import websockets
8
+ from fastapi import (
9
+ APIRouter,
10
+ Depends,
11
+ File,
12
+ HTTPException,
13
+ Request,
14
+ UploadFile,
15
+ WebSocket,
16
+ )
17
+ from fastapi.responses import JSONResponse, StreamingResponse
18
+ from llama_deploy.appserver.deployment import Deployment
19
+ from llama_deploy.appserver.deployment_config_parser import DeploymentConfig
20
+ from llama_deploy.appserver.server import manager
21
+ from llama_deploy.appserver.types import (
22
+ DeploymentDefinition,
23
+ EventDefinition,
24
+ SessionDefinition,
25
+ TaskDefinition,
26
+ TaskResult,
27
+ generate_id,
28
+ )
29
+ from starlette.background import BackgroundTask
30
+ from workflows import Context
31
+ from workflows.context import JsonSerializer
32
+ from workflows.handler import WorkflowHandler
33
+
34
# Router collecting every deployment management endpoint under /deployments.
deployments_router = APIRouter(
    prefix="/deployments",
)
# Module-level logger for this router.
logger = logging.getLogger(__name__)
38
+
39
+
40
def deployment(deployment_name: str) -> Deployment:
    """FastAPI dependency resolving a deployment name to a live Deployment.

    Raises HTTP 404 when no deployment with that name is registered
    with the manager.
    """
    found = manager.get_deployment(deployment_name)
    if found is not None:
        return found
    raise HTTPException(status_code=404, detail="Deployment not found")
46
+
47
+
48
@deployments_router.get("/")
async def read_deployments() -> list[DeploymentDefinition]:
    """Returns a list of active deployments."""
    names = manager._deployments.keys()
    return [DeploymentDefinition(name=name) for name in names]
52
+
53
+
54
@deployments_router.get("/{deployment_name}")
async def read_deployment(
    deployment: Annotated[Deployment, Depends(deployment)],
) -> DeploymentDefinition:
    """Returns the details of a specific deployment.

    A missing deployment is already turned into a 404 by the dependency.
    """
    return DeploymentDefinition(name=deployment.name)
61
+
62
+
63
@deployments_router.post("/create")
async def create_deployment(
    base_path: str = ".",
    config_file: UploadFile = File(...),
    reload: bool = False,
    local: bool = False,
) -> DeploymentDefinition:
    """Creates a new deployment from an uploaded YAML configuration file."""
    raw_config = await config_file.read()
    config = DeploymentConfig.from_yaml_bytes(raw_config)
    await manager.deploy(config, base_path, reload, local)
    return DeploymentDefinition(name=config.name)
75
+
76
+
77
@deployments_router.post("/{deployment_name}/tasks/run")
async def create_deployment_task(
    deployment: Annotated[Deployment, Depends(deployment)],
    task_definition: TaskDefinition,
    session_id: str | None = None,
) -> JSONResponse:
    """Create a task for the deployment, wait for result and delete associated session.

    Args:
        task_definition: the service to run, plus a JSON-encoded kwargs
            payload in its `input` field.
        session_id: optional existing session to run the workflow in.

    Raises:
        HTTPException: 400 when no service can be resolved, 404 when the
            resolved service is not part of this deployment.
    """
    # Fall back to the deployment's default service when none was given.
    service_id = task_definition.service_id or deployment.default_service
    if service_id is None:
        raise HTTPException(
            status_code=400,
            detail="Service is None and deployment has no default service",
        )

    if service_id not in deployment.service_names:
        # Bug fix: report the resolved service id and the real deployment name;
        # the old message used task_definition.service_id (possibly None) and
        # the literal text 'deployment_name'.
        raise HTTPException(
            status_code=404,
            detail=f"Service '{service_id}' not found in deployment '{deployment.name}'",
        )

    # `input` is a JSON object of keyword arguments for the workflow run.
    run_kwargs = json.loads(task_definition.input) if task_definition.input else {}
    result = await deployment.run_workflow(
        service_id=service_id, session_id=session_id, **run_kwargs
    )
    return JSONResponse(result)
103
+
104
+
105
@deployments_router.post("/{deployment_name}/tasks/create")
async def create_deployment_task_nowait(
    deployment: Annotated[Deployment, Depends(deployment)],
    task_definition: TaskDefinition,
    session_id: str | None = None,
) -> TaskDefinition:
    """Create a task for the deployment but don't wait for result.

    Returns the task definition updated with the generated task and
    session ids so the caller can poll for events/results later.

    Raises:
        HTTPException: 400 when no service can be resolved, 404 when the
            resolved service is not part of this deployment.
    """
    # Fall back to the deployment's default service when none was given.
    service_id = task_definition.service_id or deployment.default_service
    if service_id is None:
        raise HTTPException(
            status_code=400,
            detail="Service is None and deployment has no default service",
        )

    if service_id not in deployment.service_names:
        # Bug fix: report the resolved service id and the real deployment name;
        # the old message used task_definition.service_id (possibly None) and
        # the literal text 'deployment_name'.
        raise HTTPException(
            status_code=404,
            detail=f"Service '{service_id}' not found in deployment '{deployment.name}'",
        )

    # `input` is a JSON object of keyword arguments for the workflow run.
    run_kwargs = json.loads(task_definition.input) if task_definition.input else {}
    handler_id, session_id = deployment.run_workflow_no_wait(
        service_id=service_id, session_id=session_id, **run_kwargs
    )

    task_definition.session_id = session_id
    task_definition.task_id = handler_id

    return task_definition
134
+
135
+
136
@deployments_router.post("/{deployment_name}/tasks/{task_id}/events")
async def send_event(
    deployment: Annotated[Deployment, Depends(deployment)],
    task_id: str,
    session_id: str,
    event_def: EventDefinition,
) -> EventDefinition:
    """Send a human response event to a service for a specific task and session.

    Raises:
        HTTPException: 404 when `session_id` does not match an active session
            (previously an unhandled KeyError surfaced as a 500).
    """
    # Bug fix: unknown session ids used to raise KeyError -> HTTP 500.
    ctx = deployment._contexts.get(session_id)
    if ctx is None:
        raise HTTPException(status_code=404, detail="Session not found")

    serializer = JsonSerializer()
    event = serializer.deserialize(event_def.event_obj_str)
    ctx.send_event(event)

    return event_def
150
+
151
+
152
@deployments_router.get("/{deployment_name}/tasks/{task_id}/events")
async def get_events(
    deployment: Annotated[Deployment, Depends(deployment)],
    session_id: str,
    task_id: str,
    raw_event: bool = False,
) -> StreamingResponse:
    """
    Get the stream of events from a given task and session.

    Args:
        raw_event (bool, default=False): Whether to return the raw event object
            or just the event data.

    Raises:
        HTTPException: 404 when `task_id` does not match a known handler
            (previously an unhandled KeyError surfaced as a 500).
    """
    # Bug fix: unknown task ids used to raise KeyError -> HTTP 500.
    if task_id not in deployment._handlers:
        raise HTTPException(status_code=404, detail="Task not found")

    async def event_stream(handler: WorkflowHandler) -> AsyncGenerator[str, None]:
        serializer = JsonSerializer()
        # this will hang indefinitely if done and queue is empty. Bail
        if (
            handler.is_done()
            and handler.ctx is not None
            and handler.ctx.streaming_queue.empty()
        ):
            return
        async for event in handler.stream_events():
            data = json.loads(serializer.serialize(event))
            if raw_event:
                yield json.dumps(data) + "\n"
            else:
                yield json.dumps(data.get("value")) + "\n"
            # Brief pause keeps the event loop cooperative between events.
            await asyncio.sleep(0.01)
        # Await the handler so workflow errors propagate once the stream ends.
        await handler

    return StreamingResponse(
        event_stream(deployment._handlers[task_id]),
        media_type="application/x-ndjson",
    )
189
+
190
+
191
@deployments_router.get("/{deployment_name}/tasks/{task_id}/results")
async def get_task_result(
    deployment: Annotated[Deployment, Depends(deployment)],
    session_id: str,
    task_id: str,
) -> TaskResult | None:
    """Get the task result associated with a task and session.

    Blocks until the underlying workflow handler completes.

    Raises:
        HTTPException: 404 when `task_id` does not match a known handler
            (previously an unhandled KeyError surfaced as a 500).
    """
    # Bug fix: unknown task ids used to raise KeyError -> HTTP 500.
    handler = deployment._handlers.get(task_id)
    if handler is None:
        raise HTTPException(status_code=404, detail="Task not found")
    return TaskResult(task_id=task_id, history=[], result=await handler)
201
+
202
+
203
@deployments_router.get("/{deployment_name}/tasks")
async def get_tasks(
    deployment: Annotated[Deployment, Depends(deployment)],
) -> list[TaskDefinition]:
    """Get all the tasks from all the sessions in a given deployment.

    Only tasks whose workflow handler is still running are returned.
    """
    return [
        TaskDefinition(
            task_id=task_id,
            input=deployment._handler_inputs[task_id],
        )
        for task_id, handler in deployment._handlers.items()
        if not handler.is_done()
    ]
221
+
222
+
223
@deployments_router.get("/{deployment_name}/sessions")
async def get_sessions(
    deployment: Annotated[Deployment, Depends(deployment)],
) -> list[SessionDefinition]:
    """Get the active sessions in a deployment and service."""
    return [SessionDefinition(session_id=sid) for sid in deployment._contexts]
230
+
231
+
232
@deployments_router.get("/{deployment_name}/sessions/{session_id}")
async def get_session(
    deployment: Annotated[Deployment, Depends(deployment)], session_id: str
) -> SessionDefinition:
    """Get the definition of a session by ID.

    Raises:
        HTTPException: 404 when the session does not exist (the original
            echoed back any id without checking it).
    """
    # Bug fix: validate the session actually exists instead of blindly
    # returning a definition for an arbitrary id.
    if session_id not in deployment._contexts:
        raise HTTPException(status_code=404, detail="Session not found")
    return SessionDefinition(session_id=session_id)
239
+
240
+
241
@deployments_router.post("/{deployment_name}/sessions/create")
async def create_session(
    deployment: Annotated[Deployment, Depends(deployment)],
) -> SessionDefinition:
    """Create a new session for a deployment.

    The session's Context is bound to the deployment's default service.

    Raises:
        HTTPException: 400 when the deployment has no default service
            (previously a KeyError surfaced as a 500).
    """
    # Bug fix: a None default_service used to raise KeyError -> HTTP 500.
    if deployment.default_service is None:
        raise HTTPException(
            status_code=400, detail="Deployment has no default service"
        )

    workflow = deployment._workflow_services[deployment.default_service]
    session_id = generate_id()
    deployment._contexts[session_id] = Context(workflow)

    return SessionDefinition(session_id=session_id)
252
+
253
+
254
@deployments_router.post("/{deployment_name}/sessions/delete")
async def delete_session(
    deployment: Annotated[Deployment, Depends(deployment)], session_id: str
) -> None:
    """Delete a session from a deployment.

    The original docstring was copy-pasted from get_sessions; this endpoint
    removes the session's Context.

    Raises:
        HTTPException: 404 when the session does not exist (previously an
            unhandled KeyError surfaced as a 500).
    """
    # Bug fix: pop() on a missing key used to raise KeyError -> HTTP 500.
    if deployment._contexts.pop(session_id, None) is None:
        raise HTTPException(status_code=404, detail="Session not found")
261
+
262
+
263
async def _ws_proxy(ws: WebSocket, upstream_url: str) -> None:
    """Proxy WebSocket connection to upstream server.

    Accepts the client connection, opens a second WebSocket to
    `upstream_url`, then pumps messages in both directions until either
    side disconnects. Errors on either leg are logged, never raised to
    the caller, and the client socket is always closed on exit.
    """
    await ws.accept()

    # Forward most headers except WebSocket-specific ones — the upstream
    # handshake generates its own values for these.
    header_blacklist = {
        "host",
        "connection",
        "upgrade",
        "sec-websocket-key",
        "sec-websocket-version",
        "sec-websocket-extensions",
    }
    hdrs = [(k, v) for k, v in ws.headers.items() if k.lower() not in header_blacklist]

    try:
        # Parse subprotocols if present so the upstream can negotiate the
        # same one the client asked for.
        subprotocols: Optional[List[websockets.Subprotocol]] = None
        if "sec-websocket-protocol" in ws.headers:
            # Parse comma-separated subprotocols
            subprotocols = [
                websockets.Subprotocol(p.strip())
                for p in ws.headers["sec-websocket-protocol"].split(",")
            ]

        # Open upstream WebSocket connection. Timeouts/pings are disabled:
        # lifetime is governed by the client connection instead.
        async with websockets.connect(
            upstream_url,
            additional_headers=hdrs,
            subprotocols=subprotocols,
            open_timeout=None,
            ping_interval=None,
        ) as upstream:

            async def client_to_upstream() -> None:
                # Pump client frames (text or bytes) to the upstream until
                # the client disconnects.
                try:
                    while True:
                        msg = await ws.receive()
                        if msg["type"] == "websocket.receive":
                            if "text" in msg:
                                await upstream.send(msg["text"])
                            elif "bytes" in msg:
                                await upstream.send(msg["bytes"])
                        elif msg["type"] == "websocket.disconnect":
                            break
                except Exception as e:
                    logger.debug(f"Client to upstream connection ended: {e}")

            async def upstream_to_client() -> None:
                # Pump upstream frames back to the client, preserving the
                # text/binary distinction.
                try:
                    async for message in upstream:
                        if isinstance(message, str):
                            await ws.send_text(message)
                        else:
                            await ws.send_bytes(message)
                except Exception as e:
                    logger.debug(f"Upstream to client connection ended: {e}")

            # Pump both directions concurrently; return_exceptions keeps one
            # leg's failure from cancelling the gather with a raise.
            await asyncio.gather(
                client_to_upstream(), upstream_to_client(), return_exceptions=True
            )

    except Exception as e:
        logger.error(f"WebSocket proxy error: {e}")
    finally:
        # Always close the client side; ignore errors if already closed.
        try:
            await ws.close()
        except Exception as e:
            logger.debug(f"Error closing client connection: {e}")
333
+
334
+
335
@deployments_router.websocket("/{deployment_name}/ui/{path:path}")
@deployments_router.websocket("/{deployment_name}/ui")
async def websocket_proxy(
    websocket: WebSocket,
    deployment: Annotated[Deployment, Depends(deployment)],
    path: str | None = None,
) -> None:
    """Proxy a client WebSocket to the deployment's UI server.

    Rebuilds the request path (and query string) against the locally
    running UI server's port and delegates the actual pumping to
    `_ws_proxy`. Raises 404 when the deployment has no UI configured.
    """
    if deployment._config.ui is None:
        raise HTTPException(status_code=404, detail="Deployment has no ui configured")

    # Build the upstream WebSocket URL using FastAPI's extracted path parameter
    slash_path = f"/{path}" if path else ""
    upstream_path = f"/deployments/{deployment.name}/ui{slash_path}"

    # Convert to WebSocket URL; the UI server is assumed to listen on
    # localhost at the configured port.
    upstream_url = f"ws://localhost:{deployment._config.ui.port}{upstream_path}"
    if websocket.url.query:
        upstream_url += f"?{websocket.url.query}"

    logger.debug(f"Proxying WebSocket {websocket.url} -> {upstream_url}")

    await _ws_proxy(websocket, upstream_url)
357
+
358
+
359
@deployments_router.api_route(
    "/{deployment_name}/ui/{path:path}",
    methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH"],
)
@deployments_router.api_route(
    "/{deployment_name}/ui",
    methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH"],
)
async def proxy(
    request: Request,
    deployment: Annotated[Deployment, Depends(deployment)],
    path: str | None = None,
) -> StreamingResponse:
    """Reverse-proxy HTTP requests to the deployment's UI server.

    Streams both the request body (uploads) and the response body
    (downloads) so large payloads never need to be buffered in memory.
    Maps upstream connection/timeout failures to 502/504.
    """
    if deployment._config.ui is None:
        raise HTTPException(status_code=404, detail="Deployment has no ui configured")

    # Build the upstream URL using FastAPI's extracted path parameter
    slash_path = f"/{path}" if path else ""
    upstream_path = f"/deployments/{deployment.name}/ui{slash_path}"

    # Preserve the original query string on the rebuilt URL.
    upstream_url = httpx.URL(
        f"http://localhost:{deployment._config.ui.port}{upstream_path}"
    ).copy_with(params=request.query_params)

    # Debug logging
    logger.debug(f"Proxying {request.method} {request.url} -> {upstream_url}")

    # Strip hop-by-hop headers + host: these describe the client<->proxy
    # connection and must not be forwarded (RFC 9110 §7.6.1).
    hop_by_hop = {
        "connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",  # codespell:ignore
        "trailers",
        "transfer-encoding",
        "upgrade",
        "host",
    }
    headers = {k: v for k, v in request.headers.items() if k.lower() not in hop_by_hop}

    try:
        # No context manager here on purpose: the client must outlive this
        # function so the StreamingResponse can keep reading; the cleanup
        # BackgroundTask closes it when the response is fully sent.
        client = httpx.AsyncClient(timeout=None)

        req = client.build_request(
            request.method,
            upstream_url,
            headers=headers,
            content=request.stream(),  # stream uploads
        )
        upstream = await client.send(req, stream=True)

        # Strip hop-by-hop headers from the upstream response as well.
        resp_headers = {
            k: v for k, v in upstream.headers.items() if k.lower() not in hop_by_hop
        }

        # Close client when upstream response is done
        async def cleanup() -> None:
            await upstream.aclose()
            await client.aclose()

        return StreamingResponse(
            upstream.aiter_raw(),  # stream downloads
            status_code=upstream.status_code,
            headers=resp_headers,
            background=BackgroundTask(cleanup),  # tidy up when finished
        )

    except httpx.ConnectError:
        raise HTTPException(status_code=502, detail="Upstream server unavailable")
    except httpx.TimeoutException:
        raise HTTPException(status_code=504, detail="Upstream server timeout")
    except Exception as e:
        logger.error(f"Proxy error: {e}")
        raise HTTPException(status_code=502, detail="Proxy error")
@@ -0,0 +1,40 @@
1
+ import httpx
2
+ from fastapi import APIRouter
3
+ from fastapi.exceptions import HTTPException
4
+ from fastapi.responses import PlainTextResponse
5
+ from llama_deploy.appserver.server import manager
6
+ from llama_deploy.appserver.settings import settings
7
+ from llama_deploy.appserver.types import Status, StatusEnum
8
+
9
# Router exposing the health and metrics endpoints under /status.
status_router = APIRouter(
    prefix="/status",
)
12
+
13
+
14
@status_router.get("/")
async def status() -> Status:
    """Report server health together with the currently managed deployments."""
    deployment_names = list(manager._deployments.keys())
    return Status(
        status=StatusEnum.HEALTHY,
        max_deployments=manager._max_deployments,
        deployments=deployment_names,
        status_message="",
    )
22
+
23
+
24
@status_router.get("/metrics")
async def metrics() -> PlainTextResponse:
    """Proxies the Prometheus metrics endpoint through the API Server.

    This endpoint is mostly used in serverless environments where the LlamaDeploy
    container cannot expose more than one port (e.g. Knative, Google Cloud Run).
    If Prometheus is not enabled, this endpoint returns an empty HTTP-204 response.
    """
    if not settings.prometheus_enabled:
        # Nothing to report when the metrics server isn't running.
        return PlainTextResponse(status_code=204)

    metrics_url = f"http://127.0.0.1:{settings.prometheus_port}/"
    try:
        async with httpx.AsyncClient() as client:
            upstream = await client.get(metrics_url)
    except httpx.RequestError as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return PlainTextResponse(content=upstream.text)
@@ -0,0 +1,141 @@
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ from pathlib import Path
5
+
6
+ import uvicorn
7
+ import yaml
8
+ from prometheus_client import start_http_server
9
+
10
+ from llama_deploy.appserver.settings import settings
11
+
12
# Folder name (under the work dir) the remote repository is cloned into.
CLONED_REPO_FOLDER = Path("cloned_repo")
# Default working directory used when WORK_DIR is not set in the environment.
RC_PATH = Path("/data")
14
+
15
+
16
+ def run_process(args: list[str], cwd: str | None = None) -> None:
17
+ kwargs = {
18
+ "args": args,
19
+ "capture_output": True,
20
+ "text": True,
21
+ "check": False,
22
+ }
23
+ if cwd:
24
+ kwargs["cwd"] = cwd
25
+ process = subprocess.run(**kwargs) # type: ignore
26
+ if process.returncode != 0:
27
+ stderr = process.stderr or ""
28
+ raise Exception(stderr)
29
+
30
+
31
def setup_repo(
    work_dir: Path, source: str, token: str | None = None, force: bool = False
) -> None:
    """Clone (or refresh) the repository described by `source` into work_dir.

    A full clone is used so any kind of ref — tag, branch, commit or short
    commit — can be checked out afterwards without extra complexity.
    With force=True a pre-existing clone is discarded first.
    """
    repo_url, ref_name = _parse_source(source, token)
    dest_dir = work_dir / CLONED_REPO_FOLDER
    dest = str(dest_dir.absolute())

    if force and dest_dir.exists():
        shutil.rmtree(dest_dir)

    if dest_dir.exists():
        # Reuse the existing clone and just refresh its remote refs.
        run_process(["git", "fetch", "origin"], cwd=dest)
    else:
        run_process(
            ["git", "clone", repo_url, dest], cwd=str(work_dir.absolute())
        )

    # Checkout the ref and let git resolve it; with no ref we stay on
    # whatever the clone gave us (the default branch).
    if ref_name:
        run_process(["git", "checkout", ref_name], cwd=dest)
53
+
54
+
55
+ def _is_valid_uri(uri: str) -> bool:
56
+ """Check if string looks like a valid URI"""
57
+ return "://" in uri and "/" in uri.split("://", 1)[1]
58
+
59
+
60
+ def _parse_source(source: str, pat: str | None = None) -> tuple[str, str | None]:
61
+ """Accept Github urls like https://github.com/run-llama/llama_deploy.git@main
62
+ or https://user:token@github.com/run-llama/llama_deploy.git@v1.0.0
63
+ Returns the final URL (with auth if needed) and ref name (branch, tag, or commit SHA)"""
64
+
65
+ # Try splitting on last @ to see if we have a ref specifier
66
+ url = source
67
+ ref_name = None
68
+
69
+ if "@" in source:
70
+ potential_url, potential_ref = source.rsplit("@", 1)
71
+ if _is_valid_uri(potential_url):
72
+ url = potential_url
73
+ ref_name = potential_ref
74
+
75
+ # Inject PAT auth if provided and URL doesn't already have auth
76
+ if pat and "://" in url and "@" not in url:
77
+ url = url.replace("https://", f"https://{pat}@")
78
+
79
+ return url, ref_name
80
+
81
+
82
def copy_sources(work_dir: Path, deployment_file_path: Path) -> None:
    """Copy the app folder (the deployment file's siblings) into work_dir."""
    source_folder = deployment_file_path.parent
    for entry in source_folder.iterdir():
        if entry.is_dir():
            # Merge directories into the destination, overwriting any
            # files that already exist there.
            destination = f"{work_dir.absolute()}/{entry.name}"
            shutil.copytree(entry, destination, dirs_exist_ok=True)
        else:
            # copy2 preserves file metadata (mtime, permissions).
            shutil.copy2(entry, str(work_dir))
93
+
94
+
95
if __name__ == "__main__":
    # Bootstrap entry point: optionally start the Prometheus exporter, clone
    # the configured repository, locate/rewrite the deployment file, copy the
    # sources into the work dir and finally launch the API server.
    if settings.prometheus_enabled:
        start_http_server(settings.prometheus_port)

    repo_url = os.environ.get("REPO_URL", "")
    if not repo_url.startswith("https://") and not repo_url.startswith("http://"):
        # Bug fix: the original message read "Git remote must HTTP(S)".
        raise ValueError("Git remote must be HTTP(S)")
    repo_token = os.environ.get("GITHUB_PAT")
    work_dir = Path(os.environ.get("WORK_DIR", RC_PATH))
    work_dir.mkdir(exist_ok=True, parents=True)

    setup_repo(work_dir, repo_url, repo_token)

    if not settings.deployment_file_path:
        # first fall back to non LLAMA_DEPLOY_APISERVER_ prefixed env var
        # (settings requires the prefix)
        settings.deployment_file_path = os.environ.get(
            "DEPLOYMENT_FILE_PATH", "deployment.yml"
        )
    deployment_file_path = settings.deployment_file_path
    deployment_file_abspath = work_dir / CLONED_REPO_FOLDER / deployment_file_path
    if not deployment_file_abspath.exists():
        raise ValueError(f"File {deployment_file_abspath} does not exist")

    deployment_override_name = os.environ.get("DEPLOYMENT_NAME")
    if deployment_override_name:
        with open(deployment_file_abspath) as f:
            # Replace deployment name with the overridden value
            data = yaml.safe_load(f)

        # Avoid failing here if the deployment config file has a wrong format,
        # let's do nothing if there's no field `name`
        if "name" in data:
            data["name"] = deployment_override_name
            with open(deployment_file_abspath, "w") as f:
                yaml.safe_dump(data, f)

    copy_sources(work_dir, deployment_file_abspath)
    shutil.rmtree(work_dir / CLONED_REPO_FOLDER)

    # update rc_path directly, as it has already been loaded, so setting the
    # environment variable doesn't work
    settings.rc_path = work_dir
    uvicorn.run(
        "llama_deploy.appserver.app:app",
        host=settings.host,
        port=settings.port,
    )
@@ -0,0 +1,60 @@
1
+ import asyncio
2
+ import logging
3
+ from contextlib import asynccontextmanager
4
+ from typing import Any, AsyncGenerator
5
+
6
+ from fastapi import FastAPI
7
+
8
+ from .deployment import Manager
9
+ from .deployment_config_parser import DeploymentConfig
10
+ from .settings import settings
11
+ from .stats import apiserver_state
12
+
13
# Reuse uvicorn's logger so messages blend into the server's log output.
logger = logging.getLogger("uvicorn.info")
# Single process-wide deployment manager shared by all routers.
manager = Manager()
15
+
16
+
17
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, Any]:
    """FastAPI lifespan hook: start the deployment manager, deploy any
    configurations found in the rc folder, then tear down on shutdown."""
    apiserver_state.state("starting")

    manager.set_deployments_path(settings.deployments_path)
    # Run the manager loop in the background; the sleep(0) yields control
    # so the task actually starts before we continue.
    t = asyncio.create_task(manager.serve())
    await asyncio.sleep(0)

    logger.info(f"deployments folder: {settings.deployments_path}")
    logger.info(f"rc folder: {settings.rc_path}")

    if settings.rc_path.exists():
        if settings.deployment_file_path:
            logger.info(
                f"Browsing the rc folder {settings.rc_path} for deployment file {settings.deployment_file_path}"
            )
        else:
            logger.info(
                f"Browsing the rc folder {settings.rc_path} for deployments to start"
            )

        # if a deployment_file_path is provided, use it, otherwise glob all
        # .yml/.yaml files (match both extensions)
        files = (
            [settings.rc_path / settings.deployment_file_path]
            if settings.deployment_file_path
            else [
                x for x in settings.rc_path.iterdir() if x.suffix in (".yml", ".yaml")
            ]
        )
        for yaml_file in files:
            # A single bad config must not prevent the server from starting:
            # log the failure and keep going.
            try:
                logger.info(f"Deploying startup configuration from {yaml_file}")
                config = DeploymentConfig.from_yaml(yaml_file)
                await manager.deploy(config, base_path=str(settings.rc_path))
            except Exception as e:
                logger.error(f"Failed to deploy {yaml_file}: {str(e)}")

    apiserver_state.state("running")
    yield

    # Shutdown: stop the background manager loop.
    t.cancel()

    apiserver_state.state("stopped")