llama-deploy-appserver 0.2.7a1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their public registries. It is provided for informational purposes only.
Files changed (38)
  1. llama_deploy/appserver/app.py +274 -26
  2. llama_deploy/appserver/bootstrap.py +55 -25
  3. llama_deploy/appserver/configure_logging.py +189 -0
  4. llama_deploy/appserver/correlation_id.py +24 -0
  5. llama_deploy/appserver/deployment.py +70 -412
  6. llama_deploy/appserver/deployment_config_parser.py +12 -130
  7. llama_deploy/appserver/interrupts.py +55 -0
  8. llama_deploy/appserver/process_utils.py +214 -0
  9. llama_deploy/appserver/py.typed +0 -0
  10. llama_deploy/appserver/routers/__init__.py +4 -3
  11. llama_deploy/appserver/routers/deployments.py +163 -382
  12. llama_deploy/appserver/routers/status.py +4 -31
  13. llama_deploy/appserver/routers/ui_proxy.py +255 -0
  14. llama_deploy/appserver/settings.py +99 -49
  15. llama_deploy/appserver/types.py +0 -3
  16. llama_deploy/appserver/workflow_loader.py +431 -0
  17. llama_deploy/appserver/workflow_store/agent_data_store.py +100 -0
  18. llama_deploy/appserver/workflow_store/keyed_lock.py +32 -0
  19. llama_deploy/appserver/workflow_store/lru_cache.py +49 -0
  20. llama_deploy_appserver-0.3.0.dist-info/METADATA +25 -0
  21. llama_deploy_appserver-0.3.0.dist-info/RECORD +24 -0
  22. {llama_deploy_appserver-0.2.7a1.dist-info → llama_deploy_appserver-0.3.0.dist-info}/WHEEL +1 -1
  23. llama_deploy/appserver/__main__.py +0 -14
  24. llama_deploy/appserver/client/__init__.py +0 -3
  25. llama_deploy/appserver/client/base.py +0 -30
  26. llama_deploy/appserver/client/client.py +0 -49
  27. llama_deploy/appserver/client/models/__init__.py +0 -4
  28. llama_deploy/appserver/client/models/apiserver.py +0 -356
  29. llama_deploy/appserver/client/models/model.py +0 -82
  30. llama_deploy/appserver/run_autodeploy.py +0 -141
  31. llama_deploy/appserver/server.py +0 -60
  32. llama_deploy/appserver/source_managers/__init__.py +0 -5
  33. llama_deploy/appserver/source_managers/base.py +0 -33
  34. llama_deploy/appserver/source_managers/git.py +0 -48
  35. llama_deploy/appserver/source_managers/local.py +0 -51
  36. llama_deploy/appserver/tracing.py +0 -237
  37. llama_deploy_appserver-0.2.7a1.dist-info/METADATA +0 -23
  38. llama_deploy_appserver-0.2.7a1.dist-info/RECORD +0 -28
--- a/llama_deploy/appserver/app.py
+++ b/llama_deploy/appserver/app.py
@@ -1,24 +1,143 @@
+import argparse
 import logging
 import os
+import threading
+import time
+import webbrowser
+from contextlib import asynccontextmanager
+from importlib.metadata import version
+from pathlib import Path
+from typing import Any, AsyncGenerator, Literal, cast
 
+import uvicorn
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.requests import Request
-from fastapi.responses import JSONResponse, RedirectResponse
-
-from .routers import deployments_router, status_router
-from .server import lifespan, manager
-from .settings import settings
-from .tracing import configure_tracing
+from fastapi.responses import RedirectResponse
+from llama_deploy.appserver.configure_logging import (
+    add_log_middleware,
+    setup_logging,
+)
+from llama_deploy.appserver.deployment_config_parser import (
+    get_deployment_config,
+)
+from llama_deploy.appserver.routers.deployments import (
+    create_base_router,
+    create_deployments_router,
+)
+from llama_deploy.appserver.routers.ui_proxy import (
+    create_ui_proxy_router,
+    mount_static_files,
+)
+from llama_deploy.appserver.settings import configure_settings, settings
+from llama_deploy.appserver.workflow_loader import (
+    _exclude_venv_warning,
+    build_ui,
+    inject_appserver_into_target,
+    install_ui,
+    load_environment_variables,
+    load_workflows,
+    start_dev_ui_process,
+)
+from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
+from prometheus_fastapi_instrumentator import Instrumentator
+from starlette.applications import Starlette
+from workflows.server import WorkflowServer
 
+from .deployment import Deployment
+from .interrupts import shutdown_event
+from .process_utils import run_process
+from .routers import health_router
+from .stats import apiserver_state
 
 logger = logging.getLogger("uvicorn.info")
 
+# Auto-configure logging on import when requested (e.g., uvicorn reload workers)
+if os.getenv("LLAMA_DEPLOY_AUTO_LOGGING", "0") == "1":
+    setup_logging(os.getenv("LOG_LEVEL", "INFO"))
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI) -> AsyncGenerator[None, Any]:
+    shutdown_event.clear()
+    apiserver_state.state("starting")
+    config = get_deployment_config()
+
+    workflows = load_workflows(config)
+    deployment = Deployment(workflows)
+    base_router = create_base_router(config.name)
+    deploy_router = create_deployments_router(config.name, deployment)
+    server = deployment.mount_workflow_server(app)
+
+    app.include_router(base_router)
+    app.include_router(deploy_router)
+
+    _setup_openapi(config.name, app, server)
+
+    if config.ui is not None:
+        if settings.proxy_ui:
+            ui_router = create_ui_proxy_router(config.name, settings.proxy_ui_port)
+            app.include_router(ui_router)
+        else:
+            # otherwise serve the pre-built UI if available
+            mount_static_files(app, config, settings)
+
+        @app.get(f"/deployments/{config.name}", include_in_schema=False)
+        @app.get(f"/deployments/{config.name}/", include_in_schema=False)
+        @app.get(f"/deployments/{config.name}/ui", include_in_schema=False)
+        def redirect_to_ui() -> RedirectResponse:
+            return RedirectResponse(f"/deployments/{config.name}/ui/")
+    else:
+
+        @app.get(f"/deployments/{config.name}", include_in_schema=False)
+        @app.get(f"/deployments/{config.name}/", include_in_schema=False)
+        def redirect_to_docs() -> RedirectResponse:
+            return RedirectResponse(f"/deployments/{config.name}/docs")
+
+    apiserver_state.state("running")
+    # terrible sad kludge
+    async with server._lifespan(cast(Starlette, {})):
+        yield
 
-app = FastAPI(lifespan=lifespan)
+    apiserver_state.state("stopped")
+
+
+def _setup_openapi(name: str, app: FastAPI, server: WorkflowServer) -> None:
+    """
+    Extends the FastAPI-generated OpenAPI schema with the Starlette-generated schema.
+    """
+    schema_title = "Llama Deploy App Server"
+    app_version = version("llama-deploy-appserver")
+
+    prefix = f"/deployments/{name}"
+
+    schema = server.openapi_schema()
+    schema["info"]["title"] = schema_title
+    schema["info"]["version"] = app_version
+    paths = cast(dict, schema["paths"])
+    new_paths = {}
+    for path, methods in list(paths.items()):
+        if "head" in methods:
+            methods.pop("head")
+        new_paths[prefix + path] = methods
+
+    schema["paths"] = new_paths
+
+    def custom_openapi():
+        return schema
+
+    app.openapi = custom_openapi  # ty: ignore[invalid-assignment] - doesn't like us overwriting the method
+
+
+_config = get_deployment_config()
+_prefix = f"/deployments/{_config.name}"
+app = FastAPI(
+    lifespan=lifespan,
+    docs_url=_prefix + "/docs",
+    redoc_url=_prefix + "/redoc",
+    openapi_url=_prefix + "/openapi.json",
+)
+Instrumentator().instrument(app).expose(app, include_in_schema=False)
 
-# Setup tracing
-configure_tracing(settings)
 
 # Configure CORS middleware if the environment variable is set
 if not os.environ.get("DISABLE_CORS", False):
@@ -30,20 +149,149 @@ if not os.environ.get("DISABLE_CORS", False):
         allow_headers=["Content-Type", "Authorization"],
     )
 
-app.include_router(deployments_router)
-app.include_router(status_router)
-
-
-@app.get("/", response_model=None)
-async def root(request: Request) -> JSONResponse | RedirectResponse:
-    # for local dev, just redirect to the one UI if we have one
-    if len(manager.deployment_names) == 1:
-        deployment = manager.get_deployment(manager.deployment_names[0])
-        if deployment is not None and deployment._ui_server_process is not None:
-            return RedirectResponse(f"deployments/{deployment.name}/ui")
-    return JSONResponse(
-        {
-            "swagger_docs": f"{request.base_url}docs",
-            "status": f"{request.base_url}status",
-        }
+app.include_router(health_router)
+add_log_middleware(app)
+
+
+def open_browser_async(host: str, port: int) -> None:
+    def _open_with_delay() -> None:
+        time.sleep(1)
+        webbrowser.open(f"http://{host}:{port}")
+
+    threading.Thread(target=_open_with_delay).start()
+
+
+def prepare_server(
+    deployment_file: Path | None = None,
+    install: bool = False,
+    build: bool = False,
+) -> None:
+    configure_settings(
+        deployment_file_path=deployment_file or Path(DEFAULT_DEPLOYMENT_FILE_PATH)
+    )
+    load_environment_variables(get_deployment_config(), settings.resolved_config_parent)
+    if install:
+        config = get_deployment_config()
+        inject_appserver_into_target(config, settings.resolved_config_parent)
+        install_ui(config, settings.resolved_config_parent)
+    if build:
+        build_ui(settings.resolved_config_parent, get_deployment_config(), settings)
+
+
+def start_server(
+    proxy_ui: bool = False,
+    reload: bool = False,
+    cwd: Path | None = None,
+    deployment_file: Path | None = None,
+    open_browser: bool = False,
+    configure_logging: bool = True,
+) -> None:
+    # Configure via environment so uvicorn reload workers inherit the values
+    configure_settings(
+        proxy_ui=proxy_ui,
+        app_root=cwd,
+        deployment_file_path=deployment_file or Path(DEFAULT_DEPLOYMENT_FILE_PATH),
+        reload=reload,
+    )
+    load_environment_variables(get_deployment_config(), settings.resolved_config_parent)
+
+    ui_process = None
+    if proxy_ui:
+        ui_process = start_dev_ui_process(
+            settings.resolved_config_parent, settings, get_deployment_config()
+        )
+    try:
+        if open_browser:
+            open_browser_async(settings.host, settings.port)
+        # Ensure reload workers configure logging on import
+        os.environ["LLAMA_DEPLOY_AUTO_LOGGING"] = "1"
+        # Configure logging for the launcher process as well
+        if configure_logging:
+            setup_logging(os.getenv("LOG_LEVEL", "INFO"))
+        uvicorn.run(
+            "llama_deploy.appserver.app:app",
+            host=settings.host,
+            port=settings.port,
+            reload=reload,
+            timeout_graceful_shutdown=60,
+            access_log=False,
+            log_config=None,
+        )
+    finally:
+        if ui_process is not None:
+            ui_process.terminate()
+
+
+def start_server_in_target_venv(
+    proxy_ui: bool = False,
+    reload: bool = False,
+    cwd: Path | None = None,
+    deployment_file: Path | None = None,
+    open_browser: bool = False,
+    port: int | None = None,
+    ui_port: int | None = None,
+    log_level: str | None = None,
+    log_format: str | None = None,
+    persistence: Literal["memory", "local", "cloud"] | None = None,
+    local_persistence_path: str | None = None,
+    cloud_persistence_name: str | None = None,
+) -> None:
+    # Ensure settings reflect the intended working directory before computing paths
+
+    configure_settings(
+        app_root=cwd,
+        deployment_file_path=deployment_file,
+        reload=reload,
+        proxy_ui=proxy_ui,
+        persistence=persistence,
+        local_persistence_path=local_persistence_path,
+        cloud_persistence_name=cloud_persistence_name,
+    )
+    base_dir = cwd or Path.cwd()
+    path = settings.resolved_config_parent.relative_to(base_dir)
+    args = ["uv", "run", "--no-progress", "python", "-m", "llama_deploy.appserver.app"]
+    if proxy_ui:
+        args.append("--proxy-ui")
+    if reload:
+        args.append("--reload")
+    if deployment_file:
+        args.append("--deployment-file")
+        args.append(str(deployment_file))
+    if open_browser:
+        args.append("--open-browser")
+
+    env = os.environ.copy()
+    if port:
+        env["LLAMA_DEPLOY_APISERVER_PORT"] = str(port)
+    if ui_port:
+        env["LLAMA_DEPLOY_APISERVER_PROXY_UI_PORT"] = str(ui_port)
+    if log_level:
+        env["LOG_LEVEL"] = log_level
+    if log_format:
+        env["LOG_FORMAT"] = log_format
+
+    ret = run_process(
+        args,
+        cwd=path,
+        env=env,
+        line_transform=_exclude_venv_warning,
+    )
+
+    if ret != 0:
+        raise SystemExit(ret)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--proxy-ui", action="store_true")
+    parser.add_argument("--reload", action="store_true")
+    parser.add_argument("--deployment-file", type=Path)
+    parser.add_argument("--open-browser", action="store_true")
+
+    args = parser.parse_args()
+    start_server(
+        proxy_ui=args.proxy_ui,
+        reload=args.reload,
+        deployment_file=args.deployment_file,
+        open_browser=args.open_browser,
     )
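
With 0.3.0, `app.py` becomes a self-contained entry point: settings are resolved before the `FastAPI` instance is built, and the server can be launched programmatically or via `python -m llama_deploy.appserver.app`. A minimal sketch of driving the new entry points using the signatures above (the deployment-file path is illustrative):

```python
from pathlib import Path

from llama_deploy.appserver.app import prepare_server, start_server

# Hypothetical path for illustration; defaults to DEFAULT_DEPLOYMENT_FILE_PATH.
deployment_file = Path("llama_deploy.yaml")

# One-time setup: resolve settings, optionally install deps and build the UI.
prepare_server(deployment_file=deployment_file, install=True, build=True)

# Serve. reload=True makes uvicorn reload workers re-run logging setup on
# import (via LLAMA_DEPLOY_AUTO_LOGGING=1); open_browser opens
# http://{host}:{port} about a second after startup.
start_server(
    reload=True,
    deployment_file=deployment_file,
    open_browser=True,
)
```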
--- a/llama_deploy/appserver/bootstrap.py
+++ b/llama_deploy/appserver/bootstrap.py
@@ -1,43 +1,73 @@
 """
 Bootstraps an application from a remote github repository given environment variables.
 
-This just sets up the files from the repository. It's more of a build process, does not start an application.
+This just sets up the files from the repository. It's more of a build process.
 """
 
-import asyncio
+import os
+from pathlib import Path
+
+from llama_deploy.appserver.deployment_config_parser import get_deployment_config
+from llama_deploy.appserver.settings import (
+    BootstrapSettings,
+    configure_settings,
+    settings,
+)
+from llama_deploy.appserver.workflow_loader import (
+    build_ui,
+    inject_appserver_into_target,
+    install_ui,
+    load_environment_variables,
+)
 from llama_deploy.core.git.git_util import (
     clone_repo,
 )
-from pydantic import Field
-from pydantic_settings import BaseSettings, SettingsConfigDict
 
 
-class BootstrapSettings(BaseSettings):
-    model_config = SettingsConfigDict(env_prefix="LLAMA_DEPLOY_")
-    git_url: str = Field(..., description="The URL of the git repository to clone")
-    git_token: str | None = Field(
-        default=None, description="The token to use to clone the git repository"
-    )
-    git_ref: str | None = Field(
-        default=None, description="The git reference to checkout"
-    )
-    git_sha: str | None = Field(default=None, description="The git SHA to checkout")
-    deployment_file_path: str = Field(
-        default="llama_deploy.yaml", description="The path to the deployment file"
+def bootstrap_app_from_repo(
+    target_dir: str = "/opt/app",
+):
+    bootstrap_settings = BootstrapSettings()
+    # Needs the github url+auth, and the deployment file path
+    # clones the repo to a standard directory
+    # (eventually) runs the UI build process and moves that to a standard directory for a file server
+
+    repo_url = bootstrap_settings.repo_url
+    if repo_url is None:
+        raise ValueError("repo_url is required to bootstrap")
+    clone_repo(
+        repository_url=repo_url,
+        git_ref=bootstrap_settings.git_sha or bootstrap_settings.git_ref,
+        basic_auth=bootstrap_settings.auth_token,
+        dest_dir=target_dir,
     )
-    deployment_name: str | None = Field(
-        default=None, description="The name of the deployment"
+    # Ensure target_dir exists locally when running tests outside a container
+    os.makedirs(target_dir, exist_ok=True)
+    os.chdir(target_dir)
+    configure_settings(
+        app_root=Path(target_dir),
+        deployment_file_path=Path(bootstrap_settings.deployment_file_path),
    )
+    config = get_deployment_config()
+    load_environment_variables(config, settings.resolved_config_parent)
 
+    sdists = None
+    if bootstrap_settings.bootstrap_sdists:
+        sdists = [
+            Path(bootstrap_settings.bootstrap_sdists) / f
+            for f in os.listdir(bootstrap_settings.bootstrap_sdists)
+        ]
+        sdists = [f for f in sdists if f.is_file() and f.name.endswith(".tar.gz")]
+        if not sdists:
+            sdists = None
+    # Use the explicit base path rather than relying on global settings so tests
+    # can safely mock configure_settings without affecting call arguments.
+    inject_appserver_into_target(config, settings.resolved_config_parent, sdists)
+    install_ui(config, settings.resolved_config_parent)
+    build_ui(settings.resolved_config_parent, config, settings)
 
-async def main():
-    settings = BootstrapSettings()
-    # Needs the github url+auth, and the deployment file path
-    # clones the repo to a standard directory
-    # (eventually) runs the UI build process and moves that to a standard directory for a file server
-    clone_repo(settings.git_url, "/app/", settings.git_token)
     pass
 
 
 if __name__ == "__main__":
-    asyncio.run(main())
+    bootstrap_app_from_repo()
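
The bootstrap flow is now synchronous and reuses the same workflow-loader helpers as the dev server. A sketch of a container build step, assuming `BootstrapSettings` picks up the repository URL and auth token from the environment (the exact variable names are defined in the reworked `settings.py`, which is not shown in this diff):

```python
import os

from llama_deploy.appserver.bootstrap import bootstrap_app_from_repo

# Hypothetical variable name for illustration; BootstrapSettings defines the
# real env mapping in llama_deploy/appserver/settings.py.
os.environ.setdefault("LLAMA_DEPLOY_REPO_URL", "https://github.com/acme/app.git")

# Clones the repo into target_dir, injects the appserver into the target
# environment, then installs and builds the UI. Does not start the app.
bootstrap_app_from_repo(target_dir="/opt/app")
```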
--- /dev/null
+++ b/llama_deploy/appserver/configure_logging.py
@@ -0,0 +1,189 @@
+import logging
+import logging.config
+import os
+import time
+from contextlib import asynccontextmanager
+from contextvars import ContextVar
+from typing import Any, AsyncGenerator, Awaitable, Callable
+
+import structlog
+from fastapi import FastAPI, Request, Response
+from llama_deploy.appserver.correlation_id import (
+    create_correlation_id,
+    get_correlation_id,
+    set_correlation_id,
+)
+from llama_deploy.appserver.process_utils import should_use_color
+
+access_logger = logging.getLogger("app.access")
+
+
+def _get_or_create_correlation_id(request: Request) -> str:
+    return request.headers.get("X-Request-ID", create_correlation_id())
+
+
+def add_log_middleware(app: FastAPI):
+    @app.middleware("http")
+    async def add_log_id(
+        request: Request, call_next: Callable[[Request], Awaitable[Response]]
+    ):
+        set_correlation_id(_get_or_create_correlation_id(request))
+        return await call_next(request)
+
+    @app.middleware("http")
+    async def access_log_middleware(
+        request: Request, call_next: Callable[[Request], Awaitable[Response]]
+    ):
+        if _is_proxy_request(request):
+            return await call_next(request)
+        start = time.perf_counter()
+        response = await call_next(request)
+        dur_ms = (time.perf_counter() - start) * 1000
+        qp = str(request.query_params)
+        if qp:
+            qp = f"?{qp}"
+        access_logger.info(
+            f"{request.method} {request.url.path}{qp}",
+            extra={
+                "duration_ms": round(dur_ms, 2),
+                "status_code": response.status_code,
+            },
+        )
+        return response
+
+
+def _add_request_id(_: Any, __: str, event_dict: dict[str, Any]) -> dict[str, Any]:
+    req_id = get_correlation_id()
+    if req_id and "request_id" not in event_dict:
+        event_dict["request_id"] = req_id
+    return event_dict
+
+
+def _drop_uvicorn_color_message(
+    _: Any, __: str, event_dict: dict[str, Any]
+) -> dict[str, Any]:
+    # Uvicorn injects an ANSI-colored duplicate of the message under this key
+    event_dict.pop("color_message", None)
+    return event_dict
+
+
+def setup_logging(level: str = "INFO") -> None:
+    """
+    Configure console logging via structlog with a compact, dev-friendly format.
+    Includes request_id and respects logging's `extra`.
+    """
+    # Choose renderer and timestamp format based on LOG_FORMAT
+    log_format = os.getenv("LOG_FORMAT", "console").lower()
+    is_console = log_format == "console"
+
+    if log_format == "json":
+        renderer = structlog.processors.JSONRenderer()
+        timestamper = structlog.processors.TimeStamper(fmt="iso", key="timestamp")
+    else:
+        renderer = structlog.dev.ConsoleRenderer(colors=should_use_color())
+        timestamper = structlog.processors.TimeStamper(fmt="%H:%M:%S", key="timestamp")
+
+    pre_chain = [
+        structlog.contextvars.merge_contextvars,
+        structlog.stdlib.add_logger_name,
+        structlog.stdlib.add_log_level,
+        timestamper,
+        _add_request_id,
+    ]
+
+    # Ensure stdlib logs (foreign to structlog) also include `extra={...}` fields
+    # and that exceptions/stack info are rendered nicely (esp. for JSON format)
+    foreign_pre_chain = [
+        *pre_chain,
+        structlog.stdlib.ExtraAdder(),
+        *(  # otherwise ConsoleRenderer will render nice rich stack traces
+            [
+                structlog.processors.StackInfoRenderer(),
+                structlog.processors.format_exc_info,
+            ]
+            if not is_console
+            else []
+        ),
+        _drop_uvicorn_color_message,
+    ]
+
+    structlog.configure(
+        processors=[
+            *pre_chain,
+            structlog.stdlib.PositionalArgumentsFormatter(),
+            structlog.stdlib.ExtraAdder(),
+            structlog.processors.StackInfoRenderer(),
+            structlog.processors.format_exc_info,
+            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+        ],
+        logger_factory=structlog.stdlib.LoggerFactory(),
+        cache_logger_on_first_use=True,
+    )
+
+    handler = {
+        "class": "logging.StreamHandler",
+        "level": level,
+        "formatter": "console",
+        "stream": "ext://sys.stdout",
+    }
+
+    logging.config.dictConfig(
+        {
+            "version": 1,
+            "disable_existing_loggers": False,
+            "formatters": {
+                "console": {
+                    "()": structlog.stdlib.ProcessorFormatter,
+                    # With Rich, let it handle the final formatting; otherwise use our renderer
+                    "processor": renderer,
+                    "foreign_pre_chain": foreign_pre_chain,
+                }
+            },
+            "handlers": {"console": handler, "default": handler},
+            "root": {
+                "handlers": ["console"],
+                "level": level,
+            },
+            "loggers": {
+                "uvicorn.access": {  # disable access logging, we have our own access log
+                    "level": "WARNING",
+                    "handlers": ["console"],
+                    "propagate": False,
+                },
+            },
+        }
+    )
+
+    # Reduce noise from httpx globally, with fine-grained suppression controlled per-request
+    logging.getLogger("httpx").addFilter(_HttpxProxyNoiseFilter())
+
+
+#####################################################################################
+### Proxying through the fastapi server in dev mode is noisy, various suppressions
+###
+def _is_proxy_request(request: Request) -> bool:
+    parts = request.url.path.split("/")
+    return len(parts) >= 4 and parts[1] == "deployments" and parts[3] == "ui"
+
+
+_suppress_httpx_logging: ContextVar[bool] = ContextVar(
+    "suppress_httpx_logging", default=False
+)
+
+
+class _HttpxProxyNoiseFilter(logging.Filter):
+    def filter(self, record: logging.LogRecord) -> bool:
+        """Return False to drop httpx info/debug logs when suppression is active."""
+        try:
+            if record.name.startswith("httpx") and record.levelno <= logging.INFO:
+                return not _suppress_httpx_logging.get()
+        except Exception:
+            return True
+        return True
+
+
+@asynccontextmanager
+async def suppress_httpx_logs() -> AsyncGenerator[None, None]:
+    _suppress_httpx_logging.set(True)
+    yield
+    _suppress_httpx_logging.set(False)
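
Taken together, `setup_logging` routes all stdlib logging through a single structlog `ProcessorFormatter`, so `extra={...}` fields and the request id end up in both console and JSON output. A short usage sketch of the new module:

```python
import asyncio
import logging

from llama_deploy.appserver.configure_logging import (
    setup_logging,
    suppress_httpx_logs,
)

setup_logging("DEBUG")  # set LOG_FORMAT=json to switch to the JSONRenderer path

log = logging.getLogger("my.module")
# ExtraAdder copies `extra` fields into the structlog event dict.
log.info("workflow finished", extra={"duration_ms": 12.5})

async def poll_dev_ui() -> None:
    # httpx records at INFO and below are dropped while this context is active.
    async with suppress_httpx_logs():
        ...  # e.g. forward a request to the UI dev server with httpx

asyncio.run(poll_dev_ui())
```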
--- /dev/null
+++ b/llama_deploy/appserver/correlation_id.py
@@ -0,0 +1,24 @@
+import random
+import string
+from contextvars import ContextVar
+
+correlation_id_var: ContextVar[str] = ContextVar("correlation_id", default="")
+
+
+def get_correlation_id() -> str:
+    return correlation_id_var.get()
+
+
+def set_correlation_id(correlation_id: str) -> None:
+    correlation_id_var.set(correlation_id)
+
+
+def create_correlation_id() -> str:
+    return random_alphanumeric_string(8)
+
+
+_alphanumeric_chars = string.ascii_letters + string.digits
+
+
+def random_alphanumeric_string(length: int) -> str:
+    return "".join(random.choices(_alphanumeric_chars, k=length))