llama-deploy-appserver 0.3.0a5__py3-none-any.whl → 0.3.0a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

--- a/llama_deploy/appserver/app.py
+++ b/llama_deploy/appserver/app.py
@@ -5,12 +5,14 @@ import threading
 import time
 import webbrowser
 from contextlib import asynccontextmanager
+from importlib.metadata import version
 from pathlib import Path
 from typing import Any, AsyncGenerator
 
 import uvicorn
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
+from fastapi.openapi.utils import get_openapi
 from llama_deploy.appserver.deployment_config_parser import (
     get_deployment_config,
 )
@@ -26,14 +28,17 @@ from llama_deploy.appserver.settings import configure_settings, settings
 from llama_deploy.appserver.workflow_loader import (
     _exclude_venv_warning,
     build_ui,
-    find_python_pyproject,
     inject_appserver_into_target,
     install_ui,
     load_environment_variables,
     load_workflows,
     start_dev_ui_process,
 )
+from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from prometheus_fastapi_instrumentator import Instrumentator
+from starlette.routing import Route
+from starlette.schemas import SchemaGenerator
+from workflows.server import WorkflowServer
 
 from .deployment import Deployment
 from .process_utils import run_process
@@ -52,12 +57,28 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, Any]:
     deployment = Deployment(workflows)
     base_router = create_base_router(config.name)
     deploy_router = create_deployments_router(config.name, deployment)
+    server = deployment.create_workflow_server()
+
+    for route in server.app.routes:
+        # add routes directly rather than mounting, so that we can share a root
+        if isinstance(route, Route):
+            app.add_api_route(
+                f"/deployments/{config.name}{route.path}",
+                route.endpoint,
+                name=f"{config.name}_{route.name}",
+                methods=route.methods,
+                include_in_schema=True,  # change to false when schemas are added to workflow server
+                tags=["workflows"],
+            )
+
     app.include_router(base_router)
     app.include_router(deploy_router)
-    # proxy UI in dev mode
+
+    _setup_openapi(config.name, app, server)
+
     if config.ui is not None:
         if settings.proxy_ui:
-            ui_router = create_ui_proxy_router(config.name, config.ui.port)
+            ui_router = create_ui_proxy_router(config.name, settings.proxy_ui_port)
             app.include_router(ui_router)
         else:
             # otherwise serve the pre-built if available
@@ -69,6 +90,50 @@
     apiserver_state.state("stopped")
 
 
+def _setup_openapi(name: str, app: FastAPI, server: WorkflowServer) -> None:
+    """
+    extends the fastapi based openapi schema with starlette generated schema
+    """
+    schema_title = "Llama Deploy App Server"
+    app_version = version("llama-deploy-appserver")
+
+    prefix = f"/deployments/{name}"
+    workflow_routes = [x for x in server.app.routes if isinstance(x, Route)]
+    server_app_routes = []
+    for route in workflow_routes:
+        server_app_routes.append(
+            Route(
+                path=prefix + route.path,
+                endpoint=route.endpoint,
+                methods=route.methods,
+                name=route.name,
+                include_in_schema=route.include_in_schema,
+            )
+        )
+
+    schemas = SchemaGenerator(
+        {"openapi": "3.0.0", "info": {"title": schema_title, "version": app_version}}
+    )
+    starlette_schema = schemas.get_schema(server_app_routes)
+
+    def custom_openapi():
+        if app.openapi_schema:
+            return app.openapi_schema
+        openapi_schema = get_openapi(
+            title=schema_title,
+            version=app_version,
+            routes=app.routes + server_app_routes,
+        )
+        openapi_schema["paths"] = {
+            **openapi_schema["paths"],
+            **starlette_schema["paths"],
+        }
+        app.openapi_schema = openapi_schema
+        return app.openapi_schema
+
+    app.openapi = custom_openapi  # ty: ignore[invalid-assignment] - doesn't like us overwriting the method
+
+
 app = FastAPI(lifespan=lifespan)
 Instrumentator().instrument(app).expose(app)
 
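
Note: `_setup_openapi` follows FastAPI's documented recipe for extending the generated schema: build the document once with `get_openapi`, cache it on `app.openapi_schema`, and replace the `app.openapi` method. A minimal standalone sketch of that pattern, independent of the appserver (the route, title, version, and merged stub path below are illustrative):

    from fastapi import FastAPI
    from fastapi.openapi.utils import get_openapi

    app = FastAPI()

    @app.get("/ping")
    def ping() -> dict:
        return {"ok": True}

    def custom_openapi() -> dict:
        if app.openapi_schema:  # reuse the cached document after the first call
            return app.openapi_schema
        schema = get_openapi(title="Example", version="0.1.0", routes=app.routes)
        # Paths generated elsewhere (here, a hand-written stub) can be merged in
        schema["paths"]["/extra"] = {"get": {"responses": {"200": {"description": "OK"}}}}
        app.openapi_schema = schema
        return schema

    app.openapi = custom_openapi  # type: ignore[method-assign]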

@@ -85,19 +150,29 @@ if not os.environ.get("DISABLE_CORS", False):
 app.include_router(health_router)
 
 
+def open_browser_async(host: str, port: int) -> None:
+    def _open_with_delay() -> None:
+        time.sleep(1)
+        webbrowser.open(f"http://{host}:{port}")
+
+    threading.Thread(target=_open_with_delay).start()
+
+
 def prepare_server(
     deployment_file: Path | None = None,
     install: bool = False,
     build: bool = False,
 ) -> None:
-    configure_settings(deployment_file_path=deployment_file)
-    load_environment_variables(get_deployment_config(), settings.config_parent)
+    configure_settings(
+        deployment_file_path=deployment_file or Path(DEFAULT_DEPLOYMENT_FILE_PATH)
+    )
+    load_environment_variables(get_deployment_config(), settings.resolved_config_parent)
     if install:
         config = get_deployment_config()
-        inject_appserver_into_target(config, settings.config_parent)
-        install_ui(config, settings.config_parent)
+        inject_appserver_into_target(config, settings.resolved_config_parent)
+        install_ui(config, settings.resolved_config_parent)
     if build:
-        build_ui(settings.config_parent, get_deployment_config())
+        build_ui(settings.resolved_config_parent, get_deployment_config(), settings)
 
 
 def start_server(
@@ -111,15 +186,15 @@ def start_server(
     configure_settings(
         proxy_ui=proxy_ui,
         app_root=cwd,
-        deployment_file_path=deployment_file,
+        deployment_file_path=deployment_file or Path(DEFAULT_DEPLOYMENT_FILE_PATH),
         reload=reload,
     )
-    load_environment_variables(get_deployment_config(), settings.config_parent)
+    load_environment_variables(get_deployment_config(), settings.resolved_config_parent)
 
     ui_process = None
     if proxy_ui:
         ui_process = start_dev_ui_process(
-            settings.config_parent, settings.port, get_deployment_config()
+            settings.resolved_config_parent, settings, get_deployment_config()
         )
     try:
         if open_browser:
@@ -142,11 +217,20 @@ def start_server_in_target_venv(
     cwd: Path | None = None,
     deployment_file: Path | None = None,
     open_browser: bool = False,
+    port: int | None = None,
+    ui_port: int | None = None,
 ) -> None:
-    cfg = get_deployment_config()
-    path = find_python_pyproject(cwd or Path.cwd(), cfg)
+    # Ensure settings reflect the intended working directory before computing paths
 
-    args = ["uv", "run", "python", "-m", "llama_deploy.appserver.app"]
+    configure_settings(
+        app_root=cwd,
+        deployment_file_path=deployment_file,
+        reload=reload,
+        proxy_ui=proxy_ui,
+    )
+    base_dir = cwd or Path.cwd()
+    path = settings.resolved_config_parent.relative_to(base_dir)
+    args = ["uv", "run", "--no-progress", "python", "-m", "llama_deploy.appserver.app"]
     if proxy_ui:
         args.append("--proxy-ui")
     if reload:
@@ -156,11 +240,17 @@
         args.append(str(deployment_file))
     if open_browser:
         args.append("--open-browser")
-    # All the streaming/PTY/pipe handling is centralized
+
+    env = os.environ.copy()
+    if port:
+        env["LLAMA_DEPLOY_APISERVER_PORT"] = str(port)
+    if ui_port:
+        env["LLAMA_DEPLOY_APISERVER_PROXY_UI_PORT"] = str(ui_port)
+
     ret = run_process(
         args,
         cwd=path,
-        env=None,
+        env=env,
         line_transform=_exclude_venv_warning,
     )
 
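
Note: rather than adding CLI flags, the optional `port`/`ui_port` arguments reach the child process through `LLAMA_DEPLOY_APISERVER_*` environment variables, which `ApiserverSettings` (a pydantic-settings `BaseSettings`) reads on startup. A rough sketch of that mapping, assuming an env prefix of `LLAMA_DEPLOY_APISERVER_` as the variable names suggest; the class and the `port` default below are illustrative stand-ins, not the real `ApiserverSettings`:

    import os

    from pydantic import Field
    from pydantic_settings import BaseSettings, SettingsConfigDict

    class ExampleSettings(BaseSettings):
        # Hypothetical stand-in; the real settings class defines many more fields
        model_config = SettingsConfigDict(env_prefix="LLAMA_DEPLOY_APISERVER_")

        port: int = Field(default=4501)  # illustrative default
        proxy_ui_port: int = Field(default=4502)

    os.environ["LLAMA_DEPLOY_APISERVER_PORT"] = "8080"
    print(ExampleSettings().port)  # -> 8080, taken from the environment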

@@ -169,7 +259,6 @@
 
 
 if __name__ == "__main__":
-    print("starting server")
     parser = argparse.ArgumentParser()
     parser.add_argument("--proxy-ui", action="store_true")
     parser.add_argument("--reload", action="store_true")
@@ -183,11 +272,3 @@ if __name__ == "__main__":
         deployment_file=args.deployment_file,
         open_browser=args.open_browser,
     )
-
-
-def open_browser_async(host: str, port: int) -> None:
-    def _open_with_delay() -> None:
-        time.sleep(1)
-        webbrowser.open(f"http://{host}:{port}")
-
-    threading.Thread(target=_open_with_delay).start()

--- a/llama_deploy/appserver/bootstrap.py
+++ b/llama_deploy/appserver/bootstrap.py
@@ -8,7 +8,11 @@ import os
 from pathlib import Path
 
 from llama_deploy.appserver.deployment_config_parser import get_deployment_config
-from llama_deploy.appserver.settings import BootstrapSettings, configure_settings
+from llama_deploy.appserver.settings import (
+    BootstrapSettings,
+    configure_settings,
+    settings,
+)
 from llama_deploy.appserver.workflow_loader import (
     build_ui,
     inject_appserver_into_target,
@@ -45,8 +49,7 @@ def bootstrap_app_from_repo(
         deployment_file_path=Path(bootstrap_settings.deployment_file_path),
     )
     config = get_deployment_config()
-    base_path = Path(target_dir)
-    load_environment_variables(config, base_path)
+    load_environment_variables(config, settings.resolved_config_parent)
 
     sdists = None
     if bootstrap_settings.bootstrap_sdists:
@@ -59,9 +62,9 @@
         sdists = None
     # Use the explicit base path rather than relying on global settings so tests
     # can safely mock configure_settings without affecting call arguments.
-    inject_appserver_into_target(config, base_path, sdists)
-    install_ui(config, base_path)
-    build_ui(base_path, config)
+    inject_appserver_into_target(config, settings.resolved_config_parent, sdists)
+    install_ui(config, settings.resolved_config_parent)
+    build_ui(settings.resolved_config_parent, config, settings)
 
     pass
 

--- a/llama_deploy/appserver/deployment.py
+++ b/llama_deploy/appserver/deployment.py
@@ -7,6 +7,7 @@ from llama_deploy.appserver.types import generate_id
 from llama_deploy.appserver.workflow_loader import DEFAULT_SERVICE_ID
 from workflows import Context, Workflow
 from workflows.handler import WorkflowHandler
+from workflows.server import WorkflowServer
 
 logger = logging.getLogger()
 
@@ -78,3 +79,9 @@ class Deployment:
         self._handlers[handler_id] = handler
         self._handler_inputs[handler_id] = json.dumps(run_kwargs)
         return handler_id, session_id
+
+    def create_workflow_server(self) -> WorkflowServer:
+        server = WorkflowServer()
+        for service_id, workflow in self._workflow_services.items():
+            server.add_workflow(service_id, workflow)
+        return server
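
Note: `create_workflow_server` re-registers every loaded workflow on a `WorkflowServer` from `llama-index-workflows`; its Starlette app (`server.app`) is what `app.py` re-exposes under `/deployments/{name}`. A hedged sketch of the same wiring in isolation; only `WorkflowServer()`, `add_workflow()`, and `server.app` are taken from this diff, while the `EchoWorkflow` definition and the `step`/`workflows.events` import paths are assumptions about the workflows package:

    from workflows import Context, Workflow, step
    from workflows.events import StartEvent, StopEvent
    from workflows.server import WorkflowServer

    class EchoWorkflow(Workflow):
        # Illustrative single-step workflow
        @step
        async def echo(self, ctx: Context, ev: StartEvent) -> StopEvent:
            return StopEvent(result=getattr(ev, "message", None))

    server = WorkflowServer()
    server.add_workflow("echo", EchoWorkflow())
    # server.app is the underlying Starlette app whose routes get re-mounted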

--- a/llama_deploy/appserver/deployment_config_parser.py
+++ b/llama_deploy/appserver/deployment_config_parser.py
@@ -1,13 +1,15 @@
 import functools
 
 from llama_deploy.appserver.settings import BootstrapSettings, settings
-from llama_deploy.core.deployment_config import DeploymentConfig
+from llama_deploy.core.deployment_config import DeploymentConfig, read_deployment_config
 
 
-@functools.lru_cache
+@functools.cache
 def get_deployment_config() -> DeploymentConfig:
     base_settings = BootstrapSettings()
     base = settings.app_root.resolve()
-    yaml_file = base / settings.deployment_file_path
     name = base_settings.deployment_name
-    return DeploymentConfig.from_yaml(yaml_file, name)
+    parsed = read_deployment_config(base, settings.deployment_file_path)
+    if name is not None:
+        parsed.name = name
+    return parsed
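
Note: the decorator swap is behavior-preserving: `functools.cache` is exactly `functools.lru_cache(maxsize=None)`, an unbounded cache with less bookkeeping, which suits a zero-argument loader like `get_deployment_config`. For example:

    import functools

    @functools.cache
    def expensive() -> int:
        print("computed once")
        return 42

    expensive()  # prints "computed once", returns 42
    expensive()  # served from the cache, no print
    # Equivalent spelling: @functools.lru_cache(maxsize=None)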

--- a/llama_deploy/appserver/process_utils.py
+++ b/llama_deploy/appserver/process_utils.py
@@ -30,6 +30,7 @@ def run_process(
     threads: list[threading.Thread] = []
     try:
         cleanup()
+        _log_command(cmd, prefixer)
         threads = _start_stream_threads(sources, prefixer)
         ret = process.wait()
         if ret != 0:
@@ -58,6 +59,7 @@ def spawn_process(
 
     process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
     cleanup()
+    _log_command(cmd, prefixer)
    _start_stream_threads(sources, prefixer)
     return process
 
@@ -187,6 +189,13 @@
         pass
 
 
+def _log_command(cmd: list[str], transform: Callable[[str], str | None] | None) -> None:
+    cmd_str = "> " + " ".join(cmd)
+    if transform:
+        cmd_str = transform(cmd_str)
+    sys.stderr.write(cmd_str + "\n")
+
+
 def _start_stream_threads(
     sources: list[tuple[int | TextIO, TextIO]],
     transform: Callable[[str], str | None] | None,

--- a/llama_deploy/appserver/routers/deployments.py
+++ b/llama_deploy/appserver/routers/deployments.py
@@ -33,9 +33,7 @@ def create_base_router(name: str) -> APIRouter:
     return base_router
 
 
-def create_deployments_router(
-    name: str, deployment: Deployment, serve_static: bool = False
-) -> APIRouter:
+def create_deployments_router(name: str, deployment: Deployment) -> APIRouter:
     deployments_router = APIRouter(
         prefix="/deployments",
     )

--- a/llama_deploy/appserver/routers/ui_proxy.py
+++ b/llama_deploy/appserver/routers/ui_proxy.py
@@ -1,6 +1,6 @@
 import asyncio
 import logging
-from typing import List, Optional
+from typing import List
 
 import httpx
 import websockets
@@ -120,10 +120,12 @@ def create_ui_proxy_router(name: str, port: int) -> APIRouter:
     @deployment_router.api_route(
         "/ui/{path:path}",
         methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH"],
+        include_in_schema=False,
     )
     @deployment_router.api_route(
         "/ui",
         methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH"],
+        include_in_schema=False,
     )
     async def proxy(
         request: Request,
@@ -197,17 +199,17 @@
 def mount_static_files(
     app: FastAPI, config: DeploymentConfig, settings: ApiserverSettings
 ) -> None:
-    if not config.ui or not config.ui.source:
+    path = settings.app_root / config.build_output_path()
+    if not path:
         return
 
-    ui_path = settings.config_parent / config.ui.source.location / "dist"
-    if not ui_path.exists():
+    if not path.exists():
         return
 
     # Serve index.html when accessing the directory path
     app.mount(
         f"/deployments/{config.name}/ui",
-        StaticFiles(directory=str(ui_path), html=True),
+        StaticFiles(directory=str(path), html=True),
         name=f"ui-static-{config.name}",
     )
     return None

--- a/llama_deploy/appserver/settings.py
+++ b/llama_deploy/appserver/settings.py
@@ -2,6 +2,7 @@ import os
 from pathlib import Path
 
 from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
+from llama_deploy.core.deployment_config import resolve_config_parent
 from pydantic import Field
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
@@ -24,7 +25,7 @@ class BootstrapSettings(BaseSettings):
     )
     git_sha: str | None = Field(default=None, description="The git SHA to checkout")
     deployment_file_path: str = Field(
-        default="llama_deploy.yaml",
+        default=".",
         description="The path to the deployment file, relative to the root of the repository",
     )
     deployment_name: str | None = Field(
@@ -55,13 +56,17 @@ class ApiserverSettings(BaseSettings):
 
     deployment_file_path: Path = Field(
         default=Path(DEFAULT_DEPLOYMENT_FILE_PATH),
-        description="path, relative to the repository root, where the deployment file is located. If not provided, will look for ./llama_deploy.yaml",
+        description="path, relative to the repository root, where the pyproject.toml file is located",
     )
 
     proxy_ui: bool = Field(
         default=False,
         description="If true, proxy a development UI server instead of serving built assets",
     )
+    proxy_ui_port: int = Field(
+        default=4502,
+        description="The TCP port where to bind the UI proxy server",
+    )
 
     reload: bool = Field(
         default=False,
@@ -69,8 +74,8 @@ class ApiserverSettings(BaseSettings):
     )
 
     @property
-    def config_parent(self) -> Path:
-        return (self.app_root / self.deployment_file_path).parent
+    def resolved_config_parent(self) -> Path:
+        return resolve_config_parent(self.app_root, self.deployment_file_path)
 
 
 settings = ApiserverSettings()
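
Note: the old `config_parent` assumed `deployment_file_path` named a file and always took its parent; `resolved_config_parent` delegates to `resolve_config_parent` from `llama-deploy-core`, whose body is not part of this diff. Given the new `"."` default and the description now pointing at a pyproject.toml location, a plausible reading (an assumption, not the actual implementation) is:

    from pathlib import Path

    def resolve_config_parent_sketch(app_root: Path, deployment_file_path: Path) -> Path:
        # Assumption: a directory is used as-is, a file contributes its parent
        candidate = app_root / deployment_file_path
        return candidate if candidate.is_dir() else candidate.parent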

--- a/llama_deploy/appserver/workflow_loader.py
+++ b/llama_deploy/appserver/workflow_loader.py
@@ -13,9 +13,11 @@ from llama_deploy.appserver.deployment_config_parser import (
     DeploymentConfig,
 )
 from llama_deploy.appserver.process_utils import run_process, spawn_process
+from llama_deploy.appserver.settings import ApiserverSettings, settings
 from llama_deploy.core.ui_build import ui_build_output_path
 from packaging.version import InvalidVersion, Version
 from workflows import Workflow
+from workflows.server import WorkflowServer
 
 logger = logging.getLogger(__name__)
 
@@ -27,44 +29,28 @@ def load_workflows(config: DeploymentConfig) -> dict[str, Workflow]:
     Creates WorkflowService instances according to the configuration object.
 
     """
-    workflow_services = {}
-
-    # Pre-compute per-service import info
-    per_service: list[tuple[str, str]] = []
-    for service_id, service_config in config.services.items():
-        if service_config.import_path is None:
-            continue
-        raw_mod_path, workflow_name = service_config.import_path.split(":", 1)
-        module_name = Path(raw_mod_path).name
-        per_service.append((service_id, workflow_name))
-
-    for service_id, workflow_name in per_service:
-        import_path = config.services[service_id].import_path
-        if import_path is None:
-            continue
-        raw_mod_path = import_path.split(":", 1)[0]
-        module_name = Path(raw_mod_path).name
+    workflow_services: dict[str, Workflow] = {}
 
+    if config.app:
+        module_name, app_name = config.app.split(":", 1)
         module = importlib.import_module(module_name)
-
-        if hasattr(module, workflow_name):
+        workflow = getattr(module, app_name)
+        if not isinstance(workflow, WorkflowServer):
+            raise ValueError(
+                f"Workflow {app_name} in {module_name} is not a WorkflowServer object"
+            )
+        # kludge to get the workflows
+        workflow_services = workflow._workflows
+    else:
+        for service_id, workflow_name in config.workflows.items():
+            module_name, workflow_name = workflow_name.split(":", 1)
+            module = importlib.import_module(module_name)
             workflow = getattr(module, workflow_name)
             if not isinstance(workflow, Workflow):
                 logger.warning(
                     f"Workflow {workflow_name} in {module_name} is not a Workflow object",
                 )
             workflow_services[service_id] = workflow
-        else:
-            logger.warning("Workflow %s not found in %s", workflow_name, module_name)
-
-    if config.default_service:
-        if config.default_service in workflow_services:
-            workflow_services[DEFAULT_SERVICE_ID] = workflow_services[
-                config.default_service
-            ]
-        else:
-            msg = f"Service with id '{config.default_service}' does not exist, cannot set it as default."
-            logger.warning(msg)
 
     return workflow_services
 
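
Note: a deployment can now point at either a ready-made `WorkflowServer` (`config.app`) or a mapping of service ids to workflows (`config.workflows`); in both branches the value is a `module:attribute` reference resolved the same way. The mechanics, using a stdlib attribute so the snippet runs as-is:

    import importlib

    ref = "math:pi"  # e.g. "examples.echo:workflow" in a real config
    module_name, attr_name = ref.split(":", 1)
    obj = getattr(importlib.import_module(module_name), attr_name)
    print(obj)  # 3.141592653589793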

@@ -73,15 +59,14 @@ def load_environment_variables(config: DeploymentConfig, source_root: Path) -> None:
     """
     Load environment variables from the deployment config.
     """
-    for service_id, service_config in config.services.items():
-        env_vars = {**service_config.env} if service_config.env else {}
-        for env_file in service_config.env_files or []:
-            env_file_path = source_root / env_file
-            values = dotenv_values(env_file_path)
-            env_vars.update(**values)
-        for key, value in env_vars.items():
-            if value:
-                os.environ[key] = value
+    env_vars = {**config.env} if config.env else {}
+    for env_file in config.env_files or []:
+        env_file_path = source_root / env_file
+        values = dotenv_values(env_file_path)
+        env_vars.update(**values)
+    for key, value in env_vars.items():
+        if value:
+            os.environ[key] = value
 
 
 @functools.cache
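
Note: environment handling moved from per-service (`service_config.env`) to the top level of the config (`config.env`, `config.env_files`), but the precedence is unchanged: inline `env` values are seeded first, then each `env_files` entry overrides them, since `dotenv_values` just returns a dict that is merged in without touching `os.environ`. A small illustration (the `.env` path is hypothetical):

    from dotenv import dotenv_values

    env_vars = {"API_KEY": "from-config"}
    env_vars.update(dotenv_values(".env"))  # values from the file win
    for key, value in env_vars.items():
        if value:  # falsy values are skipped, as in load_environment_variables
            print(key, value)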

@@ -116,12 +101,7 @@
         source_root: The root directory of the deployment
         sdists: A list of tar.gz sdists files to install instead of installing the appserver
     """
-    path = _find_install_target(source_root, config)
-    if path is None:
-        logger.warning(
-            "No python_dependencies and no root pyproject.toml; skipping dependency installation."
-        )
-        return
+    path = settings.resolved_config_parent
     logger.info(f"Installing ensuring venv at {path} and adding appserver to it")
     _ensure_uv_available()
     _add_appserver_if_missing(path, source_root, sdists=sdists)
@@ -178,69 +158,61 @@ def _add_appserver_if_missing(
         )
         return
 
-    def ensure_venv(path: Path) -> Path:
+    def run_uv(cmd: str, args: list[str]):
+        run_process(
+            ["uv", cmd] + args,
+            cwd=source_root / path,
+            prefix=f"[uv {cmd}]",
+            color_code="36",
+            use_tty=False,
+            line_transform=_exclude_venv_warning,
+        )
+
+    def ensure_venv(path: Path, force: bool = False) -> Path:
         venv_path = source_root / path / ".venv"
-        if not venv_path.exists():
-            run_process(
-                ["uv", "venv", str(venv_path)],
-                cwd=source_root / path,
-                prefix="[uv venv]",
-                color_code="36",
-            )
+        if force or not venv_path.exists():
+            run_uv("venv", [str(venv_path)])
         return venv_path
 
     if sdists:
-        run_process(
-            ["uv", "pip", "install"]
+        run_uv(
+            "pip",
+            ["install"]
             + [str(s.absolute()) for s in sdists]
             + ["--prefix", str(ensure_venv(path))],
-            cwd=source_root / path,
-            prefix="[uv pip install]",
-            color_code="36",
         )
     elif are_we_editable_mode():
         pyproject = _find_development_pyproject()
         if pyproject is None:
             raise RuntimeError("No pyproject.toml found in llama-deploy-appserver")
         target = f"file://{str(pyproject.relative_to(source_root.resolve() / path, walk_up=True))}"
-        run_process(
+
+        run_uv(
+            "pip",
             [
-                "uv",
-                "pip",
                 "install",
-                "--reinstall",
+                "--reinstall-package",
+                "llama-deploy-appserver",
                 target,
                 "--prefix",
-                str(ensure_venv(path)),
+                str(ensure_venv(path, force=True)),
             ],
-            cwd=source_root / path,
-            prefix="[uv pip install]",
-            color_code="36",
         )
+
     else:
        version = _is_missing_or_outdated(path)
         if version is not None:
             if save_version:
-                run_process(
-                    ["uv", "add", f"llama-deploy-appserver>={version}"],
-                    cwd=source_root / path,
-                    prefix="[uv add]",
-                    color_code="36",
-                    line_transform=_exclude_venv_warning,
-                )
+                run_uv("add", [f"llama-deploy-appserver>={version}"])
             else:
-                run_process(
+                run_uv(
+                    "pip",
                     [
-                        "uv",
-                        "pip",
                         "install",
                         f"llama-deploy-appserver=={version}",
                         "--prefix",
                         str(ensure_venv(path)),
                     ],
-                    cwd=source_root / path,
-                    prefix="[uv pip install]",
-                    color_code="36",
                 )
 
 
@@ -253,27 +225,6 @@ def _find_development_pyproject() -> Path | None:
         return dir
 
 
-def find_python_pyproject(base: Path, config: DeploymentConfig) -> Path | None:
-    path: Path | None = None
-    for service_id, service_config in config.services.items():
-        if service_config.python_dependencies:
-            if len(service_config.python_dependencies) > 1:
-                logger.warning(
-                    "Llama Deploy now only supports installing from a single pyproject.toml path"
-                )
-            this_path = Path(service_config.python_dependencies[0])
-            if path is not None and this_path != path:
-                logger.warning(
-                    f"Llama Deploy now only supports installing from a single pyproject.toml path, ignoring {this_path}"
-                )
-            else:
-                path = this_path
-    if path is None:
-        if (base / "pyproject.toml").exists():
-            path = Path(".")
-    return path
-
-
 def _exclude_venv_warning(line: str) -> str | None:
     if "use `--active` to target the active environment instead" in line:
         return None
@@ -311,82 +262,56 @@ def _ensure_uv_available() -> None:
         raise RuntimeError(msg)
 
 
-def _find_install_target(base: Path, config: DeploymentConfig) -> Path | None:
-    return find_python_pyproject(base, config)
-
-
-def _validate_path_is_safe(
-    path: Path, source_root: Path, path_type: str = "path"
-) -> None:
-    """Validates that a path is within the source root to prevent path traversal attacks.
-
-    Args:
-        path: The path to validate
-        source_root: The root directory that paths should be relative to
-        path_type: Description of the path type for error messages
-
-    Raises:
-        DeploymentError: If the path is outside the source root
-    """
-    resolved_path = (source_root / path).resolve()
-    resolved_source_root = source_root.resolve()
-
-    if not resolved_path.is_relative_to(resolved_source_root):
-        msg = (
-            f"{path_type} {path} is not a subdirectory of the source root {source_root}"
-        )
-        raise RuntimeError(msg)
-
-
 def install_ui(config: DeploymentConfig, config_parent: Path) -> None:
     if config.ui is None:
         return
-    path = Path(config.ui.source.location) if config.ui.source else Path(".")
-    _validate_path_is_safe(path, config_parent, "ui_source")
+    package_manager = config.ui.package_manager
     run_process(
-        ["pnpm", "install"],
-        cwd=config_parent / path,
-        prefix="[pnpm install]",
+        [package_manager, "install"],
+        cwd=config_parent / config.ui.directory,
+        prefix=f"[{package_manager} install]",
         color_code="33",
     )
 
 
-def _ui_env(config: DeploymentConfig) -> dict[str, str]:
+def _ui_env(config: DeploymentConfig, settings: ApiserverSettings) -> dict[str, str]:
     env = os.environ.copy()
     env["LLAMA_DEPLOY_DEPLOYMENT_URL_ID"] = config.name
     env["LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH"] = f"/deployments/{config.name}/ui"
     if config.ui is not None:
-        env["PORT"] = str(config.ui.port)
+        env["PORT"] = str(settings.proxy_ui_port)
     return env
 
 
-def build_ui(config_parent: Path, config: DeploymentConfig) -> bool:
+def build_ui(
+    config_parent: Path, config: DeploymentConfig, settings: ApiserverSettings
+) -> bool:
     """
     Returns True if the UI was built (and supports building), otherwise False if there's no build command
     """
     if config.ui is None:
         return False
-    path = Path(config.ui.source.location) if config.ui.source else Path(".")
-    _validate_path_is_safe(path, config_parent, "ui_source")
-    env = _ui_env(config)
+    path = Path(config.ui.directory)
+    env = _ui_env(config, settings)
 
     has_build = ui_build_output_path(config_parent, config)
     if has_build is None:
         return False
 
     run_process(
-        ["pnpm", "run", "build"],
+        ["npm", "run", "build"],
         cwd=config_parent / path,
         env=env,
-        prefix="[pnpm run build]",
+        prefix="[npm run build]",
         color_code="34",
     )
     return True
 
 
 def start_dev_ui_process(
-    root: Path, main_port: int, config: DeploymentConfig
+    root: Path, settings: ApiserverSettings, config: DeploymentConfig
 ) -> None | subprocess.Popen:
+    ui_port = settings.proxy_ui_port
     ui = config.ui
     if ui is None:
         return None
@@ -400,29 +325,29 @@ def start_dev_ui_process(
     except Exception:
         return False
 
-    if _is_port_open(ui.port):
+    if _is_port_open(ui_port):
         logger.info(
-            "Detected process already running on port %s; not starting a new one.",
-            ui.port,
+            f"Detected process already running on port {ui_port}; not starting a new one."
         )
         return None
     # start the ui process
-    env = _ui_env(config)
+    env = _ui_env(config, settings)
     # Transform first 20 lines to replace the default UI port with the main server port
     line_counter = 0
 
     def _transform(line: str) -> str:
         nonlocal line_counter
         if line_counter < 20:
-            line = line.replace(f":{ui.port}", f":{main_port}")
+            line = line.replace(f":{ui_port}", f":{settings.port}")
         line_counter += 1
         return line
 
     return spawn_process(
-        ["pnpm", "run", "dev"],
-        cwd=root / (ui.source.location if ui.source else "."),
+        ["npm", "run", ui.serve_command],
+        cwd=root / (ui.directory),
         env=env,
-        prefix="[pnpm run dev]",
-        color_code="35",  # magenta
+        prefix=f"[npm run {ui.serve_command}]",
+        color_code="35",
         line_transform=_transform,
+        use_tty=False,
     )
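
Note: `start_dev_ui_process` rewrites the first 20 lines of the dev server's output so that the URL it prints (bound to `proxy_ui_port`) advertises the main appserver port users should actually open. The closure-with-`nonlocal`-counter pattern in isolation, with illustrative port numbers:

    def make_port_rewriter(from_port: int, to_port: int, max_lines: int = 20):
        count = 0

        def transform(line: str) -> str:
            nonlocal count
            if count < max_lines:
                line = line.replace(f":{from_port}", f":{to_port}")
            count += 1
            return line

        return transform

    rewrite = make_port_rewriter(4502, 4501)
    print(rewrite("Local: http://localhost:4502/"))  # -> http://localhost:4501/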

--- a/llama_deploy_appserver-0.3.0a5.dist-info/METADATA
+++ b/llama_deploy_appserver-0.3.0a7.dist-info/METADATA
@@ -1,16 +1,16 @@
 Metadata-Version: 2.3
 Name: llama-deploy-appserver
-Version: 0.3.0a5
+Version: 0.3.0a7
 Summary: Application server components for LlamaDeploy
 Author: Massimiliano Pippi
 Author-email: Massimiliano Pippi <mpippi@gmail.com>
 License: MIT
-Requires-Dist: llama-index-workflows>=1.1.0
+Requires-Dist: llama-index-workflows[server]>=1.3.0
 Requires-Dist: pydantic-settings>=2.10.1
 Requires-Dist: uvicorn>=0.24.0
 Requires-Dist: fastapi>=0.100.0
 Requires-Dist: websockets>=12.0
-Requires-Dist: llama-deploy-core>=0.3.0a5,<0.4.0
+Requires-Dist: llama-deploy-core>=0.3.0a7,<0.4.0
 Requires-Dist: httpx>=0.28.1
 Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
 Requires-Dist: packaging>=25.0

--- /dev/null
+++ b/llama_deploy_appserver-0.3.0a7.dist-info/RECORD
@@ -0,0 +1,18 @@
+llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+llama_deploy/appserver/__main__.py,sha256=32eff329cadb4f883c9df3a1b2bcb908d5adde765e7c7e761d25b7df4827b9ca,196
+llama_deploy/appserver/app.py,sha256=e127cfde4204e84a8f00289a624ee35f3b16ea6686b6935b873da6ba4020df5d,8527
+llama_deploy/appserver/bootstrap.py,sha256=fa32be007f18b4b3af92c878bac417416c9afb09b1beddf51b5cd73115e6b7c6,2453
+llama_deploy/appserver/deployment.py,sha256=1a7c75d12abbf7c93d1c2ab791cedfe5431a36a6f7a0d3642d487f8b6336206d,2950
+llama_deploy/appserver/deployment_config_parser.py,sha256=e2b6c483203d96ab795c4e55df15c694c20458d5a03fab89c2b71e481291a2d3,510
+llama_deploy/appserver/process_utils.py,sha256=22ca4db8f5df489fdfcc1859ad47674c0a77a03e1de56966bf936c3b256dd73f,5954
+llama_deploy/appserver/routers/__init__.py,sha256=ee2d14ebf4b067c844947ed1cc98186456e8bfa4919282722eaaf8cca345a138,214
+llama_deploy/appserver/routers/deployments.py,sha256=510b6f22118256ce9b8ba6a116ecd21f5d5e052a3a300ce60e0ce0afe135b9e3,7257
+llama_deploy/appserver/routers/status.py,sha256=eead8e0aebbc7e5e3ca8f0c00d0c1b6df1d6cde7844edfbe9350bf64ab85006f,257
+llama_deploy/appserver/routers/ui_proxy.py,sha256=5742f6d5d8cc6cd9a180d579a98e165f709e3db80f6413d1c127d4f7263147fa,7169
+llama_deploy/appserver/settings.py,sha256=7f1f481216b29614a94783c81cb49f0790d66e9e0cacef407da4ed3c8fcbbeeb,3484
+llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
+llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
+llama_deploy/appserver/workflow_loader.py,sha256=fbd98790524104014a0c329d368f48c3072207f80a008201c76d67993b3a65dc,11221
+llama_deploy_appserver-0.3.0a7.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llama_deploy_appserver-0.3.0a7.dist-info/METADATA,sha256=67a4b3e18bf88a217f7643823983618e49cf1b17ff2da26bc6d670cd33570d95,785
+llama_deploy_appserver-0.3.0a7.dist-info/RECORD,,

--- a/llama_deploy_appserver-0.3.0a5.dist-info/RECORD
+++ /dev/null
@@ -1,18 +0,0 @@
-llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
-llama_deploy/appserver/__main__.py,sha256=32eff329cadb4f883c9df3a1b2bcb908d5adde765e7c7e761d25b7df4827b9ca,196
-llama_deploy/appserver/app.py,sha256=a91a518dbce113043852217008a09b21dc6093bf78173c5d5f2b2ba3a3522094,5628
-llama_deploy/appserver/bootstrap.py,sha256=b2b032af98f477c1e3b515442251890027f4a83c5e73c19af43b36a2c5e1e479,2361
-llama_deploy/appserver/deployment.py,sha256=8dc31107a8dbe30feb3d414540a5cbd4753faffd617fdab69407ee19a0ecd1bb,2670
-llama_deploy/appserver/deployment_config_parser.py,sha256=160f6ead40028de8d92af927ac8427c13e62fdfaccbcaf605906d0645e06ba74,455
-llama_deploy/appserver/process_utils.py,sha256=4fae398607fd84187343b7aeefb16d7314366cbec91c82f0fbb76479386104fb,5668
-llama_deploy/appserver/routers/__init__.py,sha256=ee2d14ebf4b067c844947ed1cc98186456e8bfa4919282722eaaf8cca345a138,214
-llama_deploy/appserver/routers/deployments.py,sha256=4b4179c4cbd48548a0cde9bbc07f3668c8f5abcffa4bfb1c98654e81e40b8329,7291
-llama_deploy/appserver/routers/status.py,sha256=eead8e0aebbc7e5e3ca8f0c00d0c1b6df1d6cde7844edfbe9350bf64ab85006f,257
-llama_deploy/appserver/routers/ui_proxy.py,sha256=8b034dd2615bc6f992b0e825c644bdef256ee2b46eac00184bf7dedfdb7a2309,7164
-llama_deploy/appserver/settings.py,sha256=46d209715f4e143eb1f4a44906bacfbd8eb79593d54b9de29fb49e7edc6a2a51,3326
-llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
-llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
-llama_deploy/appserver/workflow_loader.py,sha256=ba863d36cef3bc65ce1227ed3302b9e569e2765e6dd53ba9dcbbf30c0800c1c8,14168
-llama_deploy_appserver-0.3.0a5.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llama_deploy_appserver-0.3.0a5.dist-info/METADATA,sha256=f28d7fbc89fb899065d94a929370013a4e2eeec6e52dd5a1a18cc54bbed54609,777
-llama_deploy_appserver-0.3.0a5.dist-info/RECORD,,