llama-deploy-appserver 0.2.7a1__py3-none-any.whl → 0.3.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. llama_deploy/appserver/__main__.py +0 -4
  2. llama_deploy/appserver/app.py +105 -25
  3. llama_deploy/appserver/bootstrap.py +76 -24
  4. llama_deploy/appserver/deployment.py +7 -421
  5. llama_deploy/appserver/deployment_config_parser.py +35 -59
  6. llama_deploy/appserver/routers/__init__.py +4 -3
  7. llama_deploy/appserver/routers/deployments.py +162 -385
  8. llama_deploy/appserver/routers/status.py +4 -31
  9. llama_deploy/appserver/routers/ui_proxy.py +213 -0
  10. llama_deploy/appserver/settings.py +57 -55
  11. llama_deploy/appserver/types.py +0 -3
  12. llama_deploy/appserver/workflow_loader.py +383 -0
  13. {llama_deploy_appserver-0.2.7a1.dist-info → llama_deploy_appserver-0.3.0a1.dist-info}/METADATA +3 -6
  14. llama_deploy_appserver-0.3.0a1.dist-info/RECORD +17 -0
  15. {llama_deploy_appserver-0.2.7a1.dist-info → llama_deploy_appserver-0.3.0a1.dist-info}/WHEEL +1 -1
  16. llama_deploy/appserver/client/__init__.py +0 -3
  17. llama_deploy/appserver/client/base.py +0 -30
  18. llama_deploy/appserver/client/client.py +0 -49
  19. llama_deploy/appserver/client/models/__init__.py +0 -4
  20. llama_deploy/appserver/client/models/apiserver.py +0 -356
  21. llama_deploy/appserver/client/models/model.py +0 -82
  22. llama_deploy/appserver/run_autodeploy.py +0 -141
  23. llama_deploy/appserver/server.py +0 -60
  24. llama_deploy/appserver/source_managers/__init__.py +0 -5
  25. llama_deploy/appserver/source_managers/base.py +0 -33
  26. llama_deploy/appserver/source_managers/git.py +0 -48
  27. llama_deploy/appserver/source_managers/local.py +0 -51
  28. llama_deploy/appserver/tracing.py +0 -237
  29. llama_deploy_appserver-0.2.7a1.dist-info/RECORD +0 -28
llama_deploy/appserver/workflow_loader.py
@@ -0,0 +1,383 @@
+ import importlib
+ from pathlib import Path
+ import logging
+ import socket
+ import subprocess
+ import sys
+ import os
+ import site
+ import threading
+ from typing import TextIO, Callable, cast
+ import json
+ from llama_deploy.appserver.settings import settings
+ from llama_deploy.appserver.deployment_config_parser import (
+     DeploymentConfig,
+     get_deployment_config,
+ )
+ from workflows import Workflow
+ from dotenv import dotenv_values
+
+ logger = logging.getLogger(__name__)
+
+ DEFAULT_SERVICE_ID = "default"
+
+
+ def _stream_subprocess_output(
+     process: subprocess.Popen,
+     prefix: str,
+     color_code: str,
+ ) -> None:
+     """Stream a subprocess's stdout to our stdout with a colored prefix.
+
+     The function runs in the caller thread and returns when the subprocess exits
+     or its stdout closes.
+     """
+
+     def _forward_output_with_prefix(pipe: TextIO | None) -> None:
+         if pipe is None:
+             return
+         if sys.stdout.isatty():
+             colored_prefix = f"\x1b[{color_code}m{prefix}\x1b[0m"
+         else:
+             colored_prefix = prefix
+         for line in iter(pipe.readline, ""):
+             sys.stdout.write(f"{colored_prefix} {line}")
+             sys.stdout.flush()
+         try:
+             pipe.close()
+         except Exception:
+             pass
+
+     _forward_output_with_prefix(cast(TextIO, process.stdout))
+
+
+ def _run_with_prefix(
+     cmd: list[str],
+     *,
+     cwd: Path | None = None,
+     env: dict[str, str] | None = None,
+     prefix: str,
+     color_code: str = "36",  # cyan by default
+ ) -> None:
+     """Run a command streaming output with a colored prefix.
+
+     Raises RuntimeError on non-zero exit.
+     """
+     process = subprocess.Popen(
+         cmd,
+         cwd=cwd,
+         env=env,
+         stdout=subprocess.PIPE,
+         stderr=subprocess.STDOUT,
+         bufsize=1,
+         text=True,
+     )
+     _stream_subprocess_output(process, prefix, color_code)
+     ret = process.wait()
+     if ret != 0:
+         raise RuntimeError(f"Command failed ({ret}): {' '.join(cmd)}")
+
+
+ def _start_streaming_process(
+     cmd: list[str],
+     *,
+     cwd: Path | None = None,
+     env: dict[str, str] | None = None,
+     prefix: str,
+     color_code: str,
+     line_transform: Callable[[str], str] | None = None,
+ ) -> subprocess.Popen:
+     """Start a subprocess and stream its output on a background thread with a colored prefix.
+
+     Returns the Popen object immediately; caller is responsible for lifecycle.
+     """
+     process = subprocess.Popen(
+         cmd,
+         cwd=cwd,
+         env=env,
+         stdout=subprocess.PIPE,
+         stderr=subprocess.STDOUT,
+         bufsize=1,
+         text=True,
+     )
+
+     def _forward(pipe: TextIO | None) -> None:
+         if pipe is None:
+             return
+         if sys.stdout.isatty():
+             colored_prefix = f"\x1b[{color_code}m{prefix}\x1b[0m"
+         else:
+             colored_prefix = prefix
+         for line in iter(pipe.readline, ""):
+             out_line = line_transform(line) if line_transform else line
+             sys.stdout.write(f"{colored_prefix} {out_line}")
+             sys.stdout.flush()
+         try:
+             pipe.close()
+         except Exception:
+             pass
+
+     threading.Thread(target=_forward, args=(process.stdout,), daemon=True).start()
+     return process
+
+
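The two helpers above split responsibilities: `_run_with_prefix` blocks and raises on failure, while `_start_streaming_process` returns immediately and streams from a daemon thread. A minimal usage sketch (the commands and prefixes below are illustrative, not taken from the package):

    # Blocking call: streams "[build]"-prefixed output, raises RuntimeError on non-zero exit
    _run_with_prefix(["pnpm", "build"], cwd=Path("ui"), prefix="[build]")

    # Non-blocking call: returns a Popen handle; the caller owns its lifecycle
    proc = _start_streaming_process(
        ["pnpm", "run", "dev"],
        cwd=Path("ui"),
        prefix="[dev]",
        color_code="35",
    )
    # ... later, the caller shuts it down
    proc.terminate()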
+ def do_install():
+     config = get_deployment_config()
+
+     install_python_dependencies(config, settings.config_parent)
+     install_ui(config, settings.config_parent)
+
+
+ def load_workflows(config: DeploymentConfig) -> dict[str, Workflow]:
+     """
+     Load the Workflow instances declared in the configuration object.
+     """
+     workflow_services = {}
+     for service_id, service_config in config.services.items():
+         # Search for a workflow instance in the service path
+         if service_config.import_path is None:
+             continue
+         module_name, workflow_name = service_config.module_location()
+         module = importlib.import_module(module_name)
+         workflow_services[service_id] = getattr(module, workflow_name)
+
+     if config.default_service:
+         if config.default_service in workflow_services:
+             workflow_services[DEFAULT_SERVICE_ID] = workflow_services[
+                 config.default_service
+             ]
+         else:
+             msg = f"Service with id '{config.default_service}' does not exist, cannot set it as default."
+             logger.warning(msg)
+
+     return workflow_services
+
+
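Under the hood this is plain `importlib` resolution; a hedged sketch of what one loop iteration does, with made-up names (the actual `import_path` syntax is handled by `module_location()` in deployment_config_parser.py, which is not shown in this diff):

    import importlib

    # Suppose module_location() returned ("myapp.workflows", "my_workflow")
    module = importlib.import_module("myapp.workflows")
    workflow = getattr(module, "my_workflow")  # expected to be a workflows.Workflow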
+ def load_environment_variables(config: DeploymentConfig, source_root: Path) -> None:
+     """
+     Load environment variables from the deployment config.
+     """
+     for service_id, service_config in config.services.items():
+         if service_config.env:
+             env_vars = {**service_config.env}
+             for env_file in service_config.env_files or []:
+                 env_file_path = source_root / env_file
+                 env_vars.update(**dotenv_values(env_file_path))
+             for key, value in env_vars.items():
+                 if value:
+                     os.environ[key] = value
+
+
+ def install_python_dependencies(config: DeploymentConfig, source_root: Path) -> None:
+     """
+     Install the deployment's Python dependencies into the current environment.
+     """
+     path = _find_install_target(source_root, config)
+     if path is not None:
+         print(f"Installing python dependencies from {path}")
+         _ensure_uv_available()
+         _install_to_current_python(path, source_root)
+
+
+ def _find_install_target(base: Path, config: DeploymentConfig) -> str | None:
+     path: str | None = None
+     for service_id, service_config in config.services.items():
+         if service_config.python_dependencies:
+             if len(service_config.python_dependencies) > 1:
+                 logger.warning(
+                     "Llama Deploy now only supports installing from a single pyproject.toml path"
+                 )
+             this_path = service_config.python_dependencies[0]
+             if path is not None and this_path != path:
+                 logger.warning(
+                     f"Llama Deploy now only supports installing from a single pyproject.toml path, ignoring {this_path}"
+                 )
+             path = this_path
+     if path is None:
+         if (base / "pyproject.toml").exists():
+             path = "."
+     return path
+
+
+ def _ensure_uv_available() -> None:
+     # Check if uv is available on the path
+     uv_available = False
+     try:
+         subprocess.check_call(
+             ["uv", "--version"],
+             stdout=subprocess.DEVNULL,
+             stderr=subprocess.DEVNULL,
+         )
+         uv_available = True
+     except (subprocess.CalledProcessError, FileNotFoundError):
+         pass
+     if not uv_available:
+         # bootstrap uv with pip
+         try:
+             _run_with_prefix(
+                 [
+                     sys.executable,
+                     "-m",
+                     "pip",
+                     "install",
+                     "uv",
+                 ],
+                 prefix="[pip]",
+                 color_code="31",  # red
+             )
+         except RuntimeError as e:
+             msg = f"Unable to install uv. Environment must include uv, or uv must be installed with pip: {e}"
+             raise RuntimeError(msg)
+
+
+ def _install_to_current_python(path: str, source_root: Path) -> None:
+     # Bit of an ugly hack: install to whatever python environment we're currently in.
+     # Find the python bin path and get its parent dir, and install into whatever that
+     # python is. Hopefully we're in a container or a venv, otherwise this is installing to
+     # the system python
+     # https://docs.astral.sh/uv/concepts/projects/config/#project-environment-path
+     python_bin_path = os.path.dirname(sys.executable)
+     python_parent_dir = os.path.dirname(python_bin_path)
+     _validate_path_is_safe(path, source_root, "python_dependencies")
+     print(
+         f"Installing python dependencies from {path} (relative to {source_root}) into {python_parent_dir}"
+     )
+     try:
+         _run_with_prefix(
+             [
+                 "uv",
+                 "pip",
+                 "install",
+                 f"--prefix={python_parent_dir}",
+                 path,
+             ],
+             cwd=source_root,
+             prefix="[uv]",
+             color_code="36",
+         )
+
+         # Force Python to refresh its package discovery after installing new packages
+         site.main()  # Refresh site-packages paths
+         # Clear import caches to ensure newly installed packages are discoverable
+         importlib.invalidate_caches()
+
+     except RuntimeError as e:
+         msg = f"Unable to install service dependencies: {e}"
+         raise RuntimeError(msg) from None
+
+
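The `--prefix` value is just two `dirname` hops up from the interpreter, so the install lands in the active venv (or, failing that, the system prefix). A worked example with an invented path:

    import os, sys

    # Assume sys.executable == "/app/.venv/bin/python" (illustrative)
    python_bin_path = os.path.dirname(sys.executable)      # "/app/.venv/bin"
    python_parent_dir = os.path.dirname(python_bin_path)   # "/app/.venv"
    # Resulting command: uv pip install --prefix=/app/.venv <path>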
+ def _validate_path_is_safe(
+     path: str, source_root: Path, path_type: str = "path"
+ ) -> None:
+     """Validates that a path is within the source root to prevent path traversal attacks.
+
+     Args:
+         path: The path to validate
+         source_root: The root directory that paths should be relative to
+         path_type: Description of the path type for error messages
+
+     Raises:
+         RuntimeError: If the path is outside the source root
+     """
+     resolved_path = (source_root / path).resolve()
+     resolved_source_root = source_root.resolve()
+
+     if not resolved_path.is_relative_to(resolved_source_root):
+         msg = (
+             f"{path_type} {path} is not a subdirectory of the source root {source_root}"
+         )
+         raise RuntimeError(msg)
+
+
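The traversal check leans on `Path.resolve()` plus `Path.is_relative_to()` (available since Python 3.9); a quick illustration with invented paths:

    from pathlib import Path

    root = Path("/deployments/demo")
    inside = (root / "src/ui").resolve()     # /deployments/demo/src/ui
    outside = (root / "../other").resolve()  # /deployments/other

    inside.is_relative_to(root)   # True  -> path accepted
    outside.is_relative_to(root)  # False -> RuntimeError raised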
+ def install_ui(config: DeploymentConfig, config_parent: Path) -> None:
+     if config.ui is None:
+         return
+     path = config.ui.source.location if config.ui.source else "."
+     _validate_path_is_safe(path, config_parent, "ui_source")
+     _run_with_prefix(
+         ["pnpm", "install"],
+         cwd=config_parent / path,
+         prefix="[pnpm-install]",
+         color_code="33",
+     )
+
+
+ def _ui_env(config: DeploymentConfig) -> dict[str, str]:
+     env = os.environ.copy()
+     env["LLAMA_DEPLOY_DEPLOYMENT_URL_ID"] = config.name
+     env["LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH"] = f"/deployments/{config.name}/ui"
+     if config.ui is not None:
+         env["PORT"] = str(config.ui.port)
+     return env
+
+
+ def build_ui(config_parent: Path, config: DeploymentConfig) -> bool:
+     """
+     Returns True if the UI was built; False if the package defines no build command.
+     """
+     if config.ui is None:
+         return False
+     path = config.ui.source.location if config.ui.source else "."
+     _validate_path_is_safe(path, config_parent, "ui_source")
+     env = _ui_env(config)
+
+     package_json_path = config_parent / path / "package.json"
+
+     with open(package_json_path, "r", encoding="utf-8") as f:
+         pkg = json.load(f)
+     scripts = pkg.get("scripts", {})
+     if "build" not in scripts:
+         return False
+
+     _run_with_prefix(
+         ["pnpm", "build"],
+         cwd=config_parent / path,
+         env=env,
+         prefix="[pnpm-build]",
+         color_code="34",
+     )
+     return True
+
+
+ def start_dev_ui_process(
+     root: Path, main_port: int, config: DeploymentConfig
+ ) -> None | subprocess.Popen:
+     ui = config.ui
+     if ui is None:
+         return None
+
+     # If a UI dev server is already listening on the configured port, do not start another
+     def _is_port_open(port: int) -> bool:
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
+             sock.settimeout(0.2)
+             try:
+                 return sock.connect_ex(("127.0.0.1", port)) == 0
+             except Exception:
+                 return False
+
+     if _is_port_open(ui.port):
+         logger.info(
+             "Detected process already running on port %s; not starting a new one.",
+             ui.port,
+         )
+         return None
+     # start the ui process
+     env = _ui_env(config)
+     # Transform first 20 lines to replace the default UI port with the main server port
+     line_counter = {"n": 0}
+
+     def _transform(line: str) -> str:
+         if line_counter["n"] < 20:
+             line = line.replace(f":{ui.port}", f":{main_port}")
+             line_counter["n"] += 1
+         return line
+
+     return _start_streaming_process(
+         ["pnpm", "run", "dev"],
+         cwd=root / (ui.source.location if ui.source else "."),
+         env=env,
+         prefix="[ui-server]",
+         color_code="35",  # magenta
+         line_transform=_transform,
+     )
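Taken together, a plausible end-to-end flow for this new module, sketched with invented values (the port number is hypothetical and the real call sites live elsewhere in the appserver):

    from llama_deploy.appserver.settings import settings
    from llama_deploy.appserver.deployment_config_parser import get_deployment_config

    config = get_deployment_config()
    do_install()                                   # uv + pnpm install steps
    load_environment_variables(config, settings.config_parent)
    workflows = load_workflows(config)             # "default" aliases config.default_service
    ui_proc = start_dev_ui_process(settings.config_parent, main_port=4501, config=config)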
llama_deploy_appserver-0.3.0a1.dist-info/METADATA
@@ -1,21 +1,18 @@
  Metadata-Version: 2.3
  Name: llama-deploy-appserver
- Version: 0.2.7a1
+ Version: 0.3.0a1
  Summary: Application server components for LlamaDeploy
  Author: Massimiliano Pippi
  Author-email: Massimiliano Pippi <mpippi@gmail.com>
  License: MIT
- Requires-Dist: asgiref>=3.9.1
  Requires-Dist: llama-index-workflows>=1.1.0
  Requires-Dist: pydantic-settings>=2.10.1
  Requires-Dist: uvicorn>=0.24.0
- Requires-Dist: prometheus-client>=0.20.0
- Requires-Dist: python-multipart>=0.0.18,<0.0.19
  Requires-Dist: fastapi>=0.100.0
  Requires-Dist: websockets>=12.0
- Requires-Dist: gitpython>=3.1.40,<4
- Requires-Dist: llama-deploy-core>=0.2.7a1,<0.3.0
+ Requires-Dist: llama-deploy-core>=0.3.0a1,<0.4.0
  Requires-Dist: httpx>=0.28.1
+ Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
  Requires-Python: >=3.12, <4
  Description-Content-Type: text/markdown
  
llama_deploy_appserver-0.3.0a1.dist-info/RECORD
@@ -0,0 +1,17 @@
+ llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+ llama_deploy/appserver/__main__.py,sha256=32eff329cadb4f883c9df3a1b2bcb908d5adde765e7c7e761d25b7df4827b9ca,196
+ llama_deploy/appserver/app.py,sha256=cc19f4f955fd9b5ef6d7ebb009800444073082e13482c165e2550d244c24ad0a,3598
+ llama_deploy/appserver/bootstrap.py,sha256=abf2ab94a4f15b851689de9b48ae5c57aa3a648de95c4102465ca5350cecabad,2988
+ llama_deploy/appserver/deployment.py,sha256=bd35b20e0068c44965e0b76126170a292c151665417a0b322b977641ce88bfc1,2671
+ llama_deploy/appserver/deployment_config_parser.py,sha256=27b3f20b95703293059182e6ef2a5a44b59c66ae73517eba9f53b62cd5b0f833,3395
+ llama_deploy/appserver/routers/__init__.py,sha256=ed8fa7613eb5584bcc1b40e18a40a0e29ce39cc9c8d4bf9ad8c79e5b1d050700,214
+ llama_deploy/appserver/routers/deployments.py,sha256=4b4179c4cbd48548a0cde9bbc07f3668c8f5abcffa4bfb1c98654e81e40b8329,7291
+ llama_deploy/appserver/routers/status.py,sha256=eead8e0aebbc7e5e3ca8f0c00d0c1b6df1d6cde7844edfbe9350bf64ab85006f,257
+ llama_deploy/appserver/routers/ui_proxy.py,sha256=9bf7e433e387bb3aa3c6fabee09313c95ca715c41dea5915ee55cd8acaa792ef,7194
+ llama_deploy/appserver/settings.py,sha256=741b2ac12f37ea4bd7da4fbac4170ca7719095f123ff7a451ae8be36a530b4a6,2820
+ llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
+ llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
+ llama_deploy/appserver/workflow_loader.py,sha256=304f4532a6a51a326a57bbdea251ab7a6398cdf5aff16081b295cb915c6fd231,12266
+ llama_deploy_appserver-0.3.0a1.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+ llama_deploy_appserver-0.3.0a1.dist-info/METADATA,sha256=a7f4a18efe0e4054a057ad96c3ada3dbd7810c66796a5bb9ad70d80c4be209de,746
+ llama_deploy_appserver-0.3.0a1.dist-info/RECORD,,
llama_deploy_appserver-0.3.0a1.dist-info/WHEEL
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: uv 0.7.21
+ Generator: uv 0.7.20
  Root-Is-Purelib: true
  Tag: py3-none-any
llama_deploy/appserver/client/__init__.py
@@ -1,3 +0,0 @@
- from .client import Client
-
- __all__ = ["Client"]
llama_deploy/appserver/client/base.py
@@ -1,30 +0,0 @@
- from typing import Any
-
- import httpx
- from pydantic_settings import BaseSettings, SettingsConfigDict
-
-
- class _BaseClient(BaseSettings):
-     """Base type for clients, to be used in Pydantic models to avoid circular imports.
-
-     Settings can be passed to the Client constructor when creating an instance, or defined with environment variables
-     having names prefixed with the string `LLAMA_DEPLOY_`, e.g. `LLAMA_DEPLOY_DISABLE_SSL`.
-     """
-
-     model_config = SettingsConfigDict(env_prefix="LLAMA_DEPLOY_")
-
-     api_server_url: str = "http://localhost:4501"
-     disable_ssl: bool = False
-     timeout: float | None = 120.0
-     poll_interval: float = 0.5
-
-     async def request(
-         self, method: str, url: str | httpx.URL, **kwargs: Any
-     ) -> httpx.Response:
-         """Performs an async HTTP request using httpx."""
-         verify = kwargs.pop("verify", True)
-         timeout = kwargs.pop("timeout", self.timeout)
-         async with httpx.AsyncClient(verify=verify) as client:
-             response = await client.request(method, url, timeout=timeout, **kwargs)
-             response.raise_for_status()
-             return response
llama_deploy/appserver/client/client.py
@@ -1,49 +0,0 @@
- import asyncio
- from typing import Any
-
- from .base import _BaseClient
- from .models import ApiServer, make_sync
-
-
- class Client(_BaseClient):
-     """The LlamaDeploy Python client.
-
-     The client gives access to both the asyncio and non-asyncio APIs. To access the sync
-     API just use methods of `client.sync`.
-
-     Example usage:
-     ```py
-     from llama_deploy.client import Client
-
-     # Use the same client instance
-     client = Client()
-
-     async def an_async_function():
-         status = await client.apiserver.status()
-
-     def normal_function():
-         status = client.sync.apiserver.status()
-     ```
-     """
-
-     @property
-     def sync(self) -> "_SyncClient":
-         """Returns the sync version of the client API."""
-         try:
-             asyncio.get_running_loop()
-         except RuntimeError:
-             return _SyncClient(**self.model_dump())
-
-         msg = "You cannot use the sync client within an async event loop - just await the async methods directly."
-         raise RuntimeError(msg)
-
-     @property
-     def apiserver(self) -> ApiServer:
-         """Access the API Server functionalities."""
-         return ApiServer(client=self, id="apiserver")
-
-
- class _SyncClient(_BaseClient):
-     @property
-     def apiserver(self) -> Any:
-         return make_sync(ApiServer)(client=self, id="apiserver")
llama_deploy/appserver/client/models/__init__.py
@@ -1,4 +0,0 @@
- from .apiserver import ApiServer
- from .model import Collection, Model, make_sync
-
- __all__ = ["ApiServer", "Collection", "Model", "make_sync"]