llama-deploy-appserver 0.3.0a4__py3-none-any.whl → 0.3.0a6__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
llama_deploy/appserver/app.py

@@ -1,28 +1,19 @@
+ import argparse
  import logging
  import os
- from pathlib import Path
  import threading
  import time
- from llama_deploy.appserver.deployment_config_parser import (
-     get_deployment_config,
- )
- from llama_deploy.appserver.settings import configure_settings, settings
+ import webbrowser
+ from contextlib import asynccontextmanager
+ from pathlib import Path
+ from typing import Any, AsyncGenerator

+ import uvicorn
  from fastapi import FastAPI
  from fastapi.middleware.cors import CORSMiddleware
- from llama_deploy.appserver.workflow_loader import (
-     build_ui,
-     do_install,
-     load_environment_variables,
-     start_dev_ui_process,
+ from llama_deploy.appserver.deployment_config_parser import (
+     get_deployment_config,
  )
- import uvicorn
-
- from .routers import health_router
- from prometheus_fastapi_instrumentator import Instrumentator
- from contextlib import asynccontextmanager
- from typing import Any, AsyncGenerator
-
  from llama_deploy.appserver.routers.deployments import (
      create_base_router,
      create_deployments_router,
@@ -31,13 +22,23 @@ from llama_deploy.appserver.routers.ui_proxy import (
      create_ui_proxy_router,
      mount_static_files,
  )
+ from llama_deploy.appserver.settings import configure_settings, settings
  from llama_deploy.appserver.workflow_loader import (
+     _exclude_venv_warning,
+     build_ui,
+     find_python_pyproject,
+     inject_appserver_into_target,
+     install_ui,
+     load_environment_variables,
      load_workflows,
+     start_dev_ui_process,
  )
+ from prometheus_fastapi_instrumentator import Instrumentator

  from .deployment import Deployment
+ from .process_utils import run_process
+ from .routers import health_router
  from .stats import apiserver_state
- import webbrowser

  logger = logging.getLogger("uvicorn.info")

@@ -45,7 +46,6 @@ logger = logging.getLogger("uvicorn.info")
  @asynccontextmanager
  async def lifespan(app: FastAPI) -> AsyncGenerator[None, Any]:
      apiserver_state.state("starting")
-
      config = get_deployment_config()

      workflows = load_workflows(config)
@@ -85,24 +85,44 @@ if not os.environ.get("DISABLE_CORS", False):
  app.include_router(health_router)


+ def open_browser_async(host: str, port: int) -> None:
+     def _open_with_delay() -> None:
+         time.sleep(1)
+         webbrowser.open(f"http://{host}:{port}")
+
+     threading.Thread(target=_open_with_delay).start()
+
+
+ def prepare_server(
+     deployment_file: Path | None = None,
+     install: bool = False,
+     build: bool = False,
+ ) -> None:
+     configure_settings(deployment_file_path=deployment_file)
+     load_environment_variables(get_deployment_config(), settings.config_parent)
+     if install:
+         config = get_deployment_config()
+         inject_appserver_into_target(config, settings.config_parent)
+         install_ui(config, settings.config_parent)
+     if build:
+         build_ui(settings.config_parent, get_deployment_config())
+
+
  def start_server(
      proxy_ui: bool = False,
      reload: bool = False,
      cwd: Path | None = None,
      deployment_file: Path | None = None,
-     install: bool = False,
-     build: bool = False,
      open_browser: bool = False,
  ) -> None:
      # Configure via environment so uvicorn reload workers inherit the values
      configure_settings(
-         proxy_ui=proxy_ui, app_root=cwd, deployment_file_path=deployment_file
+         proxy_ui=proxy_ui,
+         app_root=cwd,
+         deployment_file_path=deployment_file,
+         reload=reload,
      )
      load_environment_variables(get_deployment_config(), settings.config_parent)
-     if install:
-         do_install()
-     if build:
-         build_ui(settings.config_parent, get_deployment_config())

      ui_process = None
      if proxy_ui:
@@ -111,14 +131,7 @@ def start_server(
      )
      try:
          if open_browser:
-
-             def open_with_delay():
-                 time.sleep(1)
-                 webbrowser.open(f"http://{settings.host}:{settings.port}")
-
-             threading.Thread(
-                 target=open_with_delay,
-             ).start()
+             open_browser_async(settings.host, settings.port)

          uvicorn.run(
              "llama_deploy.appserver.app:app",
@@ -129,3 +142,52 @@ def start_server(
      finally:
          if ui_process is not None:
              ui_process.terminate()
+
+
+ def start_server_in_target_venv(
+     proxy_ui: bool = False,
+     reload: bool = False,
+     cwd: Path | None = None,
+     deployment_file: Path | None = None,
+     open_browser: bool = False,
+ ) -> None:
+     cfg = get_deployment_config()
+     path = find_python_pyproject(cwd or Path.cwd(), cfg)
+
+     args = ["uv", "run", "python", "-m", "llama_deploy.appserver.app"]
+     if proxy_ui:
+         args.append("--proxy-ui")
+     if reload:
+         args.append("--reload")
+     if deployment_file:
+         args.append("--deployment-file")
+         args.append(str(deployment_file))
+     if open_browser:
+         args.append("--open-browser")
+     # All the streaming/PTY/pipe handling is centralized
+     ret = run_process(
+         args,
+         cwd=path,
+         env=None,
+         line_transform=_exclude_venv_warning,
+     )
+
+     if ret != 0:
+         raise SystemExit(ret)
+
+
+ if __name__ == "__main__":
+     print("starting server")
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--proxy-ui", action="store_true")
+     parser.add_argument("--reload", action="store_true")
+     parser.add_argument("--deployment-file", type=Path)
+     parser.add_argument("--open-browser", action="store_true")
+
+     args = parser.parse_args()
+     start_server(
+         proxy_ui=args.proxy_ui,
+         reload=args.reload,
+         deployment_file=args.deployment_file,
+         open_browser=args.open_browser,
+     )
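
Note: in 0.3.0a6 the install/build steps move out of start_server into the new prepare_server, and start_server_in_target_venv re-invokes this module via `uv run python -m llama_deploy.appserver.app` so the server runs inside the target app's own virtualenv. A minimal sketch of driving the new entry points from Python; the deployment file path below is illustrative, not part of the package:

    from pathlib import Path

    from llama_deploy.appserver.app import prepare_server, start_server

    # Illustrative path; point this at your own deployment config.
    deployment_file = Path("llama_deploy.yaml")

    # One-time setup: inject the appserver into the target venv, install and
    # build the UI (what start_server's removed install=/build= flags used to do).
    prepare_server(deployment_file=deployment_file, install=True, build=True)

    # Then run the FastAPI app (blocks until shutdown).
    start_server(deployment_file=deployment_file, open_browser=True)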
llama_deploy/appserver/bootstrap.py

@@ -1,100 +1,70 @@
  """
  Bootstraps an application from a remote github repository given environment variables.

- This just sets up the files from the repository. It's more of a build process, does not start an application.
+ This just sets up the files from the repository. It's more of a build process.
  """

  import os
  from pathlib import Path
- from llama_deploy.appserver.settings import settings
+
  from llama_deploy.appserver.deployment_config_parser import get_deployment_config
+ from llama_deploy.appserver.settings import BootstrapSettings, configure_settings
  from llama_deploy.appserver.workflow_loader import (
      build_ui,
-     do_install,
+     inject_appserver_into_target,
+     install_ui,
      load_environment_variables,
  )
  from llama_deploy.core.git.git_util import (
      clone_repo,
  )
- from llama_deploy.appserver.app import start_server
- from llama_deploy.appserver.settings import BootstrapSettings, configure_settings
-
- import argparse


  def bootstrap_app_from_repo(
-     clone: bool = False,
-     build: bool = False,
-     serve: bool = False,
-     target_dir: str = "/opt/app/",
+     target_dir: str = "/opt/app",
  ):
      bootstrap_settings = BootstrapSettings()
      # Needs the github url+auth, and the deployment file path
      # clones the repo to a standard directory
      # (eventually) runs the UI build process and moves that to a standard directory for a file server
-     if clone:
-         repo_url = bootstrap_settings.repo_url
-         if repo_url is None:
-             raise ValueError("repo_url is required to bootstrap")
-         clone_repo(
-             repository_url=repo_url,
-             git_ref=bootstrap_settings.git_sha or bootstrap_settings.git_ref,
-             basic_auth=bootstrap_settings.auth_token,
-             dest_dir=target_dir,
-         )
-     # Ensure target_dir exists locally when running tests outside a container
-     os.makedirs(target_dir, exist_ok=True)
-     os.chdir(target_dir)
+
+     repo_url = bootstrap_settings.repo_url
+     if repo_url is None:
+         raise ValueError("repo_url is required to bootstrap")
+     clone_repo(
+         repository_url=repo_url,
+         git_ref=bootstrap_settings.git_sha or bootstrap_settings.git_ref,
+         basic_auth=bootstrap_settings.auth_token,
+         dest_dir=target_dir,
+     )
+     # Ensure target_dir exists locally when running tests outside a container
+     os.makedirs(target_dir, exist_ok=True)
+     os.chdir(target_dir)
      configure_settings(
          app_root=Path(target_dir),
          deployment_file_path=Path(bootstrap_settings.deployment_file_path),
      )
+     config = get_deployment_config()
+     base_path = Path(target_dir)
+     load_environment_variables(config, base_path)

-     built = True
-     load_environment_variables(get_deployment_config(), Path(target_dir))
-     if build:
-         do_install()
-         built = build_ui(settings.config_parent, get_deployment_config())
+     sdists = None
+     if bootstrap_settings.bootstrap_sdists:
+         sdists = [
+             Path(bootstrap_settings.bootstrap_sdists) / f
+             for f in os.listdir(bootstrap_settings.bootstrap_sdists)
+         ]
+         sdists = [f for f in sdists if f.is_file() and f.name.endswith(".tar.gz")]
+         if not sdists:
+             sdists = None
+     # Use the explicit base path rather than relying on global settings so tests
+     # can safely mock configure_settings without affecting call arguments.
+     inject_appserver_into_target(config, base_path, sdists)
+     install_ui(config, base_path)
+     build_ui(base_path, config)

-     if serve:
-         start_server(
-             proxy_ui=not built,
-         )
      pass


  if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "--clone",
-         action=argparse.BooleanOptionalAction,
-         default=False,
-         help="Clone the repository before bootstrapping (use --no-clone to disable)",
-     )
-     parser.add_argument(
-         "--build",
-         action=argparse.BooleanOptionalAction,
-         default=False,
-         help="Build the UI/assets (use --no-build to disable)",
-     )
-     parser.add_argument(
-         "--serve",
-         action=argparse.BooleanOptionalAction,
-         default=False,
-         help="Start the API server after bootstrap (use --no-serve to disable)",
-     )
-     args = parser.parse_args()
-     try:
-         bootstrap_app_from_repo(
-             clone=args.clone,
-             build=args.build,
-             serve=args.serve,
-         )
-     except Exception as e:
-         import logging
-
-         logging.exception("Error during bootstrap. Pausing for debugging.")
-         import time
-
-         time.sleep(1000000)
-         raise e
+     bootstrap_app_from_repo()
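
The bootstrap flow is now unconditional (clone, inject, install UI, build; the serve step is gone) and gains sdist support. A small sketch of just the sdist discovery step, with an illustrative directory standing in for BootstrapSettings.bootstrap_sdists:

    import os
    from pathlib import Path

    # Illustrative value; in the real flow this comes from
    # BootstrapSettings.bootstrap_sdists.
    sdists_dir = "/opt/sdists"

    # Collect .tar.gz sdists to install instead of fetching the appserver.
    sdists = [Path(sdists_dir) / f for f in os.listdir(sdists_dir)]
    sdists = [f for f in sdists if f.is_file() and f.name.endswith(".tar.gz")]
    if not sdists:
        sdists = None  # fall back to an editable or PyPI install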
llama_deploy/appserver/deployment.py

@@ -8,7 +8,6 @@ from llama_deploy.appserver.workflow_loader import DEFAULT_SERVICE_ID
  from workflows import Context, Workflow
  from workflows.handler import WorkflowHandler

-
  logger = logging.getLogger()


llama_deploy/appserver/deployment_config_parser.py

@@ -1,103 +1,7 @@
  import functools
- from pathlib import Path
- from typing import Any

-
- from llama_deploy.appserver.settings import settings, BootstrapSettings
- import yaml
- from pydantic import BaseModel, ConfigDict, Field, model_validator
-
-
- class ServiceSource(BaseModel):
-     """Configuration for where to load the workflow or other source. Path is relative to the config file its declared within."""
-
-     location: str
-
-     @model_validator(mode="before")
-     @classmethod
-     def validate_fields(cls, data: Any) -> Any:
-         if isinstance(data, dict):
-             if "name" in data:
-                 data["location"] = data.pop("name")
-         return data
-
-
- class Service(BaseModel):
-     """Configuration for a single service."""
-
-     source: ServiceSource | None = Field(None)
-     import_path: str | None = Field(None)
-     env: dict[str, str] | None = Field(None)
-     env_files: list[str] | None = Field(None)
-     python_dependencies: list[str] | None = Field(None)
-
-     @model_validator(mode="before")
-     @classmethod
-     def validate_fields(cls, data: Any) -> Any:
-         if isinstance(data, dict):
-             # Handle YAML aliases
-             if "path" in data:
-                 data["import_path"] = data.pop("path")
-             if "import-path" in data:
-                 data["import_path"] = data.pop("import-path")
-             if "env-files" in data:
-                 data["env_files"] = data.pop("env-files")
-
-         return data
-
-     def module_location(self) -> tuple[str, str]:
-         """
-         Parses the import path, and target, discarding legacy file path portion, if any
-
-         "src/module.workflow:my_workflow" -> ("module.workflow", "my_workflow")
-         """
-         module_name, workflow_name = self.import_path.split(":")
-         return Path(module_name).name, workflow_name
-
-
- class UIService(Service):
-     port: int = Field(
-         default=3000,
-         description="The TCP port to use for the nextjs server",
-     )
-
-
- class DeploymentConfig(BaseModel):
-     """Model definition mapping a deployment config file."""
-
-     model_config = ConfigDict(populate_by_name=True, extra="ignore")
-
-     name: str
-     default_service: str | None = Field(None)
-     services: dict[str, Service]
-     ui: UIService | None = None
-
-     @model_validator(mode="before")
-     @classmethod
-     def validate_fields(cls, data: Any) -> Any:
-         # Handle YAML aliases
-         if isinstance(data, dict):
-             if "default-service" in data:
-                 data["default_service"] = data.pop("default-service")
-
-         return data
-
-     @classmethod
-     def from_yaml_bytes(cls, src: bytes) -> "DeploymentConfig":
-         """Read config data from bytes containing yaml code."""
-         config = yaml.safe_load(src) or {}
-         return cls(**config)
-
-     @classmethod
-     def from_yaml(cls, path: Path, name: str | None = None) -> "DeploymentConfig":
-         """Read config data from a yaml file."""
-         with open(path, "r") as yaml_file:
-             config = yaml.safe_load(yaml_file) or {}
-
-         instance = cls(**config)
-         if name:
-             instance.name = name
-         return instance
+ from llama_deploy.appserver.settings import BootstrapSettings, settings
+ from llama_deploy.core.deployment_config import DeploymentConfig


  @functools.lru_cache
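
The config models (ServiceSource, Service, UIService, DeploymentConfig) moved wholesale into llama-deploy-core, leaving this module as a thin cached accessor. Downstream code only needs an import change; a sketch, assuming the moved DeploymentConfig keeps the shape shown in the removed code:

    # Before (0.3.0a4): the models lived in the appserver package.
    # from llama_deploy.appserver.deployment_config_parser import DeploymentConfig

    # After (0.3.0a6): import the models from llama-deploy-core instead.
    from llama_deploy.core.deployment_config import DeploymentConfig

    # The cached accessor in this module keeps working unchanged:
    from llama_deploy.appserver.deployment_config_parser import get_deployment_config

    config: DeploymentConfig = get_deployment_config()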
llama_deploy/appserver/process_utils.py (new file)

@@ -0,0 +1,201 @@
+ import functools
+ import os
+ import platform
+ import pty
+ import subprocess
+ import sys
+ import threading
+ from typing import Callable, TextIO, cast
+
+
+ def run_process(
+     cmd: list[str],
+     *,
+     cwd: os.PathLike | None = None,
+     env: dict[str, str] | None = None,
+     prefix: str | None = None,
+     color_code: str = "36",
+     line_transform: Callable[[str], str | None] | None = None,
+     use_tty: bool | None = None,
+ ) -> None:
+     """Run a process and stream its output with optional TTY semantics.
+
+     If use_tty is None, a PTY will be used only when the parent's stdout is a TTY
+     and the platform supports PTYs. When a PTY is used, stdout/stderr are merged.
+     """
+     use_pty = _should_use_pty(use_tty)
+     prefixer = _make_prefixer(prefix, color_code, line_transform)
+
+     process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+     threads: list[threading.Thread] = []
+     try:
+         cleanup()
+         threads = _start_stream_threads(sources, prefixer)
+         ret = process.wait()
+         if ret != 0:
+             raise subprocess.CalledProcessError(ret, cmd)
+     finally:
+         for t in threads:
+             t.join()
+
+
+ def spawn_process(
+     cmd: list[str],
+     *,
+     cwd: os.PathLike | None = None,
+     env: dict[str, str] | None = None,
+     prefix: str | None = None,
+     color_code: str = "36",
+     line_transform: Callable[[str], str | None] | None = None,
+     use_tty: bool | None = None,
+ ) -> subprocess.Popen:
+     """Spawn a process and stream its output in background threads.
+
+     Returns immediately with the Popen object. Streaming threads are daemons.
+     """
+     use_pty = _should_use_pty(use_tty)
+     prefixer = _make_prefixer(prefix, color_code, line_transform)
+
+     process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+     cleanup()
+     _start_stream_threads(sources, prefixer)
+     return process
+
+
+ @functools.cache
+ def _use_color() -> bool:
+     """Return True if ANSI colors should be emitted to stdout.
+
+     Respects common environment variables and falls back to TTY detection.
+     """
+     force_color = os.environ.get("FORCE_COLOR")
+
+     return sys.stdout.isatty() or force_color is not None and force_color != "0"
+
+
+ def _colored_prefix(prefix: str, color_code: str) -> str:
+     return f"\x1b[{color_code}m{prefix}\x1b[0m " if _use_color() else f"{prefix} "
+
+
+ def _make_prefixer(
+     prefix: str | None,
+     color_code: str,
+     line_transform: Callable[[str], str | None] | None = None,
+ ) -> Callable[[str], str | None]:
+     colored = _colored_prefix(prefix, color_code) if prefix else ""
+
+     def _prefixer(line: str) -> str | None:
+         transformed = line_transform(line) if line_transform else line
+         if transformed is None:
+             return None
+         return f"{colored}{transformed}"
+
+     return _prefixer
+
+
+ # Unified PTY/Pipe strategy helpers
+
+
+ def _should_use_pty(use_tty: bool | None) -> bool:
+     if platform.system() == "Windows":
+         return False
+     if use_tty is None:
+         return sys.stdout.isatty()
+     return use_tty and sys.stdout.isatty()
+
+
+ def _spawn_process(
+     cmd: list[str],
+     *,
+     cwd: os.PathLike | None,
+     env: dict[str, str] | None,
+     use_pty: bool,
+ ) -> tuple[subprocess.Popen, list[tuple[int | TextIO, TextIO]], Callable[[], None]]:
+     if use_pty:
+         master_fd, slave_fd = pty.openpty()
+         process = subprocess.Popen(
+             cmd,
+             env=env,
+             cwd=cwd,
+             stdin=slave_fd,
+             stdout=slave_fd,
+             stderr=slave_fd,
+             close_fds=True,
+         )
+
+         def cleanup() -> None:
+             try:
+                 os.close(slave_fd)
+             except OSError:
+                 pass
+
+         sources: list[tuple[int | TextIO, TextIO]] = [
+             (master_fd, cast(TextIO, sys.stdout)),
+         ]
+         return process, sources, cleanup
+
+     process = subprocess.Popen(
+         cmd,
+         env=env,
+         cwd=cwd,
+         stdin=None,
+         stdout=subprocess.PIPE,
+         stderr=subprocess.PIPE,
+         text=True,
+         encoding="utf-8",
+     )
+
+     def cleanup() -> None:
+         return None
+
+     assert process.stdout is not None and process.stderr is not None
+     sources = [
+         (process.stdout, cast(TextIO, sys.stdout)),
+         (process.stderr, cast(TextIO, sys.stderr)),
+     ]
+     return process, sources, cleanup
+
+
+ def _stream_source(
+     source: int | TextIO,
+     writer: TextIO,
+     transform: Callable[[str], str | None] | None,
+ ) -> None:
+     if isinstance(source, int):
+         try:
+             with os.fdopen(
+                 source, "r", encoding="utf-8", errors="replace", buffering=1
+             ) as f:
+                 for line in f:
+                     out = transform(line) if transform else line
+                     if out is not None:
+                         writer.write(out)
+                         writer.flush()
+         except OSError:
+             # PTY EOF may raise EIO; ignore
+             pass
+     else:
+         for line in iter(source.readline, ""):
+             out = transform(line) if transform else line
+             if out is None:
+                 continue
+             writer.write(out)
+             writer.flush()
+         try:
+             source.close()
+         except Exception:
+             pass
+
+
+ def _start_stream_threads(
+     sources: list[tuple[int | TextIO, TextIO]],
+     transform: Callable[[str], str | None] | None,
+ ) -> list[threading.Thread]:
+     threads: list[threading.Thread] = []
+     for src, dst in sources:
+         t = threading.Thread(
+             target=_stream_source, args=(src, dst, transform), daemon=True
+         )
+         t.start()
+         threads.append(t)
+     return threads
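
process_utils.py consolidates the two ad-hoc streaming helpers removed further down in workflow_loader.py. A minimal usage sketch; the commands are illustrative:

    from llama_deploy.appserver.process_utils import run_process, spawn_process

    # One-shot command: streams output (merged via a PTY when stdout is a TTY,
    # separate pipes otherwise) and raises CalledProcessError on a non-zero exit.
    run_process(
        ["uv", "--version"],
        prefix="[uv]",
        color_code="36",
        # Returning None from line_transform drops that line from the output.
        line_transform=lambda line: None if line.startswith("warning:") else line,
    )

    # Long-running command: returns the Popen immediately; output streams on
    # daemon threads, and the caller owns the process lifecycle.
    proc = spawn_process(["pnpm", "run", "dev"], prefix="[pnpm run dev]", color_code="35")
    # ... later, on shutdown:
    proc.terminate()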
llama_deploy/appserver/routers/__init__.py

@@ -1,5 +1,5 @@
  from .deployments import create_deployments_router
- from .ui_proxy import create_ui_proxy_router
  from .status import health_router
+ from .ui_proxy import create_ui_proxy_router

  __all__ = ["create_deployments_router", "create_ui_proxy_router", "health_router"]
llama_deploy/appserver/routers/ui_proxy.py

@@ -2,19 +2,19 @@ import asyncio
  import logging
  from typing import List, Optional

- from fastapi.staticfiles import StaticFiles
- from fastapi import FastAPI
  import httpx
- from llama_deploy.appserver.deployment_config_parser import DeploymentConfig
- from llama_deploy.appserver.settings import ApiserverSettings
  import websockets
  from fastapi import (
      APIRouter,
+     FastAPI,
      HTTPException,
      Request,
      WebSocket,
  )
  from fastapi.responses import StreamingResponse
+ from fastapi.staticfiles import StaticFiles
+ from llama_deploy.appserver.settings import ApiserverSettings
+ from llama_deploy.core.deployment_config import DeploymentConfig
  from starlette.background import BackgroundTask

  logger = logging.getLogger(__name__)
@@ -37,7 +37,7 @@ async def _ws_proxy(ws: WebSocket, upstream_url: str) -> None:

      try:
          # Parse subprotocols if present
-         subprotocols: Optional[List[websockets.Subprotocol]] = None
+         subprotocols: List[websockets.Subprotocol] | None = None
          if "sec-websocket-protocol" in ws.headers:
              # Parse comma-separated subprotocols
              subprotocols = [
llama_deploy/appserver/settings.py

@@ -1,9 +1,9 @@
  import os
  from pathlib import Path

+ from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
  from pydantic import Field
  from pydantic_settings import BaseSettings, SettingsConfigDict
- from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH


  class BootstrapSettings(BaseSettings):
@@ -30,6 +30,10 @@ class BootstrapSettings(BaseSettings):
      deployment_name: str | None = Field(
          default=None, description="The name of the deployment"
      )
+     bootstrap_sdists: str | None = Field(
+         default=None,
+         description="A directory containing tar.gz sdists to install instead of installing the appserver",
+     )


  class ApiserverSettings(BaseSettings):
@@ -59,6 +63,11 @@ class ApiserverSettings(BaseSettings):
          description="If true, proxy a development UI server instead of serving built assets",
      )

+     reload: bool = Field(
+         default=False,
+         description="If true, reload the workflow modules, for use in a dev server environment",
+     )
+
      @property
      def config_parent(self) -> Path:
          return (self.app_root / self.deployment_file_path).parent
@@ -71,6 +80,7 @@ def configure_settings(
      proxy_ui: bool | None = None,
      deployment_file_path: Path | None = None,
      app_root: Path | None = None,
+     reload: bool | None = None,
  ) -> None:
      if proxy_ui is not None:
          settings.proxy_ui = proxy_ui
@@ -83,3 +93,6 @@ def configure_settings(
      if app_root is not None:
          settings.app_root = app_root
          os.environ["LLAMA_DEPLOY_APISERVER_APP_ROOT"] = str(app_root)
+     if reload is not None:
+         settings.reload = reload
+         os.environ["LLAMA_DEPLOY_APISERVER_RELOAD"] = "true" if reload else "false"
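
Like the existing settings, the new reload flag is mirrored into the environment so uvicorn reload workers pick it up. A quick sketch of the round trip:

    import os

    from llama_deploy.appserver.settings import configure_settings, settings

    configure_settings(reload=True)
    assert settings.reload is True
    # Mirrored to the environment for uvicorn reload workers:
    assert os.environ["LLAMA_DEPLOY_APISERVER_RELOAD"] == "true"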
llama_deploy/appserver/workflow_loader.py

@@ -1,146 +1,61 @@
+ import functools
  import importlib
- from pathlib import Path
  import logging
+ import os
  import socket
  import subprocess
  import sys
- import os
- import site
- import threading
- from typing import TextIO, Callable, cast
- import json
- from llama_deploy.appserver.settings import settings
+ from importlib.metadata import version as pkg_version
+ from pathlib import Path
+
+ from dotenv import dotenv_values
  from llama_deploy.appserver.deployment_config_parser import (
      DeploymentConfig,
-     get_deployment_config,
  )
+ from llama_deploy.appserver.process_utils import run_process, spawn_process
+ from llama_deploy.core.ui_build import ui_build_output_path
+ from packaging.version import InvalidVersion, Version
  from workflows import Workflow
- from dotenv import dotenv_values

  logger = logging.getLogger(__name__)

  DEFAULT_SERVICE_ID = "default"


- def _stream_subprocess_output(
-     process: subprocess.Popen,
-     prefix: str,
-     color_code: str,
- ) -> None:
-     """Stream a subprocess's stdout to our stdout with a colored prefix.
-
-     The function runs in the caller thread and returns when the subprocess exits
-     or its stdout closes.
-     """
-
-     def _forward_output_with_prefix(pipe: TextIO | None) -> None:
-         if pipe is None:
-             return
-         if sys.stdout.isatty():
-             colored_prefix = f"\x1b[{color_code}m{prefix}\x1b[0m"
-         else:
-             colored_prefix = prefix
-         for line in iter(pipe.readline, ""):
-             sys.stdout.write(f"{colored_prefix} {line}")
-             sys.stdout.flush()
-         try:
-             pipe.close()
-         except Exception:
-             pass
-
-     _forward_output_with_prefix(cast(TextIO, process.stdout))
-
-
- def _run_with_prefix(
-     cmd: list[str],
-     *,
-     cwd: Path | None = None,
-     env: dict[str, str] | None = None,
-     prefix: str,
-     color_code: str = "36",  # cyan by default
- ) -> None:
-     """Run a command streaming output with a colored prefix.
-
-     Raises RuntimeError on non-zero exit.
-     """
-     process = subprocess.Popen(
-         cmd,
-         cwd=cwd,
-         env=env,
-         stdout=subprocess.PIPE,
-         stderr=subprocess.STDOUT,
-         bufsize=1,
-         text=True,
-     )
-     _stream_subprocess_output(process, prefix, color_code)
-     ret = process.wait()
-     if ret != 0:
-         raise RuntimeError(f"Command failed ({ret}): {' '.join(cmd)}")
-
-
- def _start_streaming_process(
-     cmd: list[str],
-     *,
-     cwd: Path | None = None,
-     env: dict[str, str] | None = None,
-     prefix: str,
-     color_code: str,
-     line_transform: Callable[[str], str] | None = None,
- ) -> subprocess.Popen:
-     """Start a subprocess and stream its output on a background thread with a colored prefix.
-
-     Returns the Popen object immediately; caller is responsible for lifecycle.
-     """
-     process = subprocess.Popen(
-         cmd,
-         cwd=cwd,
-         env=env,
-         stdout=subprocess.PIPE,
-         stderr=subprocess.STDOUT,
-         bufsize=1,
-         text=True,
-     )
-
-     def _forward(pipe: TextIO | None) -> None:
-         if pipe is None:
-             return
-         if sys.stdout.isatty():
-             colored_prefix = f"\x1b[{color_code}m{prefix}\x1b[0m"
-         else:
-             colored_prefix = prefix
-         for line in iter(pipe.readline, ""):
-             out_line = line_transform(line) if line_transform else line
-             sys.stdout.write(f"{colored_prefix} {out_line}")
-             sys.stdout.flush()
-         try:
-             pipe.close()
-         except Exception:
-             pass
-
-     threading.Thread(target=_forward, args=(process.stdout,), daemon=True).start()
-     return process
-
-
- def do_install():
-     config = get_deployment_config()
-
-     install_python_dependencies(config, settings.config_parent)
-     install_ui(config, settings.config_parent)
-
-
  def load_workflows(config: DeploymentConfig) -> dict[str, Workflow]:
      """
      Creates WorkflowService instances according to the configuration object.

      """
      workflow_services = {}
+
+     # Pre-compute per-service import info
+     per_service: list[tuple[str, str]] = []
      for service_id, service_config in config.services.items():
-         # Search for a workflow instance in the service path
          if service_config.import_path is None:
              continue
-         module_name, workflow_name = service_config.module_location()
+         raw_mod_path, workflow_name = service_config.import_path.split(":", 1)
+         module_name = Path(raw_mod_path).name
+         per_service.append((service_id, workflow_name))
+
+     for service_id, workflow_name in per_service:
+         import_path = config.services[service_id].import_path
+         if import_path is None:
+             continue
+         raw_mod_path = import_path.split(":", 1)[0]
+         module_name = Path(raw_mod_path).name
+
          module = importlib.import_module(module_name)
-         workflow_services[service_id] = getattr(module, workflow_name)
+
+         if hasattr(module, workflow_name):
+             workflow = getattr(module, workflow_name)
+             if not isinstance(workflow, Workflow):
+                 logger.warning(
+                     f"Workflow {workflow_name} in {module_name} is not a Workflow object",
+                 )
+             workflow_services[service_id] = workflow
+         else:
+             logger.warning("Workflow %s not found in %s", workflow_name, module_name)

      if config.default_service:
          if config.default_service in workflow_services:
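
The loader now parses import_path inline rather than through the removed Service.module_location() helper; the semantics match the old docstring: any legacy file-path prefix is discarded and only the final module name is kept. For example:

    from pathlib import Path

    import_path = "src/module.workflow:my_workflow"
    raw_mod_path, workflow_name = import_path.split(":", 1)
    module_name = Path(raw_mod_path).name

    print(module_name, workflow_name)  # module.workflow my_workflow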
@@ -169,37 +84,202 @@ def load_environment_variables(config: DeploymentConfig, source_root: Path) -> N
              os.environ[key] = value


- def install_python_dependencies(config: DeploymentConfig, source_root: Path) -> None:
+ @functools.cache
+ def are_we_editable_mode() -> bool:
      """
-     Sync the deployment to the base path.
+     Check if we're in editable mode.
+     """
+     # Heuristic: if the package path does not include 'site-packages', treat as editable
+     top_level_pkg = "llama_deploy.appserver"
+     try:
+         pkg = importlib.import_module(top_level_pkg)
+         pkg_path = Path(getattr(pkg, "__file__", "")).resolve()
+         if not pkg_path.exists():
+             return False
+
+         return "site-packages" not in pkg_path.parts
+     except Exception:
+         return False
+
+
+ def inject_appserver_into_target(
+     config: DeploymentConfig, source_root: Path, sdists: list[Path] | None = None
+ ) -> None:
+     """
+     Ensures uv, and uses it to add the appserver as a dependency to the target app.
+     - If sdists are provided, they will be installed directly for offline-ish installs (still fetches dependencies)
+     - If the appserver is currently editable, it will be installed directly from the source repo
+     - otherwise fetches the current version from pypi
+
+     Args:
+         config: The deployment config
+         source_root: The root directory of the deployment
+         sdists: A list of tar.gz sdists files to install instead of installing the appserver
      """
      path = _find_install_target(source_root, config)
-     if path is not None:
-         logger.info(f"Installing python dependencies from {path}")
-         _ensure_uv_available()
-         _install_to_current_python(path, source_root)
+     if path is None:
+         logger.warning(
+             "No python_dependencies and no root pyproject.toml; skipping dependency installation."
+         )
+         return
+     logger.info(f"Installing ensuring venv at {path} and adding appserver to it")
+     _ensure_uv_available()
+     _add_appserver_if_missing(path, source_root, sdists=sdists)
+
+
+ def _get_installed_version_within_target(path: Path) -> Version | None:
+     try:
+         result = subprocess.check_output(
+             [
+                 "uv",
+                 "run",
+                 "python",
+                 "-c",
+                 """from importlib.metadata import version; print(version("llama-deploy-appserver"))""",
+             ],
+             cwd=path,
+         )
+         try:
+             return Version(result.decode("utf-8").strip())
+         except InvalidVersion:
+             return None
+     except subprocess.CalledProcessError:
+         return None
+
+
+ def _get_current_version() -> Version:
+     return Version(pkg_version("llama-deploy-appserver"))
+
+
+ def _is_missing_or_outdated(path: Path) -> Version | None:
+     """
+     returns the current version if the installed version is missing or outdated, otherwise None
+     """
+     installed = _get_installed_version_within_target(path)
+     current = _get_current_version()
+     if installed is None or installed < current:
+         return current
+     return None
+
+
+ def _add_appserver_if_missing(
+     path: Path,
+     source_root: Path,
+     save_version: bool = False,
+     sdists: list[Path] | None = None,
+ ) -> None:
+     """
+     Add the appserver to the venv if it's not already there.
+     """
+
+     if not (source_root / path / "pyproject.toml").exists():
+         logger.warning(
+             f"No pyproject.toml found at {source_root / path}, skipping appserver injection. The server will likely not be able to install your workflows."
+         )
+         return
+
+     def ensure_venv(path: Path) -> Path:
+         venv_path = source_root / path / ".venv"
+         if not venv_path.exists():
+             run_process(
+                 ["uv", "venv", str(venv_path)],
+                 cwd=source_root / path,
+                 prefix="[uv venv]",
+                 color_code="36",
+             )
+         return venv_path
+
+     if sdists:
+         run_process(
+             ["uv", "pip", "install"]
+             + [str(s.absolute()) for s in sdists]
+             + ["--prefix", str(ensure_venv(path))],
+             cwd=source_root / path,
+             prefix="[uv pip install]",
+             color_code="36",
+         )
+     elif are_we_editable_mode():
+         pyproject = _find_development_pyproject()
+         if pyproject is None:
+             raise RuntimeError("No pyproject.toml found in llama-deploy-appserver")
+         target = f"file://{str(pyproject.relative_to(source_root.resolve() / path, walk_up=True))}"
+         run_process(
+             [
+                 "uv",
+                 "pip",
+                 "install",
+                 "--reinstall",
+                 target,
+                 "--prefix",
+                 str(ensure_venv(path)),
+             ],
+             cwd=source_root / path,
+             prefix="[uv pip install]",
+             color_code="36",
+         )
+     else:
+         version = _is_missing_or_outdated(path)
+         if version is not None:
+             if save_version:
+                 run_process(
+                     ["uv", "add", f"llama-deploy-appserver>={version}"],
+                     cwd=source_root / path,
+                     prefix="[uv add]",
+                     color_code="36",
+                     line_transform=_exclude_venv_warning,
+                 )
+             else:
+                 run_process(
+                     [
+                         "uv",
+                         "pip",
+                         "install",
+                         f"llama-deploy-appserver=={version}",
+                         "--prefix",
+                         str(ensure_venv(path)),
+                     ],
+                     cwd=source_root / path,
+                     prefix="[uv pip install]",
+                     color_code="36",
+                 )


- def _find_install_target(base: Path, config: DeploymentConfig) -> str | None:
-     path: str | None = None
+ def _find_development_pyproject() -> Path | None:
+     dir = Path(__file__).parent.resolve()
+     while not (dir / "pyproject.toml").exists():
+         dir = dir.parent
+         if dir == dir.root:
+             return None
+     return dir
+
+
+ def find_python_pyproject(base: Path, config: DeploymentConfig) -> Path | None:
+     path: Path | None = None
      for service_id, service_config in config.services.items():
          if service_config.python_dependencies:
              if len(service_config.python_dependencies) > 1:
                  logger.warning(
                      "Llama Deploy now only supports installing from a single pyproject.toml path"
                  )
-             this_path = service_config.python_dependencies[0]
+             this_path = Path(service_config.python_dependencies[0])
              if path is not None and this_path != path:
                  logger.warning(
                      f"Llama Deploy now only supports installing from a single pyproject.toml path, ignoring {this_path}"
                  )
-             path = this_path
+             else:
+                 path = this_path
      if path is None:
          if (base / "pyproject.toml").exists():
-             path = "."
+             path = Path(".")
      return path


+ def _exclude_venv_warning(line: str) -> str | None:
+     if "use `--active` to target the active environment instead" in line:
+         return None
+     return line
+
+
  def _ensure_uv_available() -> None:
      # Check if uv is available on the path
      uv_available = False
@@ -215,7 +295,7 @@ def _ensure_uv_available() -> None:
      if not uv_available:
          # bootstrap uv with pip
          try:
-             _run_with_prefix(
+             run_process(
                  [
                      sys.executable,
                      "-m",
@@ -223,7 +303,7 @@ def _ensure_uv_available() -> None:
                      "install",
                      "uv",
                  ],
-                 prefix="[pip]",
+                 prefix="[python -m pip]",
                  color_code="31",  # red
              )
          except subprocess.CalledProcessError as e:
@@ -231,41 +311,12 @@ def _ensure_uv_available() -> None:
          raise RuntimeError(msg)


- def _install_to_current_python(path: str, source_root: Path) -> None:
-     # Bit of an ugly hack, install to whatever python environment we're currently in
-     # Find the python bin path and get its parent dir, and install into whatever that
-     # python is. Hopefully we're in a container or a venv, otherwise this is installing to
-     # the system python
-     # https://docs.astral.sh/uv/concepts/projects/config/#project-environment-path
-     python_bin_path = os.path.dirname(sys.executable)
-     python_parent_dir = os.path.dirname(python_bin_path)
-     _validate_path_is_safe(path, source_root, "python_dependencies")
-     try:
-         _run_with_prefix(
-             [
-                 "uv",
-                 "pip",
-                 "install",
-                 f"--prefix={python_parent_dir}",
-                 path,
-             ],
-             cwd=source_root,
-             prefix="[uv]",
-             color_code="36",
-         )
-
-         # Force Python to refresh its package discovery after installing new packages
-         site.main()  # Refresh site-packages paths
-         # Clear import caches to ensure newly installed packages are discoverable
-         importlib.invalidate_caches()
-
-     except subprocess.CalledProcessError as e:
-         msg = f"Unable to install service dependencies using command '{e.cmd}': {e.stderr}"
-         raise RuntimeError(msg) from None
+ def _find_install_target(base: Path, config: DeploymentConfig) -> Path | None:
+     return find_python_pyproject(base, config)


  def _validate_path_is_safe(
-     path: str, source_root: Path, path_type: str = "path"
+     path: Path, source_root: Path, path_type: str = "path"
  ) -> None:
      """Validates that a path is within the source root to prevent path traversal attacks.

@@ -290,12 +341,12 @@
  def install_ui(config: DeploymentConfig, config_parent: Path) -> None:
      if config.ui is None:
          return
-     path = config.ui.source.location if config.ui.source else "."
+     path = Path(config.ui.source.location) if config.ui.source else Path(".")
      _validate_path_is_safe(path, config_parent, "ui_source")
-     _run_with_prefix(
+     run_process(
          ["pnpm", "install"],
          cwd=config_parent / path,
-         prefix="[pnpm-install]",
+         prefix="[pnpm install]",
          color_code="33",
      )

@@ -315,23 +366,19 @@ def build_ui(config_parent: Path, config: DeploymentConfig) -> bool:
      """
      if config.ui is None:
          return False
-     path = config.ui.source.location if config.ui.source else "."
+     path = Path(config.ui.source.location) if config.ui.source else Path(".")
      _validate_path_is_safe(path, config_parent, "ui_source")
      env = _ui_env(config)

-     package_json_path = config_parent / path / "package.json"
-
-     with open(package_json_path, "r", encoding="utf-8") as f:
-         pkg = json.load(f)
-     scripts = pkg.get("scripts", {})
-     if "build" not in scripts:
+     has_build = ui_build_output_path(config_parent, config)
+     if has_build is None:
          return False

-     _run_with_prefix(
-         ["pnpm", "build"],
+     run_process(
+         ["pnpm", "run", "build"],
          cwd=config_parent / path,
          env=env,
-         prefix="[pnpm-build]",
+         prefix="[pnpm run build]",
          color_code="34",
      )
      return True
@@ -362,19 +409,20 @@ def start_dev_ui_process(
      # start the ui process
      env = _ui_env(config)
      # Transform first 20 lines to replace the default UI port with the main server port
-     line_counter = {"n": 0}
+     line_counter = 0

      def _transform(line: str) -> str:
-         if line_counter["n"] < 20:
+         nonlocal line_counter
+         if line_counter < 20:
              line = line.replace(f":{ui.port}", f":{main_port}")
-             line_counter["n"] += 1
+             line_counter += 1
          return line

-     return _start_streaming_process(
+     return spawn_process(
          ["pnpm", "run", "dev"],
          cwd=root / (ui.source.location if ui.source else "."),
          env=env,
-         prefix="[ui-server]",
+         prefix="[pnpm run dev]",
          color_code="35",  # magenta
          line_transform=_transform,
      )
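
A sketch of how the renamed public helpers compose, mirroring what prepare_server and the bootstrap now do; the source root below is illustrative:

    from pathlib import Path

    from llama_deploy.appserver.deployment_config_parser import get_deployment_config
    from llama_deploy.appserver.workflow_loader import (
        find_python_pyproject,
        inject_appserver_into_target,
    )

    config = get_deployment_config()
    source_root = Path(".")  # illustrative; normally settings.config_parent

    # Locate the single pyproject.toml to target (a service's python_dependencies
    # entry, else the repo root), then ensure a .venv there with the appserver added.
    if find_python_pyproject(source_root, config) is not None:
        inject_appserver_into_target(config, source_root)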
llama_deploy_appserver-0.3.0a6.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: llama-deploy-appserver
- Version: 0.3.0a4
+ Version: 0.3.0a6
  Summary: Application server components for LlamaDeploy
  Author: Massimiliano Pippi
  Author-email: Massimiliano Pippi <mpippi@gmail.com>
@@ -10,9 +10,10 @@ Requires-Dist: pydantic-settings>=2.10.1
  Requires-Dist: uvicorn>=0.24.0
  Requires-Dist: fastapi>=0.100.0
  Requires-Dist: websockets>=12.0
- Requires-Dist: llama-deploy-core>=0.3.0a4,<0.4.0
+ Requires-Dist: llama-deploy-core>=0.3.0a6,<0.4.0
  Requires-Dist: httpx>=0.28.1
  Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
+ Requires-Dist: packaging>=25.0
  Requires-Python: >=3.12, <4
  Description-Content-Type: text/markdown

llama_deploy_appserver-0.3.0a6.dist-info/RECORD (added)

@@ -0,0 +1,18 @@
+ llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+ llama_deploy/appserver/__main__.py,sha256=32eff329cadb4f883c9df3a1b2bcb908d5adde765e7c7e761d25b7df4827b9ca,196
+ llama_deploy/appserver/app.py,sha256=9a4b453f65f7004413dee654baf6d3fb7cc359730b01c271449a66b0efc4a5e1,5628
+ llama_deploy/appserver/bootstrap.py,sha256=b2b032af98f477c1e3b515442251890027f4a83c5e73c19af43b36a2c5e1e479,2361
+ llama_deploy/appserver/deployment.py,sha256=8dc31107a8dbe30feb3d414540a5cbd4753faffd617fdab69407ee19a0ecd1bb,2670
+ llama_deploy/appserver/deployment_config_parser.py,sha256=160f6ead40028de8d92af927ac8427c13e62fdfaccbcaf605906d0645e06ba74,455
+ llama_deploy/appserver/process_utils.py,sha256=4fae398607fd84187343b7aeefb16d7314366cbec91c82f0fbb76479386104fb,5668
+ llama_deploy/appserver/routers/__init__.py,sha256=ee2d14ebf4b067c844947ed1cc98186456e8bfa4919282722eaaf8cca345a138,214
+ llama_deploy/appserver/routers/deployments.py,sha256=4b4179c4cbd48548a0cde9bbc07f3668c8f5abcffa4bfb1c98654e81e40b8329,7291
+ llama_deploy/appserver/routers/status.py,sha256=eead8e0aebbc7e5e3ca8f0c00d0c1b6df1d6cde7844edfbe9350bf64ab85006f,257
+ llama_deploy/appserver/routers/ui_proxy.py,sha256=8b034dd2615bc6f992b0e825c644bdef256ee2b46eac00184bf7dedfdb7a2309,7164
+ llama_deploy/appserver/settings.py,sha256=46d209715f4e143eb1f4a44906bacfbd8eb79593d54b9de29fb49e7edc6a2a51,3326
+ llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
+ llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
+ llama_deploy/appserver/workflow_loader.py,sha256=ba863d36cef3bc65ce1227ed3302b9e569e2765e6dd53ba9dcbbf30c0800c1c8,14168
+ llama_deploy_appserver-0.3.0a6.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+ llama_deploy_appserver-0.3.0a6.dist-info/METADATA,sha256=ad52ee37a3964e138ec2f64995dd3698ac0a86ae921476ed6aedcd0e73dca01e,777
+ llama_deploy_appserver-0.3.0a6.dist-info/RECORD,,

llama_deploy_appserver-0.3.0a4.dist-info/RECORD (removed)

@@ -1,17 +0,0 @@
- llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
- llama_deploy/appserver/__main__.py,sha256=32eff329cadb4f883c9df3a1b2bcb908d5adde765e7c7e761d25b7df4827b9ca,196
- llama_deploy/appserver/app.py,sha256=b426223a4f1a7077c3705e818938ecd7696fa8cc625304f1ae6fe1a687195311,3710
- llama_deploy/appserver/bootstrap.py,sha256=e98e58ff38c44eb68ecf2ea3644d8376111d0e18e650589d6aef29e4caef1e15,3107
- llama_deploy/appserver/deployment.py,sha256=bd35b20e0068c44965e0b76126170a292c151665417a0b322b977641ce88bfc1,2671
- llama_deploy/appserver/deployment_config_parser.py,sha256=27b3f20b95703293059182e6ef2a5a44b59c66ae73517eba9f53b62cd5b0f833,3395
- llama_deploy/appserver/routers/__init__.py,sha256=ed8fa7613eb5584bcc1b40e18a40a0e29ce39cc9c8d4bf9ad8c79e5b1d050700,214
- llama_deploy/appserver/routers/deployments.py,sha256=4b4179c4cbd48548a0cde9bbc07f3668c8f5abcffa4bfb1c98654e81e40b8329,7291
- llama_deploy/appserver/routers/status.py,sha256=eead8e0aebbc7e5e3ca8f0c00d0c1b6df1d6cde7844edfbe9350bf64ab85006f,257
- llama_deploy/appserver/routers/ui_proxy.py,sha256=9bf7e433e387bb3aa3c6fabee09313c95ca715c41dea5915ee55cd8acaa792ef,7194
- llama_deploy/appserver/settings.py,sha256=741b2ac12f37ea4bd7da4fbac4170ca7719095f123ff7a451ae8be36a530b4a6,2820
- llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
- llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
- llama_deploy/appserver/workflow_loader.py,sha256=26cf958f8d1ec5fcb266ce465292d883b4f7c64b2832abca04bbf69ae303393d,12160
- llama_deploy_appserver-0.3.0a4.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
- llama_deploy_appserver-0.3.0a4.dist-info/METADATA,sha256=a50e0888f6a99eec2c09c0c1d7b0b84912920470bfb431e32070b31608c42956,746
- llama_deploy_appserver-0.3.0a4.dist-info/RECORD,,