llama-deploy-appserver 0.3.0a4.tar.gz → 0.3.0a6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/PKG-INFO +3 -2
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/pyproject.toml +3 -2
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/app.py +96 -34
- llama_deploy_appserver-0.3.0a6/src/llama_deploy/appserver/bootstrap.py +70 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/deployment.py +0 -1
- llama_deploy_appserver-0.3.0a6/src/llama_deploy/appserver/deployment_config_parser.py +13 -0
- llama_deploy_appserver-0.3.0a6/src/llama_deploy/appserver/process_utils.py +201 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/routers/ui_proxy.py +5 -5
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/settings.py +14 -1
- llama_deploy_appserver-0.3.0a6/src/llama_deploy/appserver/workflow_loader.py +428 -0
- llama_deploy_appserver-0.3.0a4/src/llama_deploy/appserver/bootstrap.py +0 -100
- llama_deploy_appserver-0.3.0a4/src/llama_deploy/appserver/deployment_config_parser.py +0 -109
- llama_deploy_appserver-0.3.0a4/src/llama_deploy/appserver/workflow_loader.py +0 -380
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/README.md +0 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/__init__.py +0 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/__main__.py +0 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/routers/__init__.py +1 -1
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/routers/deployments.py +0 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/routers/status.py +0 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/stats.py +0 -0
- {llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/types.py +0 -0
{llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/PKG-INFO RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-deploy-appserver
-Version: 0.3.0a4
+Version: 0.3.0a6
 Summary: Application server components for LlamaDeploy
 Author: Massimiliano Pippi
 Author-email: Massimiliano Pippi <mpippi@gmail.com>
@@ -10,9 +10,10 @@ Requires-Dist: pydantic-settings>=2.10.1
 Requires-Dist: uvicorn>=0.24.0
 Requires-Dist: fastapi>=0.100.0
 Requires-Dist: websockets>=12.0
-Requires-Dist: llama-deploy-core>=0.3.0a4,<0.4.0
+Requires-Dist: llama-deploy-core>=0.3.0a6,<0.4.0
 Requires-Dist: httpx>=0.28.1
 Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
+Requires-Dist: packaging>=25.0
 Requires-Python: >=3.12, <4
 Description-Content-Type: text/markdown
 
{llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/pyproject.toml RENAMED
@@ -1,6 +1,6 @@
 [project]
 name = "llama-deploy-appserver"
-version = "0.3.0a4"
+version = "0.3.0a6"
 description = "Application server components for LlamaDeploy"
 readme = "README.md"
 license = { text = "MIT" }
@@ -14,9 +14,10 @@ dependencies = [
     "uvicorn>=0.24.0",
     "fastapi>=0.100.0",
     "websockets>=12.0",
-    "llama-deploy-core>=0.3.0a4,<0.4.0",
+    "llama-deploy-core>=0.3.0a6,<0.4.0",
     "httpx>=0.28.1",
     "prometheus-fastapi-instrumentator>=7.1.0",
+    "packaging>=25.0",
 ]
 
 [build-system]
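Both the sdist metadata and pyproject.toml bump the llama-deploy-core pin to the matching prerelease and add a new dependency on packaging. The diff does not show where packaging is used; the sketch below only illustrates the kind of version check the library enables, with the specifier copied from the pyproject above.

# Hypothetical sketch — not taken from the package source.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

def core_is_compatible(installed: str) -> bool:
    # Mirror the llama-deploy-core constraint from pyproject.toml above.
    return Version(installed) in SpecifierSet(">=0.3.0a6,<0.4.0", prereleases=True)

assert core_is_compatible("0.3.0a6")
assert not core_is_compatible("0.4.0")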
{llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/app.py RENAMED
@@ -1,28 +1,19 @@
+import argparse
 import logging
 import os
-from pathlib import Path
 import threading
 import time
-
-
-
-from …
+import webbrowser
+from contextlib import asynccontextmanager
+from pathlib import Path
+from typing import Any, AsyncGenerator
 
+import uvicorn
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
-from llama_deploy.appserver.…
-…
-    do_install,
-    load_environment_variables,
-    start_dev_ui_process,
+from llama_deploy.appserver.deployment_config_parser import (
+    get_deployment_config,
 )
-import uvicorn
-
-from .routers import health_router
-from prometheus_fastapi_instrumentator import Instrumentator
-from contextlib import asynccontextmanager
-from typing import Any, AsyncGenerator
-
 from llama_deploy.appserver.routers.deployments import (
     create_base_router,
     create_deployments_router,
@@ -31,13 +22,23 @@ from llama_deploy.appserver.routers.ui_proxy import (
     create_ui_proxy_router,
     mount_static_files,
 )
+from llama_deploy.appserver.settings import configure_settings, settings
 from llama_deploy.appserver.workflow_loader import (
+    _exclude_venv_warning,
+    build_ui,
+    find_python_pyproject,
+    inject_appserver_into_target,
+    install_ui,
+    load_environment_variables,
     load_workflows,
+    start_dev_ui_process,
 )
+from prometheus_fastapi_instrumentator import Instrumentator
 
 from .deployment import Deployment
+from .process_utils import run_process
+from .routers import health_router
 from .stats import apiserver_state
-import webbrowser
 
 
 logger = logging.getLogger("uvicorn.info")
@@ -45,7 +46,6 @@ logger = logging.getLogger("uvicorn.info")
 @asynccontextmanager
 async def lifespan(app: FastAPI) -> AsyncGenerator[None, Any]:
     apiserver_state.state("starting")
-
     config = get_deployment_config()
 
     workflows = load_workflows(config)
@@ -85,24 +85,44 @@ if not os.environ.get("DISABLE_CORS", False):
 app.include_router(health_router)
 
 
+def open_browser_async(host: str, port: int) -> None:
+    def _open_with_delay() -> None:
+        time.sleep(1)
+        webbrowser.open(f"http://{host}:{port}")
+
+    threading.Thread(target=_open_with_delay).start()
+
+
+def prepare_server(
+    deployment_file: Path | None = None,
+    install: bool = False,
+    build: bool = False,
+) -> None:
+    configure_settings(deployment_file_path=deployment_file)
+    load_environment_variables(get_deployment_config(), settings.config_parent)
+    if install:
+        config = get_deployment_config()
+        inject_appserver_into_target(config, settings.config_parent)
+        install_ui(config, settings.config_parent)
+    if build:
+        build_ui(settings.config_parent, get_deployment_config())
+
+
 def start_server(
     proxy_ui: bool = False,
     reload: bool = False,
     cwd: Path | None = None,
     deployment_file: Path | None = None,
-    install: bool = False,
-    build: bool = False,
     open_browser: bool = False,
 ) -> None:
     # Configure via environment so uvicorn reload workers inherit the values
     configure_settings(
-        proxy_ui=proxy_ui,
+        proxy_ui=proxy_ui,
+        app_root=cwd,
+        deployment_file_path=deployment_file,
+        reload=reload,
     )
     load_environment_variables(get_deployment_config(), settings.config_parent)
-    if install:
-        do_install()
-    if build:
-        build_ui(settings.config_parent, get_deployment_config())
 
     ui_process = None
     if proxy_ui:
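The install/build flags move out of start_server into the new prepare_server step, separating one-time setup from serving. A minimal usage sketch based on the signatures above (the deployment file name is hypothetical):

from pathlib import Path

from llama_deploy.appserver.app import prepare_server, start_server

deployment_file = Path("deployment.yml")  # hypothetical file name

# One-time setup: inject the appserver into the target env and build the UI.
prepare_server(deployment_file=deployment_file, install=True, build=True)

# Then serve; install/build are no longer start_server's concern.
start_server(reload=True, deployment_file=deployment_file, open_browser=True)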
@@ -111,14 +131,7 @@ def start_server(
     )
     try:
         if open_browser:
-
-            def open_with_delay():
-                time.sleep(1)
-                webbrowser.open(f"http://{settings.host}:{settings.port}")
-
-            threading.Thread(
-                target=open_with_delay,
-            ).start()
+            open_browser_async(settings.host, settings.port)
 
         uvicorn.run(
             "llama_deploy.appserver.app:app",
@@ -129,3 +142,52 @@ def start_server(
     finally:
         if ui_process is not None:
             ui_process.terminate()
+
+
+def start_server_in_target_venv(
+    proxy_ui: bool = False,
+    reload: bool = False,
+    cwd: Path | None = None,
+    deployment_file: Path | None = None,
+    open_browser: bool = False,
+) -> None:
+    cfg = get_deployment_config()
+    path = find_python_pyproject(cwd or Path.cwd(), cfg)
+
+    args = ["uv", "run", "python", "-m", "llama_deploy.appserver.app"]
+    if proxy_ui:
+        args.append("--proxy-ui")
+    if reload:
+        args.append("--reload")
+    if deployment_file:
+        args.append("--deployment-file")
+        args.append(str(deployment_file))
+    if open_browser:
+        args.append("--open-browser")
+    # All the streaming/PTY/pipe handling is centralized
+    ret = run_process(
+        args,
+        cwd=path,
+        env=None,
+        line_transform=_exclude_venv_warning,
+    )
+
+    if ret != 0:
+        raise SystemExit(ret)
+
+
+if __name__ == "__main__":
+    print("starting server")
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--proxy-ui", action="store_true")
+    parser.add_argument("--reload", action="store_true")
+    parser.add_argument("--deployment-file", type=Path)
+    parser.add_argument("--open-browser", action="store_true")
+
+    args = parser.parse_args()
+    start_server(
+        proxy_ui=args.proxy_ui,
+        reload=args.reload,
+        deployment_file=args.deployment_file,
+        open_browser=args.open_browser,
+    )
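start_server_in_target_venv re-executes this module inside the project's own environment via uv, forwarding the CLI flags that the new __main__ block parses. Roughly equivalent to the following sketch, assuming a hypothetical project root (the real working directory comes from find_python_pyproject):

import subprocess

# Mirrors the argv assembled by start_server_in_target_venv(); "uv run"
# resolves the project's virtualenv from its pyproject.toml.
subprocess.run(
    ["uv", "run", "python", "-m", "llama_deploy.appserver.app",
     "--reload", "--open-browser"],
    cwd="/path/to/project",  # hypothetical; really find_python_pyproject(...)
    check=True,
)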
llama_deploy_appserver-0.3.0a6/src/llama_deploy/appserver/bootstrap.py ADDED
@@ -0,0 +1,70 @@
+"""
+Bootstraps an application from a remote github repository given environment variables.
+
+This just sets up the files from the repository. It's more of a build process.
+"""
+
+import os
+from pathlib import Path
+
+from llama_deploy.appserver.deployment_config_parser import get_deployment_config
+from llama_deploy.appserver.settings import BootstrapSettings, configure_settings
+from llama_deploy.appserver.workflow_loader import (
+    build_ui,
+    inject_appserver_into_target,
+    install_ui,
+    load_environment_variables,
+)
+from llama_deploy.core.git.git_util import (
+    clone_repo,
+)
+
+
+def bootstrap_app_from_repo(
+    target_dir: str = "/opt/app",
+):
+    bootstrap_settings = BootstrapSettings()
+    # Needs the github url+auth, and the deployment file path
+    # clones the repo to a standard directory
+    # (eventually) runs the UI build process and moves that to a standard directory for a file server
+
+    repo_url = bootstrap_settings.repo_url
+    if repo_url is None:
+        raise ValueError("repo_url is required to bootstrap")
+    clone_repo(
+        repository_url=repo_url,
+        git_ref=bootstrap_settings.git_sha or bootstrap_settings.git_ref,
+        basic_auth=bootstrap_settings.auth_token,
+        dest_dir=target_dir,
+    )
+    # Ensure target_dir exists locally when running tests outside a container
+    os.makedirs(target_dir, exist_ok=True)
+    os.chdir(target_dir)
+    configure_settings(
+        app_root=Path(target_dir),
+        deployment_file_path=Path(bootstrap_settings.deployment_file_path),
+    )
+    config = get_deployment_config()
+    base_path = Path(target_dir)
+    load_environment_variables(config, base_path)
+
+    sdists = None
+    if bootstrap_settings.bootstrap_sdists:
+        sdists = [
+            Path(bootstrap_settings.bootstrap_sdists) / f
+            for f in os.listdir(bootstrap_settings.bootstrap_sdists)
+        ]
+        sdists = [f for f in sdists if f.is_file() and f.name.endswith(".tar.gz")]
+        if not sdists:
+            sdists = None
+    # Use the explicit base path rather than relying on global settings so tests
+    # can safely mock configure_settings without affecting call arguments.
+    inject_appserver_into_target(config, base_path, sdists)
+    install_ui(config, base_path)
+    build_ui(base_path, config)
+
+    pass
+
+
+if __name__ == "__main__":
+    bootstrap_app_from_repo()
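The new bootstrap module is configuration-driven: bootstrap_app_from_repo() reads everything from BootstrapSettings and then clones, installs, and builds. A hedged sketch of driving it; the exact environment-variable spellings depend on the settings model's env configuration, which this diff does not show:

import os

# Assumed env spellings (field names from settings.py below; any prefix is unknown):
os.environ["REPO_URL"] = "https://github.com/example/app.git"
os.environ["DEPLOYMENT_FILE_PATH"] = "deployment.yml"

from llama_deploy.appserver.bootstrap import bootstrap_app_from_repo

# Clone -> configure settings -> load env -> inject appserver -> install/build UI.
bootstrap_app_from_repo(target_dir="/tmp/app")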
llama_deploy_appserver-0.3.0a6/src/llama_deploy/appserver/deployment_config_parser.py ADDED
@@ -0,0 +1,13 @@
+import functools
+
+from llama_deploy.appserver.settings import BootstrapSettings, settings
+from llama_deploy.core.deployment_config import DeploymentConfig
+
+
+@functools.lru_cache
+def get_deployment_config() -> DeploymentConfig:
+    base_settings = BootstrapSettings()
+    base = settings.app_root.resolve()
+    yaml_file = base / settings.deployment_file_path
+    name = base_settings.deployment_name
+    return DeploymentConfig.from_yaml(yaml_file, name)
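get_deployment_config() is wrapped in functools.lru_cache, so the deployment YAML is parsed once per process. A small sketch of the resulting behavior, using only the standard lru_cache API:

from llama_deploy.appserver.deployment_config_parser import get_deployment_config

cfg_a = get_deployment_config()
cfg_b = get_deployment_config()
assert cfg_a is cfg_b  # cached: the YAML is read and parsed only once

# Pick up on-disk edits (e.g. in a dev/reload loop) by clearing the cache.
get_deployment_config.cache_clear()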
llama_deploy_appserver-0.3.0a6/src/llama_deploy/appserver/process_utils.py ADDED
@@ -0,0 +1,201 @@
+import functools
+import os
+import platform
+import pty
+import subprocess
+import sys
+import threading
+from typing import Callable, TextIO, cast
+
+
+def run_process(
+    cmd: list[str],
+    *,
+    cwd: os.PathLike | None = None,
+    env: dict[str, str] | None = None,
+    prefix: str | None = None,
+    color_code: str = "36",
+    line_transform: Callable[[str], str | None] | None = None,
+    use_tty: bool | None = None,
+) -> None:
+    """Run a process and stream its output with optional TTY semantics.
+
+    If use_tty is None, a PTY will be used only when the parent's stdout is a TTY
+    and the platform supports PTYs. When a PTY is used, stdout/stderr are merged.
+    """
+    use_pty = _should_use_pty(use_tty)
+    prefixer = _make_prefixer(prefix, color_code, line_transform)
+
+    process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+    threads: list[threading.Thread] = []
+    try:
+        cleanup()
+        threads = _start_stream_threads(sources, prefixer)
+        ret = process.wait()
+        if ret != 0:
+            raise subprocess.CalledProcessError(ret, cmd)
+    finally:
+        for t in threads:
+            t.join()
+
+
+def spawn_process(
+    cmd: list[str],
+    *,
+    cwd: os.PathLike | None = None,
+    env: dict[str, str] | None = None,
+    prefix: str | None = None,
+    color_code: str = "36",
+    line_transform: Callable[[str], str | None] | None = None,
+    use_tty: bool | None = None,
+) -> subprocess.Popen:
+    """Spawn a process and stream its output in background threads.
+
+    Returns immediately with the Popen object. Streaming threads are daemons.
+    """
+    use_pty = _should_use_pty(use_tty)
+    prefixer = _make_prefixer(prefix, color_code, line_transform)
+
+    process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+    cleanup()
+    _start_stream_threads(sources, prefixer)
+    return process
+
+
+@functools.cache
+def _use_color() -> bool:
+    """Return True if ANSI colors should be emitted to stdout.
+
+    Respects common environment variables and falls back to TTY detection.
+    """
+    force_color = os.environ.get("FORCE_COLOR")
+
+    return sys.stdout.isatty() or force_color is not None and force_color != "0"
+
+
+def _colored_prefix(prefix: str, color_code: str) -> str:
+    return f"\x1b[{color_code}m{prefix}\x1b[0m " if _use_color() else f"{prefix} "
+
+
+def _make_prefixer(
+    prefix: str | None,
+    color_code: str,
+    line_transform: Callable[[str], str | None] | None = None,
+) -> Callable[[str], str | None]:
+    colored = _colored_prefix(prefix, color_code) if prefix else ""
+
+    def _prefixer(line: str) -> str | None:
+        transformed = line_transform(line) if line_transform else line
+        if transformed is None:
+            return None
+        return f"{colored}{transformed}"
+
+    return _prefixer
+
+
+# Unified PTY/Pipe strategy helpers
+
+
+def _should_use_pty(use_tty: bool | None) -> bool:
+    if platform.system() == "Windows":
+        return False
+    if use_tty is None:
+        return sys.stdout.isatty()
+    return use_tty and sys.stdout.isatty()
+
+
+def _spawn_process(
+    cmd: list[str],
+    *,
+    cwd: os.PathLike | None,
+    env: dict[str, str] | None,
+    use_pty: bool,
+) -> tuple[subprocess.Popen, list[tuple[int | TextIO, TextIO]], Callable[[], None]]:
+    if use_pty:
+        master_fd, slave_fd = pty.openpty()
+        process = subprocess.Popen(
+            cmd,
+            env=env,
+            cwd=cwd,
+            stdin=slave_fd,
+            stdout=slave_fd,
+            stderr=slave_fd,
+            close_fds=True,
+        )
+
+        def cleanup() -> None:
+            try:
+                os.close(slave_fd)
+            except OSError:
+                pass
+
+        sources: list[tuple[int | TextIO, TextIO]] = [
+            (master_fd, cast(TextIO, sys.stdout)),
+        ]
+        return process, sources, cleanup
+
+    process = subprocess.Popen(
+        cmd,
+        env=env,
+        cwd=cwd,
+        stdin=None,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        text=True,
+        encoding="utf-8",
+    )
+
+    def cleanup() -> None:
+        return None
+
+    assert process.stdout is not None and process.stderr is not None
+    sources = [
+        (process.stdout, cast(TextIO, sys.stdout)),
+        (process.stderr, cast(TextIO, sys.stderr)),
+    ]
+    return process, sources, cleanup
+
+
+def _stream_source(
+    source: int | TextIO,
+    writer: TextIO,
+    transform: Callable[[str], str | None] | None,
+) -> None:
+    if isinstance(source, int):
+        try:
+            with os.fdopen(
+                source, "r", encoding="utf-8", errors="replace", buffering=1
+            ) as f:
+                for line in f:
+                    out = transform(line) if transform else line
+                    if out is not None:
+                        writer.write(out)
+                        writer.flush()
+        except OSError:
+            # PTY EOF may raise EIO; ignore
+            pass
+    else:
+        for line in iter(source.readline, ""):
+            out = transform(line) if transform else line
+            if out is None:
+                continue
+            writer.write(out)
+            writer.flush()
+        try:
+            source.close()
+        except Exception:
+            pass
+
+
+def _start_stream_threads(
+    sources: list[tuple[int | TextIO, TextIO]],
+    transform: Callable[[str], str | None] | None,
+) -> list[threading.Thread]:
+    threads: list[threading.Thread] = []
+    for src, dst in sources:
+        t = threading.Thread(
+            target=_stream_source, args=(src, dst, transform), daemon=True
+        )
+        t.start()
+        threads.append(t)
+    return threads
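run_process blocks until exit and raises subprocess.CalledProcessError on a nonzero status, while spawn_process returns the Popen immediately with daemon streaming threads. A usage sketch for run_process based only on the signature above:

from llama_deploy.appserver.process_utils import run_process

def drop_blank(line: str) -> str | None:
    # Returning None suppresses the line; any other string replaces it.
    return None if not line.strip() else line

# Prefixes each surviving line with "[ui]" (cyan when stdout is a TTY);
# raises subprocess.CalledProcessError if the command exits nonzero.
run_process(["echo", "hello"], prefix="[ui]", line_transform=drop_blank)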
{llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/routers/ui_proxy.py RENAMED
@@ -2,19 +2,19 @@ import asyncio
 import logging
 from typing import List, Optional
 
-from fastapi.staticfiles import StaticFiles
-from fastapi import FastAPI
 import httpx
-from llama_deploy.appserver.deployment_config_parser import DeploymentConfig
-from llama_deploy.appserver.settings import ApiserverSettings
 import websockets
 from fastapi import (
     APIRouter,
+    FastAPI,
     HTTPException,
     Request,
     WebSocket,
 )
 from fastapi.responses import StreamingResponse
+from fastapi.staticfiles import StaticFiles
+from llama_deploy.appserver.settings import ApiserverSettings
+from llama_deploy.core.deployment_config import DeploymentConfig
 from starlette.background import BackgroundTask
 
 logger = logging.getLogger(__name__)
@@ -37,7 +37,7 @@ async def _ws_proxy(ws: WebSocket, upstream_url: str) -> None:
 
     try:
         # Parse subprotocols if present
-        subprotocols: …
+        subprotocols: List[websockets.Subprotocol] | None = None
        if "sec-websocket-protocol" in ws.headers:
            # Parse comma-separated subprotocols
            subprotocols = [
{llama_deploy_appserver-0.3.0a4 → llama_deploy_appserver-0.3.0a6}/src/llama_deploy/appserver/settings.py RENAMED
@@ -1,9 +1,9 @@
 import os
 from pathlib import Path
 
+from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from pydantic import Field
 from pydantic_settings import BaseSettings, SettingsConfigDict
-from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 
 
 class BootstrapSettings(BaseSettings):
@@ -30,6 +30,10 @@ class BootstrapSettings(BaseSettings):
     deployment_name: str | None = Field(
         default=None, description="The name of the deployment"
     )
+    bootstrap_sdists: str | None = Field(
+        default=None,
+        description="A directory containing tar.gz sdists to install instead of installing the appserver",
+    )
 
 
 class ApiserverSettings(BaseSettings):
@@ -59,6 +63,11 @@ class ApiserverSettings(BaseSettings):
         description="If true, proxy a development UI server instead of serving built assets",
     )
 
+    reload: bool = Field(
+        default=False,
+        description="If true, reload the workflow modules, for use in a dev server environment",
+    )
+
     @property
     def config_parent(self) -> Path:
         return (self.app_root / self.deployment_file_path).parent
@@ -71,6 +80,7 @@ def configure_settings(
     proxy_ui: bool | None = None,
     deployment_file_path: Path | None = None,
     app_root: Path | None = None,
+    reload: bool | None = None,
 ) -> None:
     if proxy_ui is not None:
         settings.proxy_ui = proxy_ui
@@ -83,3 +93,6 @@ def configure_settings(
     if app_root is not None:
         settings.app_root = app_root
         os.environ["LLAMA_DEPLOY_APISERVER_APP_ROOT"] = str(app_root)
+    if reload is not None:
+        settings.reload = reload
+        os.environ["LLAMA_DEPLOY_APISERVER_RELOAD"] = "true" if reload else "false"
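configure_settings() mirrors each override into an environment variable so that uvicorn's reload workers, which re-import the app in a fresh process, observe the same values. A round-trip sketch grounded in the hunk above:

import os

from llama_deploy.appserver.settings import configure_settings, settings

configure_settings(reload=True)
assert settings.reload is True
assert os.environ["LLAMA_DEPLOY_APISERVER_RELOAD"] == "true"

configure_settings(reload=None)  # None means "leave the current value alone"
assert settings.reload is True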