llama-deploy-appserver 0.2.7a1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff compares two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (38)
  1. llama_deploy/appserver/app.py +274 -26
  2. llama_deploy/appserver/bootstrap.py +55 -25
  3. llama_deploy/appserver/configure_logging.py +189 -0
  4. llama_deploy/appserver/correlation_id.py +24 -0
  5. llama_deploy/appserver/deployment.py +70 -412
  6. llama_deploy/appserver/deployment_config_parser.py +12 -130
  7. llama_deploy/appserver/interrupts.py +55 -0
  8. llama_deploy/appserver/process_utils.py +214 -0
  9. llama_deploy/appserver/py.typed +0 -0
  10. llama_deploy/appserver/routers/__init__.py +4 -3
  11. llama_deploy/appserver/routers/deployments.py +163 -382
  12. llama_deploy/appserver/routers/status.py +4 -31
  13. llama_deploy/appserver/routers/ui_proxy.py +255 -0
  14. llama_deploy/appserver/settings.py +99 -49
  15. llama_deploy/appserver/types.py +0 -3
  16. llama_deploy/appserver/workflow_loader.py +431 -0
  17. llama_deploy/appserver/workflow_store/agent_data_store.py +100 -0
  18. llama_deploy/appserver/workflow_store/keyed_lock.py +32 -0
  19. llama_deploy/appserver/workflow_store/lru_cache.py +49 -0
  20. llama_deploy_appserver-0.3.0.dist-info/METADATA +25 -0
  21. llama_deploy_appserver-0.3.0.dist-info/RECORD +24 -0
  22. {llama_deploy_appserver-0.2.7a1.dist-info → llama_deploy_appserver-0.3.0.dist-info}/WHEEL +1 -1
  23. llama_deploy/appserver/__main__.py +0 -14
  24. llama_deploy/appserver/client/__init__.py +0 -3
  25. llama_deploy/appserver/client/base.py +0 -30
  26. llama_deploy/appserver/client/client.py +0 -49
  27. llama_deploy/appserver/client/models/__init__.py +0 -4
  28. llama_deploy/appserver/client/models/apiserver.py +0 -356
  29. llama_deploy/appserver/client/models/model.py +0 -82
  30. llama_deploy/appserver/run_autodeploy.py +0 -141
  31. llama_deploy/appserver/server.py +0 -60
  32. llama_deploy/appserver/source_managers/__init__.py +0 -5
  33. llama_deploy/appserver/source_managers/base.py +0 -33
  34. llama_deploy/appserver/source_managers/git.py +0 -48
  35. llama_deploy/appserver/source_managers/local.py +0 -51
  36. llama_deploy/appserver/tracing.py +0 -237
  37. llama_deploy_appserver-0.2.7a1.dist-info/METADATA +0 -23
  38. llama_deploy_appserver-0.2.7a1.dist-info/RECORD +0 -28
llama_deploy/appserver/interrupts.py
@@ -0,0 +1,55 @@
+ import asyncio
+ import signal
+ from asyncio import Event
+ from contextlib import suppress
+ from typing import Awaitable, TypeVar
+
+ shutdown_event = Event()
+
+
+ def setup_interrupts() -> None:
+     loop = asyncio.get_running_loop()
+     for sig in (signal.SIGINT, signal.SIGTERM):
+         loop.add_signal_handler(sig, shutdown_event.set)
+
+
+ class OperationAborted(Exception):
+     """Raised when an operation is aborted due to shutdown/interrupt."""
+
+
+ T = TypeVar("T")
+
+
+ async def wait_or_abort(
+     awaitable: Awaitable[T], shutdown_event: asyncio.Event = shutdown_event
+ ) -> T:
+     """Await an operation, aborting early if shutdown is requested.
+
+     If the shutdown event is set before the awaitable completes, cancel the
+     awaitable and raise OperationAborted. Otherwise, return the awaitable's result.
+     """
+     event = shutdown_event
+     if event.is_set():
+         raise OperationAborted()
+
+     op_task = asyncio.create_task(awaitable)
+     stop_task = asyncio.create_task(event.wait())
+     try:
+         done, _ = await asyncio.wait(
+             {op_task, stop_task}, return_when=asyncio.FIRST_COMPLETED
+         )
+         if stop_task in done:
+             op_task.cancel()
+             with suppress(asyncio.CancelledError):
+                 await op_task
+             raise OperationAborted()
+         # Operation finished first
+         stop_task.cancel()
+         with suppress(asyncio.CancelledError):
+             await stop_task
+         return await op_task
+     finally:
+         # Ensure no leaked tasks if an exception propagates
+         for t in (op_task, stop_task):
+             if not t.done():
+                 t.cancel()
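For orientation, a minimal sketch of how these new helpers might be used together; the long-running operation here (a plain sleep) is a hypothetical stand-in, not part of the package:

    import asyncio

    from llama_deploy.appserver.interrupts import (
        OperationAborted,
        setup_interrupts,
        wait_or_abort,
    )

    async def main() -> None:
        setup_interrupts()  # register SIGINT/SIGTERM handlers on the running loop
        try:
            # Stand-in for any long-running awaitable; Ctrl-C aborts it cleanly.
            result = await wait_or_abort(asyncio.sleep(3600, result="done"))
            print(result)
        except OperationAborted:
            print("shutdown requested, aborting")

    asyncio.run(main())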
llama_deploy/appserver/process_utils.py
@@ -0,0 +1,214 @@
+ import functools
+ import os
+ import platform
+ import pty
+ import subprocess
+ import sys
+ import threading
+ from typing import Callable, TextIO, cast
+
+
+ def run_process(
+     cmd: list[str],
+     *,
+     cwd: os.PathLike | None = None,
+     env: dict[str, str] | None = None,
+     prefix: str | None = None,
+     color_code: str = "36",
+     line_transform: Callable[[str], str | None] | None = None,
+     use_tty: bool | None = None,
+ ) -> None:
+     """Run a process and stream its output with optional TTY semantics.
+
+     If use_tty is None, a PTY will be used only when the parent's stdout is a TTY
+     and the platform supports PTYs. When a PTY is used, stdout/stderr are merged.
+     """
+     use_pty = _should_use_pty(use_tty)
+     prefixer = _make_prefixer(prefix, color_code, line_transform)
+
+     process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+     threads: list[threading.Thread] = []
+     try:
+         cleanup()
+         _log_command(cmd, prefixer)
+         threads = _start_stream_threads(sources, prefixer)
+         ret = process.wait()
+         if ret != 0:
+             raise subprocess.CalledProcessError(ret, cmd)
+     finally:
+         for t in threads:
+             t.join()
+
+
+ def spawn_process(
+     cmd: list[str],
+     *,
+     cwd: os.PathLike | None = None,
+     env: dict[str, str] | None = None,
+     prefix: str | None = None,
+     color_code: str = "36",
+     line_transform: Callable[[str], str | None] | None = None,
+     use_tty: bool | None = None,
+ ) -> subprocess.Popen:
+     """Spawn a process and stream its output in background threads.
+
+     Returns immediately with the Popen object. Streaming threads are daemons.
+     """
+     use_pty = _should_use_pty(use_tty)
+     prefixer = _make_prefixer(prefix, color_code, line_transform)
+
+     process, sources, cleanup = _spawn_process(cmd, cwd=cwd, env=env, use_pty=use_pty)
+     cleanup()
+     _log_command(cmd, prefixer)
+     _start_stream_threads(sources, prefixer)
+     return process
+
+
+ @functools.cache
+ def _use_color() -> bool:
+     """Return True if ANSI colors should be emitted to stdout.
+
+     Respects common environment variables and falls back to TTY detection.
+     """
+     force_color = os.environ.get("FORCE_COLOR")
+
+     return sys.stdout.isatty() or force_color is not None and force_color != "0"
+
+
+ def _colored_prefix(prefix: str, color_code: str) -> str:
+     return f"\x1b[{color_code}m{prefix}\x1b[0m " if _use_color() else f"{prefix} "
+
+
+ def _make_prefixer(
+     prefix: str | None,
+     color_code: str,
+     line_transform: Callable[[str], str | None] | None = None,
+ ) -> Callable[[str], str | None]:
+     colored = _colored_prefix(prefix, color_code) if prefix else ""
+
+     def _prefixer(line: str) -> str | None:
+         transformed = line_transform(line) if line_transform else line
+         if transformed is None:
+             return None
+         return f"{colored}{transformed}"
+
+     return _prefixer
+
+
+ # Unified PTY/Pipe strategy helpers
+
+
+ def _should_use_pty(use_tty: bool | None) -> bool:
+     if platform.system() == "Windows":
+         return False
+     if use_tty is None:
+         return sys.stdout.isatty()
+     return use_tty and sys.stdout.isatty() and not os.environ.get("NO_COLOR")
+
+
+ def should_use_color() -> bool:
+     return _should_use_pty(None)
+
+
+ def _spawn_process(
+     cmd: list[str],
+     *,
+     cwd: os.PathLike | None,
+     env: dict[str, str] | None,
+     use_pty: bool,
+ ) -> tuple[subprocess.Popen, list[tuple[int | TextIO, TextIO]], Callable[[], None]]:
+     if use_pty:
+         master_fd, slave_fd = pty.openpty()
+         process = subprocess.Popen(
+             cmd,
+             env=env,
+             cwd=cwd,
+             stdin=slave_fd,
+             stdout=slave_fd,
+             stderr=slave_fd,
+             close_fds=True,
+         )
+
+         def cleanup() -> None:
+             try:
+                 os.close(slave_fd)
+             except OSError:
+                 pass
+
+         sources: list[tuple[int | TextIO, TextIO]] = [
+             (master_fd, cast(TextIO, sys.stdout)),
+         ]
+         return process, sources, cleanup
+
+     process = subprocess.Popen(
+         cmd,
+         env=env,
+         cwd=cwd,
+         stdin=None,
+         stdout=subprocess.PIPE,
+         stderr=subprocess.PIPE,
+         text=True,
+         encoding="utf-8",
+     )
+
+     def cleanup() -> None:
+         return None
+
+     assert process.stdout is not None and process.stderr is not None
+     sources = [
+         (process.stdout, cast(TextIO, sys.stdout)),
+         (process.stderr, cast(TextIO, sys.stderr)),
+     ]
+     return process, sources, cleanup
+
+
+ def _stream_source(
+     source: int | TextIO,
+     writer: TextIO,
+     transform: Callable[[str], str | None] | None,
+ ) -> None:
+     if isinstance(source, int):
+         try:
+             with os.fdopen(
+                 source, "r", encoding="utf-8", errors="replace", buffering=1
+             ) as f:
+                 for line in f:
+                     out = transform(line) if transform else line
+                     if out is not None:
+                         writer.write(out)
+                         writer.flush()
+         except OSError:
+             # PTY EOF may raise EIO; ignore
+             pass
+     else:
+         for line in iter(source.readline, ""):
+             out = transform(line) if transform else line
+             if out is None:
+                 continue
+             writer.write(out)
+             writer.flush()
+         try:
+             source.close()
+         except Exception:
+             pass
+
+
+ def _log_command(cmd: list[str], transform: Callable[[str], str | None] | None) -> None:
+     cmd_str = "> " + " ".join(cmd)
+     if transform:
+         cmd_str = transform(cmd_str) or cmd_str
+     sys.stderr.write(cmd_str + "\n")
+
+
+ def _start_stream_threads(
+     sources: list[tuple[int | TextIO, TextIO]],
+     transform: Callable[[str], str | None] | None,
+ ) -> list[threading.Thread]:
+     threads: list[threading.Thread] = []
+     for src, dst in sources:
+         t = threading.Thread(
+             target=_stream_source, args=(src, dst, transform), daemon=True
+         )
+         t.start()
+         threads.append(t)
+     return threads
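As a usage sketch for the streaming helpers above; the command and the noise-filtering transform are illustrative assumptions, not part of the package:

    import subprocess

    from llama_deploy.appserver.process_utils import run_process

    def drop_debug(line: str) -> str | None:
        # Returning None from the transform suppresses the line entirely.
        return None if line.startswith("DEBUG") else line

    try:
        # Streams output with a colored "[build]" prefix; a PTY is used when
        # stdout is a TTY, otherwise plain pipes with separate stdout/stderr.
        run_process(
            ["python", "--version"],
            prefix="[build]",
            color_code="35",
            line_transform=drop_debug,
        )
    except subprocess.CalledProcessError as exc:
        print(f"command failed with exit code {exc.returncode}")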
llama_deploy/appserver/py.typed (file without changes)
llama_deploy/appserver/routers/__init__.py
@@ -1,4 +1,5 @@
- from .deployments import deployments_router
- from .status import status_router
+ from .deployments import create_deployments_router
+ from .status import health_router
+ from .ui_proxy import create_ui_proxy_router
 
- __all__ = ["deployments_router", "status_router"]
+ __all__ = ["create_deployments_router", "create_ui_proxy_router", "health_router"]
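A hedged sketch of wiring the renamed exports into an application. The appserver is FastAPI-based, but the arguments expected by the create_* factories are not visible in this diff, so only the ready-made health_router is mounted here:

    from fastapi import FastAPI

    from llama_deploy.appserver.routers import health_router

    app = FastAPI()
    # health_router is exported as a ready router; create_deployments_router and
    # create_ui_proxy_router are factories whose parameters this diff does not show.
    app.include_router(health_router)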