llamactl 0.3.0a7__tar.gz → 0.3.0a8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25) hide show
  1. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/PKG-INFO +4 -4
  2. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/pyproject.toml +5 -5
  3. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/__init__.py +2 -1
  4. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/client.py +112 -15
  5. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/commands/deployment.py +14 -4
  6. llamactl-0.3.0a8/src/llama_deploy/cli/commands/init.py +210 -0
  7. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/deployment_form.py +30 -5
  8. llamactl-0.3.0a8/src/llama_deploy/cli/textual/deployment_monitor.py +429 -0
  9. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/README.md +0 -0
  10. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/app.py +0 -0
  11. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/commands/aliased_group.py +0 -0
  12. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/commands/profile.py +0 -0
  13. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/commands/serve.py +0 -0
  14. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/config.py +0 -0
  15. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/debug.py +0 -0
  16. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/env.py +0 -0
  17. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/interactive_prompts/utils.py +0 -0
  18. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/options.py +0 -0
  19. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/deployment_help.py +0 -0
  20. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/git_validation.py +0 -0
  21. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/github_callback_server.py +0 -0
  22. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/llama_loader.py +0 -0
  23. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/profile_form.py +0 -0
  24. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/secrets_form.py +0 -0
  25. {llamactl-0.3.0a7 → llamactl-0.3.0a8}/src/llama_deploy/cli/textual/styles.tcss +0 -0
@@ -1,19 +1,19 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: llamactl
3
- Version: 0.3.0a7
3
+ Version: 0.3.0a8
4
4
  Summary: A command-line interface for managing LlamaDeploy projects and deployments
5
5
  Author: Adrian Lyjak
6
6
  Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
7
7
  License: MIT
8
- Requires-Dist: llama-deploy-core>=0.3.0a7,<0.4.0
9
- Requires-Dist: llama-deploy-appserver>=0.3.0a7,<0.4.0
8
+ Requires-Dist: llama-deploy-core>=0.3.0a8,<0.4.0
9
+ Requires-Dist: llama-deploy-appserver>=0.3.0a8,<0.4.0
10
10
  Requires-Dist: httpx>=0.24.0
11
11
  Requires-Dist: rich>=13.0.0
12
12
  Requires-Dist: questionary>=2.0.0
13
13
  Requires-Dist: click>=8.2.1
14
14
  Requires-Dist: python-dotenv>=1.0.0
15
15
  Requires-Dist: tenacity>=9.1.2
16
- Requires-Dist: textual>=4.0.0
16
+ Requires-Dist: textual>=5.3.0
17
17
  Requires-Dist: aiohttp>=3.12.14
18
18
  Requires-Dist: copier>=9.9.0
19
19
  Requires-Python: >=3.12, <4
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "llamactl"
3
- version = "0.3.0a7"
3
+ version = "0.3.0a8"
4
4
  description = "A command-line interface for managing LlamaDeploy projects and deployments"
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -9,17 +9,17 @@ authors = [
9
9
  ]
10
10
  requires-python = ">=3.12, <4"
11
11
  dependencies = [
12
- "llama-deploy-core>=0.3.0a7,<0.4.0",
13
- "llama-deploy-appserver>=0.3.0a7,<0.4.0",
12
+ "llama-deploy-core>=0.3.0a8,<0.4.0",
13
+ "llama-deploy-appserver>=0.3.0a8,<0.4.0",
14
14
  "httpx>=0.24.0",
15
15
  "rich>=13.0.0",
16
16
  "questionary>=2.0.0",
17
17
  "click>=8.2.1",
18
18
  "python-dotenv>=1.0.0",
19
19
  "tenacity>=9.1.2",
20
- "textual>=4.0.0",
20
+ "textual>=5.3.0",
21
21
  "aiohttp>=3.12.14",
22
- "copier>=9.9.0"
22
+ "copier>=9.9.0",
23
23
  ]
24
24
 
25
25
  [project.scripts]
@@ -1,4 +1,5 @@
1
1
  from llama_deploy.cli.commands.deployment import deployments
2
+ from llama_deploy.cli.commands.init import init
2
3
  from llama_deploy.cli.commands.profile import profiles
3
4
  from llama_deploy.cli.commands.serve import serve
4
5
 
@@ -10,7 +11,7 @@ def main() -> None:
10
11
  app()
11
12
 
12
13
 
13
- __all__ = ["app", "deployments", "profiles", "serve"]
14
+ __all__ = ["app", "deployments", "profiles", "serve", "init"]
14
15
 
15
16
 
16
17
  if __name__ == "__main__":
@@ -1,6 +1,8 @@
1
- from typing import List
1
import contextlib
from typing import Callable, Iterator, List
2
3
 
3
4
  import httpx
5
+ from llama_deploy.core.schema.base import LogEvent
4
6
  from llama_deploy.core.schema.deployments import (
5
7
  DeploymentCreate,
6
8
  DeploymentResponse,
@@ -17,19 +19,24 @@ from rich.console import Console
17
19
  from .config import config_manager
18
20
 
19
21
 
22
+ class ClientError(Exception):
23
+ """Base class for client errors."""
24
+
25
+ def __init__(self, message: str) -> None:
26
+ super().__init__(message)
27
+
28
+
20
29
  class BaseClient:
21
30
    def __init__(self, base_url: str, console: Console) -> None:
        """Store connection settings and construct the HTTP clients.

        Args:
            base_url: Control-plane base URL; a trailing slash is stripped.
            console: Rich console used for user-facing output.
        """
        self.base_url = base_url.rstrip("/")
        self.console = console
        # Primary client: the response hook raises ClientError on HTTP errors.
        self.client = httpx.Client(
            base_url=self.base_url,
            event_hooks={"response": [self._handle_response]},
        )
        # Hook-free client used for streaming endpoints, where the response
        # hook would consume the stream body.
        self.hookless_client = httpx.Client(base_url=self.base_url)
27
38
 
28
39
  def _handle_response(self, response: httpx.Response) -> None:
29
- if "X-Warning" in response.headers:
30
- self.console.print(
31
- f"[yellow]Warning: {response.headers['X-Warning']}[/yellow]"
32
- )
33
40
  try:
34
41
  response.raise_for_status()
35
42
  except httpx.HTTPStatusError as e:
@@ -42,9 +49,9 @@ class BaseClient:
42
49
  error_message = str(error_data)
43
50
  except (ValueError, KeyError):
44
51
  error_message = e.response.text
45
- raise Exception(f"HTTP {e.response.status_code}: {error_message}") from e
52
+ raise ClientError(f"HTTP {e.response.status_code}: {error_message}") from e
46
53
  except httpx.RequestError as e:
47
- raise Exception(f"Request failed: {e}") from e
54
+ raise ClientError(f"Request failed: {e}") from e
48
55
 
49
56
 
50
57
  class ControlPlaneClient(BaseClient):
@@ -59,7 +66,7 @@ class ControlPlaneClient(BaseClient):
59
66
  return response.json()
60
67
 
61
68
  def list_projects(self) -> List[ProjectSummary]:
62
- response = self.client.get("/projects/")
69
+ response = self.client.get("/api/v1beta1/deployments/list-projects")
63
70
  projects_response = ProjectsListResponse.model_validate(response.json())
64
71
  return [project for project in projects_response.projects]
65
72
 
@@ -95,25 +102,35 @@ class ProjectClient(BaseClient):
95
102
  self.project_id = project_id
96
103
 
97
104
  def list_deployments(self) -> List[DeploymentResponse]:
98
- response = self.client.get(f"/{self.project_id}/deployments/")
105
+ response = self.client.get(
106
+ "/api/v1beta1/deployments",
107
+ params={"project_id": self.project_id},
108
+ )
99
109
  deployments_response = DeploymentsListResponse.model_validate(response.json())
100
110
  return [deployment for deployment in deployments_response.deployments]
101
111
 
102
112
  def get_deployment(self, deployment_id: str) -> DeploymentResponse:
103
- response = self.client.get(f"/{self.project_id}/deployments/{deployment_id}")
113
+ response = self.client.get(
114
+ f"/api/v1beta1/deployments/{deployment_id}",
115
+ params={"project_id": self.project_id},
116
+ )
104
117
  return DeploymentResponse.model_validate(response.json())
105
118
 
106
119
  def create_deployment(
107
120
  self, deployment_data: DeploymentCreate
108
121
  ) -> DeploymentResponse:
109
122
  response = self.client.post(
110
- f"/{self.project_id}/deployments/",
123
+ "/api/v1beta1/deployments",
124
+ params={"project_id": self.project_id},
111
125
  json=deployment_data.model_dump(exclude_none=True),
112
126
  )
113
127
  return DeploymentResponse.model_validate(response.json())
114
128
 
115
129
    def delete_deployment(self, deployment_id: str) -> None:
        """Delete a deployment from the current project.

        Returns nothing; HTTP errors surface as ClientError via the response
        hook installed on `self.client`.
        """
        self.client.delete(
            f"/api/v1beta1/deployments/{deployment_id}",
            params={"project_id": self.project_id},
        )
117
134
 
118
135
  def update_deployment(
119
136
  self,
@@ -121,7 +138,8 @@ class ProjectClient(BaseClient):
121
138
  update_data: DeploymentUpdate,
122
139
  ) -> DeploymentResponse:
123
140
  response = self.client.patch(
124
- f"/{self.project_id}/deployments/{deployment_id}",
141
+ f"/api/v1beta1/deployments/{deployment_id}",
142
+ params={"project_id": self.project_id},
125
143
  json=update_data.model_dump(),
126
144
  )
127
145
  return DeploymentResponse.model_validate(response.json())
@@ -133,7 +151,8 @@ class ProjectClient(BaseClient):
133
151
  pat: str | None = None,
134
152
  ) -> RepositoryValidationResponse:
135
153
  response = self.client.post(
136
- f"/{self.project_id}/deployments/validate-repository",
154
+ "/api/v1beta1/deployments/validate-repository",
155
+ params={"project_id": self.project_id},
137
156
  json=RepositoryValidationRequest(
138
157
  repository_url=repo_url,
139
158
  deployment_id=deployment_id,
@@ -142,6 +161,81 @@ class ProjectClient(BaseClient):
142
161
  )
143
162
  return RepositoryValidationResponse.model_validate(response.json())
144
163
 
164
+ def stream_deployment_logs(
165
+ self,
166
+ deployment_id: str,
167
+ *,
168
+ include_init_containers: bool = False,
169
+ since_seconds: int | None = None,
170
+ tail_lines: int | None = None,
171
+ ) -> tuple["Closer", Iterator[LogEvent]]:
172
+ """Stream logs as LogEvent items from the control plane using SSE.
173
+
174
+ This yields `LogEvent` models until the stream ends (e.g. rollout).
175
+ """
176
+ # Use a separate client without response hooks so we don't consume the stream
177
+
178
+ params = {
179
+ "project_id": self.project_id,
180
+ "include_init_containers": include_init_containers,
181
+ }
182
+ if since_seconds is not None:
183
+ params["since_seconds"] = since_seconds
184
+ if tail_lines is not None:
185
+ params["tail_lines"] = tail_lines
186
+
187
+ url = f"/api/v1beta1/deployments/{deployment_id}/logs"
188
+ headers = {"Accept": "text/event-stream"}
189
+
190
+ stack = contextlib.ExitStack()
191
+ response = stack.enter_context(
192
+ self.hookless_client.stream(
193
+ "GET", url, params=params, headers=headers, timeout=None
194
+ )
195
+ )
196
+ try:
197
+ response.raise_for_status()
198
+ except Exception:
199
+ stack.close()
200
+ raise
201
+
202
+ return stack.close, _iterate_log_stream(response, stack.close)
203
+
204
+
205
+ def _iterate_log_stream(
206
+ response: httpx.Response, closer: "Closer"
207
+ ) -> Iterator[LogEvent]:
208
+ event_name: str | None = None
209
+ data_lines: list[str] = []
210
+
211
+ try:
212
+ for line in response.iter_lines():
213
+ if line is None:
214
+ continue
215
+ line = line.decode() if isinstance(line, (bytes, bytearray)) else line
216
+ print("got line", line)
217
+ if line.startswith("event:"):
218
+ event_name = line[len("event:") :].strip()
219
+ elif line.startswith("data:"):
220
+ data_lines.append(line[len("data:") :].lstrip())
221
+ elif line.strip() == "":
222
+ if event_name == "log" and data_lines:
223
+ data_str = "\n".join(data_lines)
224
+ try:
225
+ yield LogEvent.model_validate_json(data_str)
226
+ print("yielded log event", data_str)
227
+ except Exception:
228
+ # If parsing fails, skip malformed event
229
+ pass
230
+ # reset for next event
231
+ event_name = None
232
+ data_lines = []
233
+ finally:
234
+ try:
235
+ closer()
236
+ except Exception:
237
+ pass
238
+
145
239
 
146
240
  def get_control_plane_client(base_url: str | None = None) -> ControlPlaneClient:
147
241
  console = Console()
@@ -174,3 +268,6 @@ def get_project_client(
174
268
  if not resolved_project_id:
175
269
  raise ValueError("Project ID is required")
176
270
  return ProjectClient(resolved_base_url, resolved_project_id, console)
271
+
272
+
273
+ type Closer = callable[tuple[()], None]
@@ -19,6 +19,7 @@ from ..interactive_prompts.utils import (
19
19
  )
20
20
  from ..options import global_options
21
21
  from ..textual.deployment_form import create_deployment_form, edit_deployment_form
22
+ from ..textual.deployment_monitor import monitor_deployment_screen
22
23
 
23
24
 
24
25
  @app.group(
@@ -88,7 +89,12 @@ def list_deployments() -> None:
88
89
  @deployments.command("get")
89
90
  @global_options
90
91
  @click.argument("deployment_id", required=False)
91
- def get_deployment(deployment_id: str | None) -> None:
92
+ @click.option(
93
+ "--non-interactive",
94
+ is_flag=True,
95
+ help="Do not open a live monitor screen showing status and streaming logs",
96
+ )
97
+ def get_deployment(deployment_id: str | None, non_interactive: bool) -> None:
92
98
  """Get details of a specific deployment"""
93
99
  try:
94
100
  client = get_project_client()
@@ -98,6 +104,10 @@ def get_deployment(deployment_id: str | None) -> None:
98
104
  rprint("[yellow]No deployment selected[/yellow]")
99
105
  return
100
106
 
107
+ if not non_interactive:
108
+ monitor_deployment_screen(deployment_id)
109
+ return
110
+
101
111
  deployment = client.get_deployment(deployment_id)
102
112
 
103
113
  table = Table(title=f"Deployment: {deployment.name}")
@@ -143,7 +153,7 @@ def create_deployment(
143
153
  git_ref: str | None,
144
154
  personal_access_token: str | None,
145
155
  ) -> None:
146
- """Create a new deployment"""
156
+ """Interactively create a new deployment"""
147
157
 
148
158
  # Use interactive creation
149
159
  deployment_form = create_deployment_form()
@@ -214,11 +224,11 @@ def edit_deployment(deployment_id: str | None) -> None:
214
224
  raise click.Abort()
215
225
 
216
226
 
217
- @deployments.command("refresh")
227
+ @deployments.command("update")
218
228
  @global_options
219
229
  @click.argument("deployment_id", required=False)
220
230
  def refresh_deployment(deployment_id: str | None) -> None:
221
- """Refresh a deployment with the latest code from its git reference"""
231
+ """Update the deployment, pulling the latest code from it's branch"""
222
232
  try:
223
233
  client = get_project_client()
224
234
 
@@ -0,0 +1,210 @@
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ from dataclasses import dataclass
5
+ from pathlib import Path
6
+
7
+ import click
8
+ import copier
9
+ import questionary
10
+ from click.exceptions import Exit
11
+ from llama_deploy.cli.app import app
12
+ from llama_deploy.cli.options import global_options
13
+ from rich import print as rprint
14
+
15
+
16
@dataclass
class TemplateOption:
    """A project template selectable from `llamactl init`."""

    id: str  # identifier used as the --template CLI choice
    name: str  # human-readable title shown in the interactive picker
    description: str  # one-line summary shown next to the name in the picker
    git_url: str  # template repository URL handed to copier
22
+
23
+
24
# Built-in templates offered by `llamactl init`; ids double as the valid
# values for the --template option.
options = [
    TemplateOption(
        id="basic-ui",
        name="Basic UI",
        description="A basic starter workflow with a React Vite UI",
        git_url="https://github.com/adrianlyjak/qs",
    ),
    TemplateOption(
        id="extraction-review",
        name="Extraction Agent with Review UI",
        description="Extract data from documents using a custom schema and Llama Cloud. Includes a UI to review and correct the results",
        git_url="https://github.com/run-llama/template-workflow-data-extraction",
    ),
]
38
+
39
+
40
@app.command()
@click.option(
    "--update",
    is_flag=True,
    help="Instead of creating a new app, update the current app to the latest version. Other options will be ignored.",
)
@click.option(
    "--template",
    type=click.Choice([o.id for o in options]),
    help="The template to use for the new app",
)
@click.option(
    "--dir",
    help="The directory to create the new app in",
    type=click.Path(
        file_okay=False, dir_okay=True, writable=True, resolve_path=True, path_type=Path
    ),
)
@click.option(
    "--force",
    is_flag=True,
    help="Force overwrite the directory if it exists",
)
@global_options
def init(
    update: bool,
    template: str | None,
    dir: Path | None,
    force: bool,
) -> None:
    """Create a new app repository from a template"""
    # Update mode ignores all creation-related options.
    if update:
        _update()
        return
    _create(template, dir, force)
75
+
76
+
77
def _create(template: str | None, dir: Path | None, force: bool) -> None:
    """Create a new app from a template, prompting for any missing options.

    Args:
        template: Template id; prompted interactively when None.
        dir: Target directory; prompted (defaulting to the template id) when None.
        force: Overwrite an existing directory without asking.

    Raises:
        Exit: when a prompt is cancelled, the template id is unknown, or the
            user declines to overwrite an existing directory.
    """
    if template is None:
        template = questionary.select(
            "Choose a template",
            choices=[
                questionary.Choice(title=o.name, value=o.id, description=o.description)
                for o in options
            ],
        ).ask()
        if template is None:
            rprint("No template selected")
            raise Exit(1)
    if dir is None:
        dir_str = questionary.text(
            "Enter the directory to create the new app in", default=template
        ).ask()
        if not dir_str:
            rprint("No directory provided")
            raise Exit(1)
        dir = Path(dir_str)
    resolved_template = next((o for o in options if o.id == template), None)
    if resolved_template is None:
        rprint(f"Template {template} not found")
        raise Exit(1)
    if dir.exists():
        is_ok = (
            force
            or questionary.confirm("Directory exists. Overwrite?", default=False).ask()
        )
        if not is_ok:
            raise Exit(1)
        shutil.rmtree(dir, ignore_errors=True)
    copier.run_copy(
        resolved_template.git_url,
        dir,
        quiet=True,
    )
    is_git_initialized = _init_git_repo(dir)
    _print_next_steps(dir, resolved_template, is_git_initialized)


def _init_git_repo(dir: Path) -> bool:
    """Initialize a git repo with an initial commit in *dir*; return success."""
    try:
        subprocess.run(["git", "--version"], check=True, capture_output=True)
        # Fix: run git with cwd= instead of os.chdir so the process-wide
        # working directory is never mutated (chdir is not exception-safe
        # for any concurrent code observing the cwd).
        subprocess.run(["git", "init"], check=True, capture_output=True, cwd=dir)
        subprocess.run(["git", "add", "."], check=True, capture_output=True, cwd=dir)
        subprocess.run(
            ["git", "commit", "-m", "Initial commit"],
            check=True,
            capture_output=True,
            cwd=dir,
        )
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Git not available or failed - continue without git initialization
        return False


def _print_next_steps(
    dir: Path, resolved_template: "TemplateOption", is_git_initialized: bool
) -> None:
    """Print the success banner and the local-run / deploy instructions."""
    rprint(
        f"Successfully created [blue]{dir}[/] using the [blue]{resolved_template.name}[/] template! 🎉 🦙 💾"
    )
    rprint("")
    rprint("[bold]To run locally:[/]")
    rprint(f" [orange3]cd[/] {dir}")
    rprint(" [orange3]uvx[/] llamactl serve")
    rprint("")
    rprint("[bold]To deploy:[/]")
    if not is_git_initialized:
        rprint(" [orange3]git[/] init")
        rprint(" [orange3]git[/] add .")
        rprint(" [orange3]git[/] commit -m 'Initial commit'")
        rprint("")
    rprint("[dim](Create a new repo and add it as a remote)[/]")
    rprint("")
    rprint(" [orange3]git[/] remote add origin <your-repo-url>")
    rprint(" [orange3]git[/] push -u origin main")
    rprint("")
    # rprint(" [orange3]uvx[/] llamactl login")
    rprint(" [orange3]uvx[/] llamactl deploy")
    rprint("")
162
+
163
+
164
def _update():
    """Update the app to the latest version.

    Runs `copier update` against the current directory, then inspects
    `git status --porcelain` and warns about conflict markers the update
    may have left behind.
    """
    try:
        copier.run_update(
            overwrite=True,
            skip_answered=True,
            quiet=True,
        )
    except copier.UserMessageError as e:
        rprint(f"{e}")
        # Chain the cause so tracebacks point at the copier failure.
        raise Exit(1) from e

    # Check git status and warn about conflicts
    try:
        result = subprocess.run(
            ["git", "status", "--porcelain"],
            check=True,
            capture_output=True,
            text=True,
        )

        if result.stdout.strip():
            # Fix: the previous version also collected `modified_files`
            # but never used it; only conflicted files matter here.
            conflicted_files = []
            for line in result.stdout.strip().split("\n"):
                status = line[:2]
                filename = line[3:]
                # Porcelain conflict states: both-modified / both-added / both-deleted
                if "UU" in status or "AA" in status or "DD" in status:
                    conflicted_files.append(filename)

            if conflicted_files:
                rprint("")
                rprint("⚠️ [bold]Files with conflicts detected:[/]")
                for file in conflicted_files:
                    rprint(f" {file}")
                rprint("")
                rprint(
                    "Please manually resolve conflicts with a merge editor before proceeding."
                )

    except (subprocess.CalledProcessError, FileNotFoundError):
        # Git not available or not in a git repo - continue silently
        pass
@@ -13,6 +13,10 @@ from llama_deploy.cli.textual.deployment_help import (
13
13
  DeploymentHelpBackMessage,
14
14
  DeploymentHelpWidget,
15
15
  )
16
+ from llama_deploy.cli.textual.deployment_monitor import (
17
+ DeploymentMonitorWidget,
18
+ MonitorCloseMessage,
19
+ )
16
20
  from llama_deploy.cli.textual.git_validation import (
17
21
  GitValidationWidget,
18
22
  ValidationCancelMessage,
@@ -334,10 +338,11 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
334
338
 
335
339
  CSS_PATH = Path(__file__).parent / "styles.tcss"
336
340
 
337
- # App states: 'form', 'validation', or 'help'
341
+ # App states: 'form', 'validation', 'help', or 'monitor'
338
342
  current_state: reactive[str] = reactive("form", recompose=True)
339
343
  form_data: reactive[DeploymentForm] = reactive(DeploymentForm())
340
344
  save_error: reactive[str] = reactive("", recompose=True)
345
+ saved_deployment = reactive[DeploymentResponse | None](None, recompose=True)
341
346
 
342
347
  def __init__(self, initial_data: DeploymentForm):
343
348
  super().__init__()
@@ -350,10 +355,14 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
350
355
  def on_key(self, event) -> None:
351
356
  """Handle key events, including Ctrl+C"""
352
357
  if event.key == "ctrl+c":
353
- self.exit(None)
358
+ if self.current_state == "monitor" and self.saved_deployment is not None:
359
+ self.exit(self.saved_deployment)
360
+ else:
361
+ self.exit(None)
354
362
 
355
363
  def compose(self) -> ComposeResult:
356
- with Container(classes="form-container"):
364
+ is_slim = self.current_state != "monitor"
365
+ with Container(classes="form-container" if is_slim else ""):
357
366
  if self.current_state == "form":
358
367
  yield DeploymentFormWidget(self.form_data, self.save_error)
359
368
  elif self.current_state == "validation":
@@ -368,6 +377,11 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
368
377
  )
369
378
  elif self.current_state == "help":
370
379
  yield DeploymentHelpWidget()
380
+ elif self.current_state == "monitor":
381
+ deployment_id = (
382
+ self.saved_deployment.id if self.saved_deployment else ""
383
+ )
384
+ yield DeploymentMonitorWidget(deployment_id)
371
385
  else:
372
386
  yield Static("Unknown state: " + self.current_state)
373
387
 
@@ -432,8 +446,15 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
432
446
  )
433
447
  else:
434
448
  update_deployment = client.create_deployment(result.to_create())
435
- # Exit with result
436
- self.exit(update_deployment)
449
+ # Save and navigate to embedded monitor screen
450
+ self.saved_deployment = update_deployment
451
+ # Ensure form_data carries the new ID for any subsequent operations
452
+ if not result.is_editing and update_deployment.id:
453
+ updated_form = dataclasses.replace(self.form_data)
454
+ updated_form.id = update_deployment.id
455
+ updated_form.is_editing = True
456
+ self.form_data = updated_form
457
+ self.current_state = "monitor"
437
458
  except Exception as e:
438
459
  # Return to form and show error
439
460
  self.save_error = f"Error saving deployment: {e}"
@@ -447,6 +468,10 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
447
468
  """Handle cancel message from form widget"""
448
469
  self.exit(None)
449
470
 
471
    def on_monitor_close_message(self, _: MonitorCloseMessage) -> None:
        """Handle close from embedded monitor by exiting with saved deployment."""
        # saved_deployment is set when the save succeeded; may be None otherwise.
        self.exit(self.saved_deployment)
474
+
450
475
 
451
476
  def edit_deployment_form(
452
477
  deployment: DeploymentResponse,
@@ -0,0 +1,429 @@
1
+ """Textual component to monitor a deployment and stream its logs."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import hashlib
7
+ import threading
8
+ import time
9
+ from pathlib import Path
10
+ from typing import Iterator
11
+
12
+ from llama_deploy.cli.client import Closer
13
+ from llama_deploy.cli.client import get_project_client as get_client
14
+ from llama_deploy.core.schema.base import LogEvent
15
+ from llama_deploy.core.schema.deployments import DeploymentResponse
16
+ from rich.text import Text
17
+ from textual.app import App, ComposeResult
18
+ from textual.containers import Container, HorizontalGroup, Widget
19
+ from textual.message import Message
20
+ from textual.reactive import reactive
21
+ from textual.widgets import Button, RichLog, Static
22
+
23
+
24
+ class DeploymentMonitorWidget(Widget):
25
+ """Widget that fetches deployment details once and streams logs.
26
+
27
+ Notes:
28
+ - Status is polled periodically
29
+ - Log stream is started with init container logs included on first connect
30
+ - If the stream ends or hangs, we reconnect with duration-aware backoff
31
+ """
32
+
33
+ DEFAULT_CSS = """
34
+ DeploymentMonitorWidget {
35
+ layout: vertical;
36
+ width: 1fr;
37
+ height: 1fr;
38
+ }
39
+
40
+ .monitor-container {
41
+ width: 1fr;
42
+ height: 1fr;
43
+ padding: 0;
44
+ margin: 0;
45
+ }
46
+
47
+ .details-grid {
48
+ layout: grid;
49
+ grid-size: 2;
50
+ grid-columns: auto 1fr;
51
+ grid-gutter: 0 1;
52
+ grid-rows: auto;
53
+ height: auto;
54
+ width: 1fr;
55
+ }
56
+
57
+ .log-header {
58
+ margin-top: 1;
59
+ }
60
+
61
+
62
+ """
63
+
64
+ deployment_id: str
65
+ deployment = reactive[DeploymentResponse | None](None, recompose=False)
66
+ error_message = reactive("", recompose=False)
67
+ wrap_enabled = reactive(False, recompose=False)
68
+ autoscroll_enabled = reactive(True, recompose=False)
69
+ stream_closer: Closer | None = None
70
+
71
    def __init__(self, deployment_id: str) -> None:
        """Initialize monitor state for the given deployment id."""
        super().__init__()
        self.deployment_id = deployment_id
        # Signals the log-streaming worker thread to stop (set in on_unmount).
        self._stop_stream = threading.Event()
        # Persist content written to the RichLog across recomposes
        self._log_buffer: list[Text] = []
77
+
78
    def on_mount(self) -> None:
        """Start background workers: detail fetch, log stream, status polling."""
        # Kick off initial fetch and start logs stream in background
        self.run_worker(self._fetch_deployment(), exclusive=True)
        # thread=True: the log stream consumes a blocking iterator.
        self.run_worker(self._stream_logs, exclusive=False, thread=True)
        # Start periodic polling of deployment status
        self.run_worker(self._poll_deployment_status(), exclusive=False)
84
+
85
    def compose(self) -> ComposeResult:
        """Build the monitor UI: status line, log view, and control buttons."""
        yield Static("Deployment Status", classes="primary-message")
        yield Static("", classes="error-message", id="error_message")

        # Single-line status bar with colored icon and deployment ID
        with HorizontalGroup(classes="mb-1"):
            yield Static(
                self._render_status_line(), classes="status-line", id="status_line"
            )

        yield Static("Logs", classes="secondary-message log-header")
        yield RichLog(
            id="log_view",
            classes="log-view mb-1",
            auto_scroll=self.autoscroll_enabled,
            wrap=self.wrap_enabled,
            highlight=True,
        )

        with HorizontalGroup(classes="button-row"):
            # Button labels reflect the toggle state at (re)compose time.
            wrap_label = "Wrap: On" if self.wrap_enabled else "Wrap: Off"
            auto_label = (
                "Auto-scroll: On" if self.autoscroll_enabled else "Auto-scroll: Off"
            )
            yield Button(wrap_label, id="toggle_wrap", variant="default", compact=True)
            yield Button(
                auto_label, id="toggle_autoscroll", variant="default", compact=True
            )
            yield Button("Copy", id="copy_log", variant="default", compact=True)
            yield Button("Close", id="close", variant="default", compact=True)
115
+
116
+ def on_button_pressed(self, event: Button.Pressed) -> None:
117
+ if event.button.id == "close":
118
+ # Signal parent app to close
119
+ self.post_message(MonitorCloseMessage())
120
+ elif event.button.id == "toggle_wrap":
121
+ self.wrap_enabled = not self.wrap_enabled
122
+ elif event.button.id == "toggle_autoscroll":
123
+ self.autoscroll_enabled = not self.autoscroll_enabled
124
+ elif event.button.id == "copy_log":
125
+ txt = "\n".join([str(x) for x in self._log_buffer])
126
+ self.app.copy_to_clipboard(txt)
127
+
128
+ async def _fetch_deployment(self) -> None:
129
+ try:
130
+ client = get_client()
131
+ self.deployment = client.get_deployment(self.deployment_id)
132
+ # Clear any previous error on success
133
+ self.error_message = ""
134
+ except Exception as e: # pragma: no cover - network errors
135
+ self.error_message = f"Failed to fetch deployment: {e}"
136
+
137
    def _stream_logs(self) -> None:
        """Consume the blocking log iterator in a single worker thread.

        Runs until `self._stop_stream` is set. On stream end or failure it
        reconnects with duration-aware backoff: a connection that outlived
        the current backoff window resets the backoff to its base, so a
        healthy-but-interrupted stream reconnects immediately.

        Cooperative cancellation uses `self._stop_stream` to exit cleanly.
        """
        client = get_client()

        def _sleep_with_cancel(total_seconds: float) -> None:
            # Sleep in small steps so a stop request is noticed quickly.
            step = 0.2
            remaining = total_seconds
            while remaining > 0 and not self._stop_stream.is_set():
                time.sleep(min(step, remaining))
                remaining -= step

        base_backoff_seconds = 0.2
        backoff_seconds = base_backoff_seconds
        max_backoff_seconds = 30.0

        while not self._stop_stream.is_set():
            try:
                connect_started_at = time.monotonic()
                closer, stream = client.stream_deployment_logs(
                    self.deployment_id,
                    include_init_containers=True,
                )
                # On any (re)connect, clear existing content
                self.app.call_from_thread(self._reset_log_view_for_reconnect)

                buffered_stream = _buffer_log_lines(stream)

                def close_stream():
                    # Best-effort close of the underlying HTTP stream.
                    try:
                        closer()
                    except Exception:
                        pass

                self.stream_closer = close_stream
                # Stream connected; consume until end
                for events in buffered_stream:
                    if self._stop_stream.is_set():
                        break
                    # Marshal UI updates back to the main thread via the App
                    self.app.call_from_thread(self._handle_log_events, events)
                if self._stop_stream.is_set():
                    break
                # Stream ended without explicit error; attempt reconnect
                self.app.call_from_thread(
                    self._set_error_message, "Log stream disconnected. Reconnecting..."
                )
            except Exception as e:
                if self._stop_stream.is_set():
                    break
                # Surface the error to the UI and attempt reconnect with backoff
                self.app.call_from_thread(
                    self._set_error_message, f"Log stream failed: {e}. Reconnecting..."
                )

            # Duration-aware backoff: subtract how long the last connection lived
            connection_lifetime = 0.0
            try:
                connection_lifetime = max(0.0, time.monotonic() - connect_started_at)
            except Exception:
                # connect_started_at may be unset if connect itself failed early.
                connection_lifetime = 0.0

            # If the connection lived longer than the current backoff window,
            # reset to base so the next reconnect is immediate.
            if connection_lifetime >= backoff_seconds:
                backoff_seconds = base_backoff_seconds
            else:
                backoff_seconds = min(backoff_seconds * 2.0, max_backoff_seconds)

            delay = max(0.0, backoff_seconds - connection_lifetime)
            if delay > 0:
                _sleep_with_cancel(delay)
211
+
212
+ def _reset_log_view_for_reconnect(self) -> None:
213
+ """Clear UI and buffers so new stream replaces previous content."""
214
+ try:
215
+ log_widget = self.query_one("#log_view", RichLog)
216
+ except Exception:
217
+ log_widget = None
218
+ if log_widget is not None:
219
+ log_widget.clear()
220
+
221
    def _set_error_message(self, message: str) -> None:
        # Main-thread target for call_from_thread; updates the reactive banner.
        self.error_message = message
223
+
224
+ def _handle_log_events(self, events: list[LogEvent]) -> None:
225
+ def to_text(event: LogEvent) -> Text:
226
+ txt = Text()
227
+ txt.append(
228
+ f"[{event.container}] ", style=self._container_style(event.container)
229
+ )
230
+ txt.append(event.text)
231
+ return txt
232
+
233
+ texts = [to_text(event) for event in events]
234
+ if not texts:
235
+ return
236
+
237
+ log_widget = self.query_one("#log_view", RichLog)
238
+ for text in texts:
239
+ log_widget.write(text)
240
+ self._log_buffer.append(text)
241
+ # Clear any previous error once we successfully receive logs
242
+ if self.error_message:
243
+ self.error_message = ""
244
+
245
+ def _container_style(self, container_name: str) -> str:
246
+ palette = [
247
+ "bold magenta",
248
+ "bold cyan",
249
+ "bold blue",
250
+ "bold green",
251
+ "bold red",
252
+ "bold bright_blue",
253
+ ]
254
+ # Stable hash to pick a color per container name
255
+ h = int(hashlib.sha256(container_name.encode()).hexdigest(), 16)
256
+ return palette[h % len(palette)]
257
+
258
+ def _status_icon_and_style(self, phase: str) -> tuple[str, str]:
259
+ # Map deployment phase to a colored icon
260
+ phase = phase or "-"
261
+ green = "bold green"
262
+ yellow = "bold yellow"
263
+ red = "bold red"
264
+ gray = "grey50"
265
+ if phase in {"Running", "Succeeded"}:
266
+ return "●", green
267
+ if phase in {"Pending", "Syncing", "RollingOut"}:
268
+ return "●", yellow
269
+ if phase in {"Failed", "RolloutFailed"}:
270
+ return "●", red
271
+ return "●", gray
272
+
273
+ def _render_status_line(self) -> Text:
274
+ phase = self.deployment.status if self.deployment else "-"
275
+ icon, style = self._status_icon_and_style(phase)
276
+ line = Text()
277
+ line.append(icon, style=style)
278
+ line.append(" ")
279
+ line.append(f"Status: {phase} — Deployment ID: {self.deployment_id or '-'}")
280
+ return line
281
+
282
+ def on_unmount(self) -> None:
283
+ # Attempt to stop the streaming loop
284
+ self._stop_stream.set()
285
+ if self.stream_closer is not None:
286
+ self.stream_closer()
287
+ self.stream_closer = None
288
+
289
+ # Reactive watchers to update widgets in place instead of recomposing
290
+ def watch_error_message(self, message: str) -> None:
291
+ try:
292
+ widget = self.query_one("#error_message", Static)
293
+ except Exception:
294
+ return
295
+ widget.update(message)
296
+ widget.display = bool(message)
297
+
298
+ def watch_deployment(self, deployment: DeploymentResponse | None) -> None:
299
+ if deployment is None:
300
+ return
301
+ phase = deployment.status or "-"
302
+ last = getattr(self, "_last_phase", None)
303
+ if last == phase:
304
+ return
305
+ self._last_phase = phase
306
+ try:
307
+ widget = self.query_one("#status_line", Static)
308
+ except Exception:
309
+ return
310
+ widget.update(self._render_status_line())
311
+
312
+ def watch_wrap_enabled(self, enabled: bool) -> None:
313
+ try:
314
+ log_widget = self.query_one("#log_view", RichLog)
315
+ log_widget.wrap = enabled
316
+ # Clear existing lines; new wrap mode will apply to subsequent events
317
+ log_widget.clear()
318
+ for text in self._log_buffer:
319
+ log_widget.write(text)
320
+ except Exception:
321
+ pass
322
+ try:
323
+ btn = self.query_one("#toggle_wrap", Button)
324
+ btn.label = "Wrap: On" if enabled else "Wrap: Off"
325
+ except Exception:
326
+ pass
327
+
328
+ def watch_autoscroll_enabled(self, enabled: bool) -> None:
329
+ try:
330
+ log_widget = self.query_one("#log_view", RichLog)
331
+ log_widget.auto_scroll = enabled
332
+ except Exception:
333
+ pass
334
+ try:
335
+ btn = self.query_one("#toggle_autoscroll", Button)
336
+ btn.label = "Auto-scroll: On" if enabled else "Auto-scroll: Off"
337
+ except Exception:
338
+ pass
339
+
340
+ async def _poll_deployment_status(self) -> None:
341
+ """Periodically refresh deployment status to reflect updates in the UI."""
342
+ client = get_client()
343
+ while not self._stop_stream.is_set():
344
+ try:
345
+ self.deployment = client.get_deployment(self.deployment_id)
346
+ # Clear any previous error on success
347
+ if self.error_message:
348
+ self.error_message = ""
349
+ except Exception as e: # pragma: no cover - network errors
350
+ # Non-fatal; will try again on next interval
351
+ self.error_message = f"Failed to refresh status: {e}"
352
+ await asyncio.sleep(5)
353
+
354
+
355
class MonitorCloseMessage(Message):
    """Posted by the monitor widget to ask the hosting app to close it."""
357
+
358
+
359
class DeploymentMonitorApp(App[None]):
    """Full-screen app that hosts a single DeploymentMonitorWidget.

    Other flows can embed the widget directly instead of running this app.
    """

    CSS_PATH = Path(__file__).parent / "styles.tcss"

    def __init__(self, deployment_id: str) -> None:
        super().__init__()
        self.deployment_id = deployment_id

    def on_mount(self) -> None:
        self.theme = "tokyo-night"

    def compose(self) -> ComposeResult:
        yield Container(DeploymentMonitorWidget(self.deployment_id))

    def on_monitor_close_message(self, _: MonitorCloseMessage) -> None:
        # The widget requested close; shut the whole app down.
        self.exit(None)

    def on_key(self, event) -> None:
        # Mirror terminal convention: Ctrl+C exits the monitor.
        if event.key != "ctrl+c":
            return
        self.exit(None)
385
+
386
+
387
def monitor_deployment_screen(deployment_id: str) -> None:
    """Run the deployment monitor as a blocking, full-screen Textual app."""
    DeploymentMonitorApp(deployment_id).run()
391
+
392
+
393
+ def _buffer_log_lines(iter: Iterator[LogEvent]) -> Iterator[list[LogEvent]]:
394
+ """Batch log events into small lists using a background reader.
395
+
396
+ This reduces UI churn while still reacting quickly. On shutdown we
397
+ absorb stream read errors that are expected when the connection is
398
+ closed from another thread.
399
+ """
400
+ buffer: list[LogEvent] = []
401
+ bg_error: Exception | None = None
402
+ done = threading.Event()
403
+
404
+ def pump() -> None:
405
+ nonlocal bg_error
406
+ try:
407
+ for event in iter:
408
+ buffer.append(event)
409
+ except Exception as e:
410
+ bg_error = e
411
+ finally:
412
+ done.set()
413
+
414
+ t = threading.Thread(target=pump, daemon=True)
415
+ t.start()
416
+ try:
417
+ while not done.is_set():
418
+ if buffer:
419
+ # Yield a snapshot and clear in-place to avoid reallocating list
420
+ yield list(buffer)
421
+ buffer.clear()
422
+ time.sleep(0.5)
423
+ if bg_error is not None:
424
+ raise bg_error
425
+ finally:
426
+ try:
427
+ t.join(timeout=0.1)
428
+ except Exception:
429
+ pass
File without changes