llamactl 0.3.0a8__tar.gz → 0.3.0a10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/PKG-INFO +3 -3
  2. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/pyproject.toml +3 -3
  3. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/client.py +4 -2
  4. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/commands/aliased_group.py +3 -1
  5. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/commands/init.py +1 -1
  6. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/commands/serve.py +16 -2
  7. llamactl-0.3.0a10/src/llama_deploy/cli/options.py +29 -0
  8. llamactl-0.3.0a10/src/llama_deploy/cli/py.typed +0 -0
  9. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/deployment_form.py +2 -1
  10. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/deployment_monitor.py +61 -15
  11. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/git_validation.py +3 -3
  12. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/llama_loader.py +12 -1
  13. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/profile_form.py +2 -1
  14. llamactl-0.3.0a8/src/llama_deploy/cli/options.py +0 -23
  15. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/README.md +0 -0
  16. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/__init__.py +0 -0
  17. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/app.py +0 -0
  18. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/commands/deployment.py +0 -0
  19. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/commands/profile.py +0 -0
  20. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/config.py +0 -0
  21. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/debug.py +0 -0
  22. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/env.py +0 -0
  23. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/interactive_prompts/utils.py +0 -0
  24. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/deployment_help.py +0 -0
  25. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/github_callback_server.py +0 -0
  26. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/secrets_form.py +0 -0
  27. {llamactl-0.3.0a8 → llamactl-0.3.0a10}/src/llama_deploy/cli/textual/styles.tcss +0 -0
PKG-INFO
@@ -1,12 +1,12 @@
  Metadata-Version: 2.3
  Name: llamactl
- Version: 0.3.0a8
+ Version: 0.3.0a10
  Summary: A command-line interface for managing LlamaDeploy projects and deployments
  Author: Adrian Lyjak
  Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
  License: MIT
- Requires-Dist: llama-deploy-core>=0.3.0a8,<0.4.0
- Requires-Dist: llama-deploy-appserver>=0.3.0a8,<0.4.0
+ Requires-Dist: llama-deploy-core>=0.3.0a10,<0.4.0
+ Requires-Dist: llama-deploy-appserver>=0.3.0a10,<0.4.0
  Requires-Dist: httpx>=0.24.0
  Requires-Dist: rich>=13.0.0
  Requires-Dist: questionary>=2.0.0
pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "llamactl"
- version = "0.3.0a8"
+ version = "0.3.0a10"
  description = "A command-line interface for managing LlamaDeploy projects and deployments"
  readme = "README.md"
  license = { text = "MIT" }
@@ -9,8 +9,8 @@ authors = [
  ]
  requires-python = ">=3.12, <4"
  dependencies = [
-     "llama-deploy-core>=0.3.0a8,<0.4.0",
-     "llama-deploy-appserver>=0.3.0a8,<0.4.0",
+     "llama-deploy-core>=0.3.0a10,<0.4.0",
+     "llama-deploy-appserver>=0.3.0a10,<0.4.0",
      "httpx>=0.24.0",
      "rich>=13.0.0",
      "questionary>=2.0.0",
src/llama_deploy/cli/client.py
@@ -109,10 +109,12 @@ class ProjectClient(BaseClient):
          deployments_response = DeploymentsListResponse.model_validate(response.json())
          return [deployment for deployment in deployments_response.deployments]

-     def get_deployment(self, deployment_id: str) -> DeploymentResponse:
+     def get_deployment(
+         self, deployment_id: str, include_events: bool = False
+     ) -> DeploymentResponse:
          response = self.client.get(
              f"/api/v1beta1/deployments/{deployment_id}",
-             params={"project_id": self.project_id},
+             params={"project_id": self.project_id, "include_events": include_events},
          )
          return DeploymentResponse.model_validate(response.json())

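
The monitor screens below call this method with include_events=True when polling. A minimal sketch of the call site, reusing the get_project_client accessor imported elsewhere in this diff (the deployment ID is hypothetical):

    from llama_deploy.cli.client import get_project_client as get_client

    client = get_client()
    # include_events defaults to False, so event history stays opt-in per call.
    deployment = client.get_deployment("deploy-123", include_events=True)
    print(deployment.status)
    for event in deployment.events or []:  # events may be empty when not requested
        print(event.type, event.reason, event.message)
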
src/llama_deploy/cli/commands/aliased_group.py
@@ -25,7 +25,9 @@ class AliasedGroup(click.Group):

          ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")

-     def resolve_command(self, ctx, args):
+     def resolve_command(
+         self, ctx: click.Context, args: list[str]
+     ) -> tuple[str, click.Command, list[str]]:
          # always return the full command name
          _, cmd, args = super().resolve_command(ctx, args)
          return cmd.name, cmd, args
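
For context, this subclass follows click's documented command-aliasing recipe: get_command prefix-matches what the user typed (failing on ambiguity, as the "Too many matches" branch above shows), and the resolve_command override reports the canonical command name so help and error output stay stable. A self-contained sketch of the pattern, with a get_command body adapted from the click documentation rather than copied from this package:

    import click

    class AliasedGroup(click.Group):
        def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:
            # Exact match first, then a unique prefix match.
            rv = super().get_command(ctx, cmd_name)
            if rv is not None:
                return rv
            matches = [c for c in self.list_commands(ctx) if c.startswith(cmd_name)]
            if not matches:
                return None
            if len(matches) == 1:
                return super().get_command(ctx, matches[0])
            ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")

        def resolve_command(
            self, ctx: click.Context, args: list[str]
        ) -> tuple[str, click.Command, list[str]]:
            # Always return the full command name, not the typed abbreviation.
            _, cmd, args = super().resolve_command(ctx, args)
            return cmd.name, cmd, args
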
src/llama_deploy/cli/commands/init.py
@@ -26,7 +26,7 @@ options = [
          id="basic-ui",
          name="Basic UI",
          description="A basic starter workflow with a React Vite UI",
-         git_url="https://github.com/adrianlyjak/qs",
+         git_url="https://github.com/run-llama/template-workflow-basic-ui",
      ),
      TemplateOption(
          id="extraction-review",
src/llama_deploy/cli/commands/serve.py
@@ -9,7 +9,6 @@ from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
  from rich import print as rprint

  from ..app import app
- from ..options import global_options


  @app.command(
@@ -36,7 +35,18 @@ from ..options import global_options
  )
  @click.option("--port", type=int, help="The port to run the API server on")
  @click.option("--ui-port", type=int, help="The port to run the UI proxy server on")
- @global_options
+ @click.option(
+     "--log-level",
+     type=click.Choice(
+         ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+     ),
+     help="The log level to run the API server at",
+ )
+ @click.option(
+     "--log-format",
+     type=click.Choice(["console", "json"], case_sensitive=False),
+     help="The format to use for logging",
+ )
  def serve(
      deployment_file: Path,
      no_install: bool,
@@ -45,6 +55,8 @@ def serve(
      preview: bool,
      port: int | None = None,
      ui_port: int | None = None,
+     log_level: str | None = None,
+     log_format: str | None = None,
  ) -> None:
      """Run llama_deploy API Server in the foreground. Reads the deployment configuration from the current directory. Can optionally specify a deployment file path."""
      if not deployment_file.exists():
@@ -64,6 +76,8 @@ def serve(
              open_browser=not no_open_browser,
              port=port,
              ui_port=ui_port,
+             log_level=log_level.upper() if log_level else None,
+             log_format=log_format.lower() if log_format else None,
          )

      except KeyboardInterrupt:
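
Since both Choice types are case-insensitive and the parsed values are normalized before being forwarded (level upper-cased, format lower-cased), an invocation such as

    llamactl serve --log-level debug --log-format JSON

reaches the server as DEBUG and json. Omitted flags are forwarded as None, presumably leaving the appserver's own logging defaults in effect.
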
src/llama_deploy/cli/options.py (added in 0.3.0a10)
@@ -0,0 +1,29 @@
+ import logging
+ from typing import Callable, ParamSpec, TypeVar
+
+ import click
+
+ P = ParamSpec("P")
+ R = TypeVar("R")
+
+
+ def global_options(f: Callable[P, R]) -> Callable[P, R]:
+     """Common decorator to add global options to command groups"""
+     from .debug import setup_file_logging
+
+     def debug_callback(ctx: click.Context, param: click.Parameter, value: str) -> str:
+         if value:
+             setup_file_logging(level=logging._nameToLevel.get(value, logging.INFO))
+         return value
+
+     return click.option(
+         "--log-level",
+         type=click.Choice(
+             ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+         ),
+         help="Enable debug logging to file",
+         callback=debug_callback,
+         expose_value=False,
+         is_eager=True,
+         hidden=True,
+     )(f)
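
Compared with the 0.3.0a8 module deleted at the end of this diff, the rewritten decorator adds ParamSpec-preserving type hints, makes the Choice case-insensitive, and swaps the KeyError-prone logging._nameToLevel[value] index for a .get(value, logging.INFO) fallback. A minimal sketch of how the decorator is meant to be applied (the cli group and status command here are hypothetical):

    import click

    from llama_deploy.cli.options import global_options


    @click.group()
    @global_options  # attaches the hidden, eager --log-level option
    def cli() -> None:
        """Hypothetical root group for illustration."""


    @cli.command()
    def status() -> None:
        click.echo("ok")

Because the option is eager with expose_value=False, the callback configures file logging as a side effect before other parameters are processed, and the hidden flag never appears in the command's signature or --help output.
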
File without changes: src/llama_deploy/cli/py.typed (new, empty type-marker file)
src/llama_deploy/cli/textual/deployment_form.py
@@ -38,6 +38,7 @@ from llama_deploy.core.schema.deployments import (
      DeploymentResponse,
      DeploymentUpdate,
  )
+ from textual import events
  from textual.app import App, ComposeResult
  from textual.containers import Container, HorizontalGroup, Widget
  from textual.content import Content
@@ -352,7 +353,7 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
      def on_mount(self) -> None:
          self.theme = "tokyo-night"

-     def on_key(self, event) -> None:
+     def on_key(self, event: events.Key) -> None:
          """Handle key events, including Ctrl+C"""
          if event.key == "ctrl+c":
              if self.current_state == "monitor" and self.saved_deployment is not None:
src/llama_deploy/cli/textual/deployment_monitor.py
@@ -14,8 +14,10 @@ from llama_deploy.cli.client import get_project_client as get_client
  from llama_deploy.core.schema.base import LogEvent
  from llama_deploy.core.schema.deployments import DeploymentResponse
  from rich.text import Text
+ from textual import events
  from textual.app import App, ComposeResult
  from textual.containers import Container, HorizontalGroup, Widget
+ from textual.content import Content
  from textual.message import Message
  from textual.reactive import reactive
  from textual.widgets import Button, RichLog, Static
@@ -58,6 +60,16 @@ class DeploymentMonitorWidget(Widget):
          margin-top: 1;
      }

+     .status-line .status-main {
+         width: auto;
+     }
+
+     .status-line .status-right {
+         width: 1fr;
+         text-align: right;
+         min-width: 12;
+     }
+

      """

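
These rules lean on Textual's fr unit: .status-main stays only as wide as its text, while .status-right absorbs the remaining fraction of the HorizontalGroup and right-aligns its content, so the event timestamp introduced below hugs the right edge; min-width: 12 keeps it legible when the status text grows long.
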
@@ -87,10 +99,12 @@ class DeploymentMonitorWidget(Widget):
          yield Static("", classes="error-message", id="error_message")

          # Single-line status bar with colored icon and deployment ID
-         with HorizontalGroup(classes="mb-1"):
+         with HorizontalGroup(classes="status-line"):
              yield Static(
-                 self._render_status_line(), classes="status-line", id="status_line"
+                 self._render_status_line(), classes="status-main", id="status_line"
              )
+             yield Static("", classes="status-right", id="last_event_status")
+         yield Static("", classes="last-event mb-1", id="last_event_details")

          yield Static("Logs", classes="secondary-message log-header")
          yield RichLog(
@@ -128,7 +142,9 @@ class DeploymentMonitorWidget(Widget):
      async def _fetch_deployment(self) -> None:
          try:
              client = get_client()
-             self.deployment = client.get_deployment(self.deployment_id)
+             self.deployment = client.get_deployment(
+                 self.deployment_id, include_events=True
+             )
              # Clear any previous error on success
              self.error_message = ""
          except Exception as e:  # pragma: no cover - network errors
@@ -271,7 +287,7 @@ class DeploymentMonitorWidget(Widget):
              return "●", gray

      def _render_status_line(self) -> Text:
-         phase = self.deployment.status if self.deployment else "-"
+         phase = self.deployment.status if self.deployment else "Unknown"
          icon, style = self._status_icon_and_style(phase)
          line = Text()
          line.append(icon, style=style)
@@ -279,6 +295,34 @@ class DeploymentMonitorWidget(Widget):
          line.append(f"Status: {phase} — Deployment ID: {self.deployment_id or '-'}")
          return line

+     def _render_last_event_details(self) -> Content:
+         if not self.deployment or not self.deployment.events:
+             return Content()
+         latest = self.deployment.events[-1]
+         txt = Text(f" {latest.message}", style="dim")
+         return Content.from_rich_text(txt)
+
+     def _render_last_event_status(self) -> Content:
+         if not self.deployment or not self.deployment.events:
+             return Content()
+         txt = Text()
+         # Pick the most recent by last_timestamp
+         latest = self.deployment.events[-1]
+         ts = None
+         ts = (latest.last_timestamp or latest.first_timestamp).strftime(
+             "%Y-%m-%d %H:%M:%S"
+         )
+         parts: list[str] = []
+         if latest.type:
+             parts.append(latest.type)
+         if latest.reason:
+             parts.append(latest.reason)
+         kind = "/".join(parts) if parts else None
+         if kind:
+             txt.append(f"{kind} ", style="medium_purple3")
+         txt.append(f"{ts}", style="dim")
+         return Content.from_rich_text(txt)
+
      def on_unmount(self) -> None:
          # Attempt to stop the streaming loop
          self._stop_stream.set()
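
Taken together, the two renderers split the newest event between the right-aligned status cell (type/reason plus timestamp) and a dimmed detail line underneath. For a hypothetical event with type "Warning", reason "BackOff", message "Back-off restarting container", and a last_timestamp of 2025-01-01 12:00:00, they would render roughly:

    Warning/BackOff 2025-01-01 12:00:00
     Back-off restarting container

Both return textual.content.Content built from rich Text, which is what the Content import added at the top of this file supports.
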
@@ -298,16 +342,16 @@ class DeploymentMonitorWidget(Widget):
      def watch_deployment(self, deployment: DeploymentResponse | None) -> None:
          if deployment is None:
              return
-         phase = deployment.status or "-"
-         last = getattr(self, "_last_phase", None)
-         if last == phase:
-             return
-         self._last_phase = phase
-         try:
-             widget = self.query_one("#status_line", Static)
-         except Exception:
-             return
+
+         widget = self.query_one("#status_line", Static)
+         ev_widget = self.query_one("#last_event_status", Static)
+         ev_details_widget = self.query_one("#last_event_details", Static)
+
          widget.update(self._render_status_line())
+         # Update last event line
+         ev_widget.update(self._render_last_event_status())
+         ev_details_widget.update(self._render_last_event_details())
+         ev_details_widget.display = bool(self.deployment and self.deployment.events)

      def watch_wrap_enabled(self, enabled: bool) -> None:
          try:
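
watch_deployment is Textual's reactive-watcher hook: assuming deployment is declared with reactive() (the import appears in this file's header), every assignment to self.deployment, including the polling loop below, re-runs this method. Removing the old phase-equality guard means the status line and the new event widgets now refresh on every poll rather than only on phase transitions.
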
@@ -342,7 +386,9 @@ class DeploymentMonitorWidget(Widget):
          client = get_client()
          while not self._stop_stream.is_set():
              try:
-                 self.deployment = client.get_deployment(self.deployment_id)
+                 self.deployment = client.get_deployment(
+                     self.deployment_id, include_events=True
+                 )
                  # Clear any previous error on success
                  if self.error_message:
                      self.error_message = ""
@@ -378,7 +424,7 @@ class DeploymentMonitorApp(App[None]):
      def on_monitor_close_message(self, _: MonitorCloseMessage) -> None:
          self.exit(None)

-     def on_key(self, event) -> None:
+     def on_key(self, event: events.Key) -> None:
          # Support Ctrl+C to exit, consistent with other screens and terminals
          if event.key == "ctrl+c":
              self.exit(None)
src/llama_deploy/cli/textual/git_validation.py
@@ -224,7 +224,7 @@ class GitValidationWidget(Widget):
              self.current_state = "options"
              self.error_message = ""
          elif event.button.id == "cancel_github_auth":
-             self._cancel_github_auth()
+             self.run_worker(self._cancel_github_auth())
          elif event.button.id == "recheck_github":
              self.run_worker(self._recheck_github_auth())
          elif event.button.id == "continue_success":
@@ -258,10 +258,10 @@ class GitValidationWidget(Widget):
          self.github_callback_server = GitHubCallbackServer()
          self.run_worker(self._wait_for_callback())

-     def _cancel_github_auth(self) -> None:
+     async def _cancel_github_auth(self) -> None:
          """Cancel GitHub authentication and return to options"""
          if self.github_callback_server:
-             self.github_callback_server.stop()
+             await self.github_callback_server.stop()
              self.github_callback_server = None
          self.current_state = "options"

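
This pairs with the handler change above: now that GitHubCallbackServer.stop() is evidently a coroutine, the button handler schedules _cancel_github_auth() through run_worker, Textual's mechanism for running an awaitable on the app's event loop, rather than calling it synchronously inside the message handler.
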
src/llama_deploy/cli/textual/llama_loader.py
@@ -1,12 +1,23 @@
  import re
+ from typing import TypedDict

  from textual.widgets import Static


+ class StaticKwargs(TypedDict, total=False):
+     expand: bool
+     shrink: bool
+     markup: bool
+     name: str | None
+     id: str | None
+     classes: str | None
+     disabled: bool
+
+
  class PixelLlamaLoader(Static):
      """Pixelated llama loading animation using block characters"""

-     def __init__(self, **kwargs):
+     def __init__(self, **kwargs: StaticKwargs):
          self.frame = 0
          # Pixelated llama frames using Unicode block characters
          self.frames = [
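
One typing note: annotating **kwargs: StaticKwargs declares every individual keyword value to be a StaticKwargs dict. The PEP 692 spelling for "keyword arguments matching this TypedDict" goes through typing.Unpack, which is available on this package's Python 3.12 floor. A minimal sketch of that pattern (the TypedLoader class is hypothetical, not what this release ships):

    from typing import TypedDict, Unpack

    from textual.widgets import Static


    class StaticKwargs(TypedDict, total=False):
        id: str | None
        classes: str | None


    class TypedLoader(Static):
        def __init__(self, **kwargs: Unpack[StaticKwargs]) -> None:
            # Each keyword is now checked against StaticKwargs individually.
            super().__init__(**kwargs)
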
src/llama_deploy/cli/textual/profile_form.py
@@ -3,6 +3,7 @@
  from dataclasses import dataclass
  from pathlib import Path

+ from textual import events
  from textual.app import App, ComposeResult
  from textual.containers import (
      Container,
@@ -45,7 +46,7 @@ class ProfileEditApp(App[ProfileForm | None]):
      def on_mount(self) -> None:
          self.theme = "tokyo-night"

-     def on_key(self, event) -> None:
+     def on_key(self, event: events.Key) -> None:
          """Handle key events, including Ctrl+C"""
          if event.key == "ctrl+c":
              self.exit(None)
src/llama_deploy/cli/options.py (0.3.0a8 version, deleted)
@@ -1,23 +0,0 @@
- import logging
-
- import click
-
-
- def global_options(f):
-     """Common decorator to add global options to command groups"""
-     from .debug import setup_file_logging
-
-     def debug_callback(ctx, param, value):
-         if value:
-             setup_file_logging(level=logging._nameToLevel[value])
-         return value
-
-     return click.option(
-         "--log-level",
-         type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
-         help="Enable debug logging to file",
-         callback=debug_callback,
-         expose_value=False,
-         is_eager=True,
-         hidden=True,
-     )(f)
Files without changes (15–27): README.md and the remaining src/llama_deploy/cli modules listed above.