llamactl 0.2.7a1__tar.gz → 0.3.0a2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.2.7a1
+Version: 0.3.0a2
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core>=0.2.7a1,<0.3.0
-Requires-Dist: llama-deploy-appserver>=0.2.7a1,<0.3.0
+Requires-Dist: llama-deploy-core>=0.3.0a2,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.0a2,<0.4.0
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0
@@ -15,6 +15,7 @@ Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: tenacity>=9.1.2
 Requires-Dist: textual>=4.0.0
 Requires-Dist: aiohttp>=3.12.14
+Requires-Dist: copier>=9.9.0
 Requires-Python: >=3.12, <4
 Description-Content-Type: text/markdown
 
@@ -1,6 +1,6 @@
 [project]
 name = "llamactl"
-version = "0.2.7a1"
+version = "0.3.0a2"
 description = "A command-line interface for managing LlamaDeploy projects and deployments"
 readme = "README.md"
 license = { text = "MIT" }
@@ -9,8 +9,8 @@ authors = [
 ]
 requires-python = ">=3.12, <4"
 dependencies = [
-    "llama-deploy-core>=0.2.7a1,<0.3.0",
-    "llama-deploy-appserver>=0.2.7a1,<0.3.0",
+    "llama-deploy-core>=0.3.0a2,<0.4.0",
+    "llama-deploy-appserver>=0.3.0a2,<0.4.0",
    "httpx>=0.24.0",
    "rich>=13.0.0",
    "questionary>=2.0.0",
@@ -19,6 +19,7 @@ dependencies = [
    "tenacity>=9.1.2",
    "textual>=4.0.0",
    "aiohttp>=3.12.14",
+    "copier>=9.9.0",
 ]
 
 [project.scripts]
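
Both the package metadata and pyproject.toml bump llamactl to 0.3.0a2, widen the llama-deploy-core and llama-deploy-appserver pins from <0.3.0 to <0.4.0, and add a new runtime dependency on copier. A minimal sketch for confirming the resolved versions in an installed environment (importlib.metadata is stdlib; package names are taken from the dependency list above):

    from importlib.metadata import version

    # Each of these should satisfy the pins declared in pyproject.toml.
    for pkg in ("llamactl", "llama-deploy-core", "llama-deploy-appserver", "copier"):
        print(pkg, version(pkg))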
@@ -1,5 +1,5 @@
 import click
-from .commands import projects, deployments, profile, health_check, serve
+from .commands import projects, deployments, profile, health_check, serve, version
 from .options import global_options
 
 
@@ -22,6 +22,9 @@ app.add_command(health_check, name="health")
 # Add serve command at root level
 app.add_command(serve, name="serve")
 
+# Add version command at root level
+app.add_command(version, name="version")
+
 
 # Main entry point function (called by the script)
 def main() -> None:
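
The new version subcommand is wired up the same way as the existing commands: a Click command defined in .commands and attached to the root group under an explicit name. A minimal sketch of the pattern (the group and command bodies here are illustrative, not the project's real ones):

    import click

    @click.group()
    def app() -> None:
        """Root CLI group."""

    @click.command("version")
    def version() -> None:
        click.echo("0.3.0a2")

    # Register under an explicit name, as the diff does.
    app.add_command(version, name="version")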
@@ -1,4 +1,3 @@
-import logging
 from typing import List, Optional
 
 import httpx
@@ -18,109 +17,102 @@ from rich.console import Console
 from .config import config_manager
 
 
-class LlamaDeployClient:
-    """HTTP client for communicating with the LlamaDeploy control plane API"""
-
-    def __init__(
-        self, base_url: Optional[str] = None, project_id: Optional[str] = None
-    ):
-        """Initialize the client with a configured profile"""
-        self.console = Console()
-
-        # Get profile data
-        profile = config_manager.get_current_profile()
-        if not profile:
-            self.console.print("\n[bold red]No profile configured![/bold red]")
-            self.console.print("\nTo get started, create a profile with:")
-            self.console.print("[cyan]llamactl profile create[/cyan]")
-            raise SystemExit(1)
-
-        # Use profile data with optional overrides
-        self.base_url = base_url or profile.api_url
-        self.project_id = project_id or profile.active_project_id
-
-        if not self.base_url:
-            raise ValueError("API URL is required")
-
-        if not self.project_id:
-            raise ValueError("Project ID is required")
-
-        self.base_url = self.base_url.rstrip("/")
-
-        # Create persistent client with event hooks
+class BaseClient:
+    def __init__(self, base_url: str, console: Console) -> None:
+        self.base_url = base_url.rstrip("/")
+        self.console = console
         self.client = httpx.Client(
             base_url=self.base_url, event_hooks={"response": [self._handle_response]}
         )
 
     def _handle_response(self, response: httpx.Response) -> None:
-        """Handle response middleware - warnings and error conversion"""
-        # Check for warnings in response headers
         if "X-Warning" in response.headers:
             self.console.print(
                 f"[yellow]Warning: {response.headers['X-Warning']}[/yellow]"
             )
-
-        # Convert httpx errors to our current exception format
         try:
             response.raise_for_status()
         except httpx.HTTPStatusError as e:
-            # Try to parse JSON error response
             try:
-                response.read()  # need to collect streaming data before calling json
+                response.read()
                 error_data = e.response.json()
                 if isinstance(error_data, dict) and "detail" in error_data:
                     error_message = error_data["detail"]
                 else:
                     error_message = str(error_data)
             except (ValueError, KeyError):
-                # Fallback to raw response text
                 error_message = e.response.text
-
             raise Exception(f"HTTP {e.response.status_code}: {error_message}") from e
         except httpx.RequestError as e:
            raise Exception(f"Request failed: {e}") from e
 
-    # Health check
+
+class ControlPlaneClient(BaseClient):
+    """Unscoped client for non-project endpoints."""
+
     def health_check(self) -> dict:
-        """Check if the API server is healthy"""
         response = self.client.get("/health")
         return response.json()
 
-    # Projects
+    def server_version(self) -> dict:
+        response = self.client.get("/version")
+        return response.json()
+
     def list_projects(self) -> List[ProjectSummary]:
-        """List all projects with deployment counts"""
         response = self.client.get("/projects/")
         projects_response = ProjectsListResponse.model_validate(response.json())
         return [project for project in projects_response.projects]
 
-    # Deployments
+
+class ProjectClient(BaseClient):
+    """Project-scoped client for deployment operations."""
+
+    def __init__(
+        self,
+        base_url: Optional[str] = None,
+        project_id: Optional[str] = None,
+        console: Optional[Console] = None,
+    ) -> None:
+        # Allow default construction using active profile (for tests and convenience)
+        if base_url is None or project_id is None:
+            profile = config_manager.get_current_profile()
+            if not profile:
+                # Match previous behavior for missing profiles
+                (console or Console()).print(
+                    "\n[bold red]No profile configured![/bold red]"
+                )
+                (console or Console()).print("\nTo get started, create a profile with:")
+                (console or Console()).print("[cyan]llamactl profile create[/cyan]")
+                raise SystemExit(1)
+            base_url = base_url or profile.api_url or ""
+            project_id = project_id or profile.active_project_id
+        if not base_url:
+            raise ValueError("API URL is required")
+        if not project_id:
+            raise ValueError("Project ID is required")
+        resolved_console = console or Console()
+        super().__init__(base_url, resolved_console)
+        self.project_id = project_id
+
     def list_deployments(self) -> List[DeploymentResponse]:
-        """List deployments for the configured project"""
         response = self.client.get(f"/{self.project_id}/deployments/")
         deployments_response = DeploymentsListResponse.model_validate(response.json())
         return [deployment for deployment in deployments_response.deployments]
 
     def get_deployment(self, deployment_id: str) -> DeploymentResponse:
-        """Get a specific deployment"""
         response = self.client.get(f"/{self.project_id}/deployments/{deployment_id}")
-        deployment = DeploymentResponse.model_validate(response.json())
-        return deployment
+        return DeploymentResponse.model_validate(response.json())
 
     def create_deployment(
-        self,
-        deployment_data: DeploymentCreate,
+        self, deployment_data: DeploymentCreate
     ) -> DeploymentResponse:
-        """Create a new deployment"""
-
         response = self.client.post(
             f"/{self.project_id}/deployments/",
             json=deployment_data.model_dump(exclude_none=True),
         )
-        deployment = DeploymentResponse.model_validate(response.json())
-        return deployment
+        return DeploymentResponse.model_validate(response.json())
 
     def delete_deployment(self, deployment_id: str) -> None:
-        """Delete a deployment"""
         self.client.delete(f"/{self.project_id}/deployments/{deployment_id}")
 
     def update_deployment(
@@ -129,19 +121,15 @@ class LlamaDeployClient:
         update_data: DeploymentUpdate,
         force_git_sha_update: bool = False,
     ) -> DeploymentResponse:
-        """Update an existing deployment"""
-
         params = {}
         if force_git_sha_update:
             params["force_git_sha_update"] = True
-
         response = self.client.patch(
             f"/{self.project_id}/deployments/{deployment_id}",
             json=update_data.model_dump(),
             params=params,
         )
-        deployment = DeploymentResponse.model_validate(response.json())
-        return deployment
+        return DeploymentResponse.model_validate(response.json())
 
     def validate_repository(
         self,
@@ -149,10 +137,6 @@ class LlamaDeployClient:
         deployment_id: str | None = None,
         pat: str | None = None,
     ) -> RepositoryValidationResponse:
-        """Validate a repository URL"""
-        logging.info(
-            f"Validating repository with params: {repo_url}, {deployment_id}, {pat}"
-        )
         response = self.client.post(
             f"/{self.project_id}/deployments/validate-repository",
             json=RepositoryValidationRequest(
@@ -161,13 +145,37 @@ class LlamaDeployClient:
                 pat=pat,
             ).model_dump(),
         )
-        logging.info(f"Response: {response.json()}")
         return RepositoryValidationResponse.model_validate(response.json())
 
 
-# Global client factory function
-def get_client(
+def get_control_plane_client(base_url: Optional[str] = None) -> ControlPlaneClient:
+    console = Console()
+    profile = config_manager.get_current_profile()
+    if not profile and not base_url:
+        console.print("\n[bold red]No profile configured![/bold red]")
+        console.print("\nTo get started, create a profile with:")
+        console.print("[cyan]llamactl profile create[/cyan]")
+        raise SystemExit(1)
+    resolved_base_url = (base_url or (profile.api_url if profile else "")).rstrip("/")
+    if not resolved_base_url:
+        raise ValueError("API URL is required")
+    return ControlPlaneClient(resolved_base_url, console)
+
+
+def get_project_client(
     base_url: Optional[str] = None, project_id: Optional[str] = None
-) -> LlamaDeployClient:
-    """Get a client instance with optional overrides"""
-    return LlamaDeployClient(base_url=base_url, project_id=project_id)
+) -> ProjectClient:
+    console = Console()
+    profile = config_manager.get_current_profile()
+    if not profile:
+        console.print("\n[bold red]No profile configured![/bold red]")
+        console.print("\nTo get started, create a profile with:")
+        console.print("[cyan]llamactl profile create[/cyan]")
+        raise SystemExit(1)
+    resolved_base_url = (base_url or profile.api_url or "").rstrip("/")
+    if not resolved_base_url:
+        raise ValueError("API URL is required")
+    resolved_project_id = project_id or profile.active_project_id
+    if not resolved_project_id:
+        raise ValueError("Project ID is required")
+    return ProjectClient(resolved_base_url, resolved_project_id, console)
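
The monolithic LlamaDeployClient is split into a shared BaseClient plus two scoped clients: ControlPlaneClient for unscoped endpoints (/health, /version, /projects/) and ProjectClient for project-scoped deployment routes. A hedged usage sketch of the two factories (module path taken from imports elsewhere in this diff; an active profile is assumed):

    from llama_deploy.cli.client import get_control_plane_client, get_project_client

    # Unscoped: only needs an API URL (from the active profile or an override).
    cp = get_control_plane_client()
    print(cp.health_check(), cp.server_version())

    # Project-scoped: additionally resolves a project id from the profile.
    pc = get_project_client(project_id="my-project-id")  # id is illustrative
    for deployment in pc.list_deployments():
        print(deployment)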
@@ -1,18 +1,15 @@
-import os
-import subprocess
 from pathlib import Path
 from typing import Optional
 
 import click
-from llama_deploy.appserver.client import Client as ApiserverClient
+from llama_deploy.appserver.app import start_server
 from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from llama_deploy.core.schema.deployments import DeploymentUpdate
 from rich import print as rprint
 from rich.console import Console
 from rich.table import Table
-from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed
 
-from .client import get_client
+from .client import get_project_client, get_control_plane_client
 from .config import config_manager
 from .interactive_prompts.utils import (
     confirm_action,
@@ -220,7 +217,7 @@ def edit_profile(name: Optional[str]) -> None:
 def list_projects() -> None:
     """List all projects with deployment counts"""
     try:
-        client = get_client()
+        client = get_control_plane_client()
         projects = client.list_projects()
 
         if not projects:
@@ -249,7 +246,7 @@ def list_projects() -> None:
 def health_check() -> None:
     """Check if the API server is healthy"""
     try:
-        client = get_client()
+        client = get_control_plane_client()
         health = client.health_check()
 
         status = health.get("status", "unknown")
@@ -269,7 +266,7 @@ def health_check() -> None:
 def list_deployments() -> None:
     """List deployments for the configured project"""
     try:
-        client = get_client()
+        client = get_project_client()
         deployments = client.list_deployments()
 
         if not deployments:
@@ -323,7 +320,7 @@ def list_deployments() -> None:
 def get_deployment(deployment_id: Optional[str]) -> None:
     """Get details of a specific deployment"""
     try:
-        client = get_client()
+        client = get_project_client()
 
         deployment_id = select_deployment(deployment_id)
         if not deployment_id:
@@ -395,7 +392,7 @@ def create_deployment(
 def delete_deployment(deployment_id: Optional[str], confirm: bool) -> None:
     """Delete a deployment"""
     try:
-        client = get_client()
+        client = get_project_client()
 
         deployment_id = select_deployment(deployment_id)
         if not deployment_id:
@@ -421,7 +418,7 @@ def delete_deployment(deployment_id: Optional[str], confirm: bool) -> None:
 def edit_deployment(deployment_id: Optional[str]) -> None:
     """Interactively edit a deployment"""
     try:
-        client = get_client()
+        client = get_project_client()
 
         deployment_id = select_deployment(deployment_id)
         if not deployment_id:
@@ -452,7 +449,7 @@ def edit_deployment(deployment_id: Optional[str]) -> None:
 def refresh_deployment(deployment_id: Optional[str]) -> None:
     """Refresh a deployment with the latest code from its git reference"""
     try:
-        client = get_client()
+        client = get_project_client()
 
         deployment_id = select_deployment(deployment_id)
         if not deployment_id:
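
Every handler above now obtains its client through one of the new factories, so all HTTP traffic flows through BaseClient's shared response hook. A standalone sketch of the httpx event-hook mechanism it relies on (the base URL is illustrative):

    import httpx

    def on_response(response: httpx.Response) -> None:
        # Hooks run for every response; raising here converts HTTP errors
        # into application-level exceptions, as BaseClient does.
        response.read()  # drain the stream before inspecting the body
        response.raise_for_status()

    client = httpx.Client(
        base_url="https://api.example.com",  # illustrative
        event_hooks={"response": [on_response]},
    )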
@@ -493,53 +490,41 @@ def refresh_deployment(deployment_id: Optional[str]) -> None:
     default=DEFAULT_DEPLOYMENT_FILE_PATH,
     type=click.Path(dir_okay=False, resolve_path=True, path_type=Path),  # type: ignore
 )
+@click.option(
+    "--no-install", is_flag=True, help="Skip installing python and js dependencies"
+)
+@click.option(
+    "--no-reload", is_flag=True, help="Skip reloading the API server on code changes"
+)
+@click.option("--no-open-browser", is_flag=True, help="Skip opening the browser")
+@click.option(
+    "--preview",
+    is_flag=True,
+    help="Preview mode pre-builds the UI to static files, like a production build",
+)
 @global_options
-def serve(deployment_file: Path) -> None:
+def serve(
+    deployment_file: Path,
+    no_install: bool,
+    no_reload: bool,
+    no_open_browser: bool,
+    preview: bool,
+) -> None:
     """Run llama_deploy API Server in the foreground. If no deployment_file is provided, will look for a llama_deploy.yaml in the current directory."""
     if not deployment_file.exists():
         rprint(f"[red]Deployment file '{deployment_file}' not found[/red]")
         raise click.Abort()
 
     try:
-        env = os.environ.copy()
-        env["LLAMA_DEPLOY_APISERVER_DEPLOYMENTS_PATH"] = str(deployment_file.parent)
-
-        client = ApiserverClient()
-
-        uvicorn_p = subprocess.Popen(
-            [
-                "uvicorn",
-                "llama_deploy.appserver.app:app",
-                "--host",
-                "localhost",
-                "--port",
-                "4501",
-            ],
-            env=env,
-        )
-
-        retrying = Retrying(
-            stop=stop_after_attempt(5), wait=wait_fixed(RETRY_WAIT_SECONDS)
+        start_server(
+            cwd=deployment_file.parent,
+            deployment_file=deployment_file,
+            proxy_ui=not preview,
+            reload=not no_reload,
+            install=not no_install,
+            build=preview,
+            open_browser=not no_open_browser,
         )
-        try:
-            for attempt in retrying:
-                with attempt:
-                    client.sync.apiserver.deployments.create(
-                        deployment_file.open("rb"),
-                        base_path=deployment_file.parent,
-                        local=True,
-                    )
-        except RetryError as e:
-            uvicorn_p.terminate()
-            last: Optional[BaseException] = e.last_attempt.exception(0)
-            last_msg = ""
-            if last is not None:
-                last_msg = ": " + (
-                    str(last.message) if hasattr(last, "message") else str(last)
-                )
-            raise click.ClickException(f"Failed to create deployment{last_msg}")
-
-        uvicorn_p.wait()
 
     except KeyboardInterrupt:
         print("Shutting down...")
@@ -547,3 +532,31 @@ def serve(deployment_file: Path) -> None:
     except Exception as e:
         rprint(f"[red]Error: {e}[/red]")
         raise click.Abort()
+
+
+@click.command("version")
+@global_options
+def version() -> None:
+    """Print the version of llama_deploy"""
+    try:
+        from importlib.metadata import PackageNotFoundError, version as pkg_version
+
+        ver = pkg_version("llamactl")
+        rprint(f"client version: {ver}")
+
+        # If there is an active profile, attempt to query server version
+        profile = config_manager.get_current_profile()
+        if profile and profile.api_url:
+            try:
+                cp_client = get_control_plane_client()
+                data = cp_client.server_version()
+                server_ver = data.get("version", "unknown")
+                rprint(f"server version: {server_ver}")
+            except Exception as e:
+                rprint(f"server version: [yellow]unavailable[/yellow] ({e})")
+    except PackageNotFoundError:
+        rprint("[red]Package 'llamactl' not found[/red]")
+        raise click.Abort()
+    except Exception as e:
+        rprint(f"[red]Error: {e}[/red]")
+        raise click.Abort()
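
The new command prints the locally installed llamactl version and, when a profile with an api_url exists, best-effort queries the control plane's /version endpoint. A hedged sketch of the same lookup outside the CLI (the server URL is illustrative; the response is assumed to carry a "version" key, as the command's parsing implies):

    from importlib.metadata import version as pkg_version

    import httpx

    print("client version:", pkg_version("llamactl"))
    resp = httpx.get("https://control-plane.example.com/version")
    print("server version:", resp.json().get("version", "unknown"))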
@@ -6,7 +6,7 @@ import questionary
 from rich import print as rprint
 from rich.console import Console
 
-from ..client import get_client
+from ..client import get_project_client as get_client
 from ..config import config_manager
 
 console = Console()
@@ -21,7 +21,7 @@ from textual.containers import Container, HorizontalGroup, Widget
 from textual.validation import Length
 from textual.widgets import Button, Input, Label, Static
 from textual.reactive import reactive
-from llama_deploy.cli.client import get_client
+from llama_deploy.cli.client import get_project_client as get_client
 from textual.message import Message
 
 
@@ -361,7 +361,6 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
     def on_validation_cancel_message(self, message: ValidationCancelMessage) -> None:
         """Handle validation cancellation from git validation widget"""
         # Return to form, clearing any save error
-        print("DEBUG: on_validation_cancel_message")
         self.save_error = ""
         self.current_state = "form"
 
@@ -11,7 +11,7 @@ from textual.message import Message
 from textual.content import Content
 from textual.reactive import reactive
 
-from llama_deploy.cli.client import get_client
+from llama_deploy.cli.client import get_project_client as get_client
 from llama_deploy.core.schema.git_validation import RepositoryValidationResponse
 from llama_deploy.cli.textual.llama_loader import PixelLlamaLoader
 from llama_deploy.cli.textual.github_callback_server import GitHubCallbackServer
@@ -206,7 +206,6 @@ class GitValidationWidget(Widget):
         yield Button(
             "Continue", id="continue_success", variant="primary", compact=True
         )
-        print("DEBUG: render cancel button")
         # Always show cancel button
         yield Button("Back to Edit", id="cancel", variant="default", compact=True)
 
@@ -238,7 +237,6 @@ class GitValidationWidget(Widget):
             )
             self.post_message(ValidationResultMessage(self.repo_url, pat_to_send))
         elif event.button.id == "cancel":
-            print("DEBUG: cancel button pressed")
             self.post_message(ValidationCancelMessage())
 
     def _start_github_auth(self) -> None:
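
The remaining hunks swap the textual widgets over to get_project_client (aliased to get_client so call sites stay unchanged) and drop leftover DEBUG prints. The cancel flow relies on Textual's message bubbling; a minimal sketch of that pattern (only the class name comes from the diff, the body shown is an assumption):

    from textual.message import Message

    class ValidationCancelMessage(Message):
        """Posted when the user backs out of git validation."""

    # Inside GitValidationWidget: self.post_message(ValidationCancelMessage())
    # bubbles up to the app, which Textual routes to the handler named
    # on_validation_cancel_message, as seen in DeploymentEditApp above.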