llamactl 0.2.7a1 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,122 @@
+ Metadata-Version: 2.3
+ Name: llamactl
+ Version: 0.2.7a1
+ Summary: A command-line interface for managing LlamaDeploy projects and deployments
+ Author: Adrian Lyjak
+ Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
+ License: MIT
+ Requires-Dist: llama-deploy-core>=0.2.7a1,<0.3.0
+ Requires-Dist: llama-deploy-appserver>=0.2.7a1,<0.3.0
+ Requires-Dist: httpx>=0.24.0
+ Requires-Dist: rich>=13.0.0
+ Requires-Dist: questionary>=2.0.0
+ Requires-Dist: click>=8.2.1
+ Requires-Dist: python-dotenv>=1.0.0
+ Requires-Dist: tenacity>=9.1.2
+ Requires-Dist: textual>=4.0.0
+ Requires-Dist: aiohttp>=3.12.14
+ Requires-Python: >=3.12, <4
+ Description-Content-Type: text/markdown
+
+ # llamactl
+
+ > [!WARNING]
+ > This repository contains pre-release software. It is unstable, incomplete, and subject to breaking changes. Not recommended for use.
+
+
+ A command-line interface for managing LlamaDeploy projects and deployments.
+
+ ## Installation
+
+ Install from PyPI:
+
+ ```bash
+ pip install llamactl
+ ```
+
+ Or using uv:
+
+ ```bash
+ uv add llamactl
+ ```
+
+ ## Quick Start
+
+ 1. **Configure your profile**: Set up connection to your LlamaDeploy control plane
+ ```bash
+ llamactl profile configure
+ ```
+
+ 2. **Check health**: Verify connection to the control plane
+ ```bash
+ llamactl health
+ ```
+
+ 3. **Create a project**: Initialize a new deployment project
+ ```bash
+ llamactl project create my-project
+ ```
+
+ 4. **Deploy**: Deploy your project to the control plane
+ ```bash
+ llamactl deployment create my-deployment --project-name my-project
+ ```
+
+ ## Commands
+
+ ### Profile Management
+ - `llamactl profile configure` - Configure connection to control plane
+ - `llamactl profile show` - Show current profile configuration
+ - `llamactl profile list` - List all configured profiles
+
+ ### Project Management
+ - `llamactl project create <name>` - Create a new project
+ - `llamactl project list` - List all projects
+ - `llamactl project show <name>` - Show project details
+ - `llamactl project delete <name>` - Delete a project
+
+ ### Deployment Management
+ - `llamactl deployment create <name>` - Create a new deployment
+ - `llamactl deployment list` - List all deployments
+ - `llamactl deployment show <name>` - Show deployment details
+ - `llamactl deployment delete <name>` - Delete a deployment
+ - `llamactl deployment logs <name>` - View deployment logs
+
+ ### Health & Status
+ - `llamactl health` - Check control plane health
+ - `llamactl serve` - Start local development server
+
+ ## Configuration
+
+ llamactl stores configuration in your home directory at `~/.llamactl/`.
+
+ ### Profile Configuration
+ Profiles allow you to manage multiple control plane connections:
+
+ ```bash
+ # Configure default profile
+ llamactl profile configure
+
+ # Configure named profile
+ llamactl profile configure --profile production
+
+ # Use specific profile for commands
+ llamactl --profile production deployment list
+ ```
+
+ ## Development
+
+ This CLI is part of the LlamaDeploy ecosystem. For development setup:
+
+ 1. Clone the repository
+ 2. Install dependencies: `uv sync`
+ 3. Run tests: `uv run pytest`
+
+ ## Requirements
+
+ - Python 3.12+
+ - Access to a LlamaDeploy control plane
+
+ ## License
+
+ This project is licensed under the MIT License.
@@ -0,0 +1,102 @@
+ # llamactl
+
+ > [!WARNING]
+ > This repository contains pre-release software. It is unstable, incomplete, and subject to breaking changes. Not recommended for use.
+
+
+ A command-line interface for managing LlamaDeploy projects and deployments.
+
+ ## Installation
+
+ Install from PyPI:
+
+ ```bash
+ pip install llamactl
+ ```
+
+ Or using uv:
+
+ ```bash
+ uv add llamactl
+ ```
+
+ ## Quick Start
+
+ 1. **Configure your profile**: Set up connection to your LlamaDeploy control plane
+ ```bash
+ llamactl profile configure
+ ```
+
+ 2. **Check health**: Verify connection to the control plane
+ ```bash
+ llamactl health
+ ```
+
+ 3. **Create a project**: Initialize a new deployment project
+ ```bash
+ llamactl project create my-project
+ ```
+
+ 4. **Deploy**: Deploy your project to the control plane
+ ```bash
+ llamactl deployment create my-deployment --project-name my-project
+ ```
+
+ ## Commands
+
+ ### Profile Management
+ - `llamactl profile configure` - Configure connection to control plane
+ - `llamactl profile show` - Show current profile configuration
+ - `llamactl profile list` - List all configured profiles
+
+ ### Project Management
+ - `llamactl project create <name>` - Create a new project
+ - `llamactl project list` - List all projects
+ - `llamactl project show <name>` - Show project details
+ - `llamactl project delete <name>` - Delete a project
+
+ ### Deployment Management
+ - `llamactl deployment create <name>` - Create a new deployment
+ - `llamactl deployment list` - List all deployments
+ - `llamactl deployment show <name>` - Show deployment details
+ - `llamactl deployment delete <name>` - Delete a deployment
+ - `llamactl deployment logs <name>` - View deployment logs
+
+ ### Health & Status
+ - `llamactl health` - Check control plane health
+ - `llamactl serve` - Start local development server
+
+ ## Configuration
+
+ llamactl stores configuration in your home directory at `~/.llamactl/`.
+
+ ### Profile Configuration
+ Profiles allow you to manage multiple control plane connections:
+
+ ```bash
+ # Configure default profile
+ llamactl profile configure
+
+ # Configure named profile
+ llamactl profile configure --profile production
+
+ # Use specific profile for commands
+ llamactl --profile production deployment list
+ ```
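Whatever a profile stores on disk, the bundled HTTP client only reads two values from it at startup: the control plane API URL and the active project ID. A minimal sketch of reading the active profile programmatically, assuming the config module is importable at the path below (the import path is an inference from this package's layout, not something the README documents):

```python
# Sketch: read the active llamactl profile the same way the bundled client does.
# The import path is an assumption based on this package's module layout.
from llama_deploy.cli.config import config_manager

profile = config_manager.get_current_profile()
if not profile:
    print("No profile configured; run `llamactl profile configure` first.")
else:
    print(f"API URL:    {profile.api_url}")
    print(f"Project ID: {profile.active_project_id}")
```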
+
+ ## Development
+
+ This CLI is part of the LlamaDeploy ecosystem. For development setup:
+
+ 1. Clone the repository
+ 2. Install dependencies: `uv sync`
+ 3. Run tests: `uv run pytest`
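The dev dependency group already includes pytest, and because the CLI is a Click group, commands can be exercised in-process with Click's test runner rather than against a live control plane. A minimal sketch, assuming the `app` group is importable from `llama_deploy.cli` (as the CLI module later in this diff suggests):

```python
# Sketch: drive the CLI with Click's test runner; no control plane needed.
# Assumes the `app` click group is importable from llama_deploy.cli.
from click.testing import CliRunner

from llama_deploy.cli import app


def test_help_runs_cleanly() -> None:
    runner = CliRunner()
    result = runner.invoke(app, ["--help"])
    assert result.exit_code == 0
    assert "deployment" in result.output
```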
+
+ ## Requirements
+
+ - Python 3.12+
+ - Access to a LlamaDeploy control plane
+
+ ## License
+
+ This project is licensed under the MIT License.
@@ -0,0 +1,42 @@
+ [project]
+ name = "llamactl"
+ version = "0.2.7a1"
+ description = "A command-line interface for managing LlamaDeploy projects and deployments"
+ readme = "README.md"
+ license = { text = "MIT" }
+ authors = [
+ { name = "Adrian Lyjak", email = "adrianlyjak@gmail.com" }
+ ]
+ requires-python = ">=3.12, <4"
+ dependencies = [
+ "llama-deploy-core>=0.2.7a1,<0.3.0",
+ "llama-deploy-appserver>=0.2.7a1,<0.3.0",
+ "httpx>=0.24.0",
+ "rich>=13.0.0",
+ "questionary>=2.0.0",
+ "click>=8.2.1",
+ "python-dotenv>=1.0.0",
+ "tenacity>=9.1.2",
+ "textual>=4.0.0",
+ "aiohttp>=3.12.14",
+ ]
+
+ [project.scripts]
+ llamactl = "llama_deploy.cli:main"
+
+ [build-system]
+ requires = ["uv_build>=0.7.20,<0.8.0"]
+ build-backend = "uv_build"
+
+ [dependency-groups]
+ dev = [
+ "pytest>=8.3.4",
+ "pytest-asyncio>=0.25.3",
+ "respx>=0.22.0",
+ ]
+
+ [tool.uv.build-backend]
+ module-name = "llama_deploy.cli"
+
+ [tool.uv.sources]
+ llama-deploy-appserver = { workspace = true }
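The `[project.scripts]` table above is what turns `llamactl` into a console command: installers generate a small wrapper that imports `main` from `llama_deploy.cli` and calls it. The same entry point can also be driven from Python, which is occasionally handy for debugging; a minimal sketch, assuming the package is installed:

```python
# Sketch: invoke the console-script entry point directly instead of the
# generated `llamactl` wrapper. Equivalent to running `llamactl --help`.
import sys

from llama_deploy.cli import main

if __name__ == "__main__":
    sys.argv = ["llamactl", "--help"]
    main()
```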
@@ -0,0 +1,32 @@
+ import click
+ from .commands import projects, deployments, profile, health_check, serve
+ from .options import global_options
+
+
+ # Main CLI application
+ @click.group(help="LlamaDeploy CLI - Manage projects and deployments")
+ @global_options
+ def app():
+     """LlamaDeploy CLI - Manage projects and deployments"""
+     pass
+
+
+ # Add sub-commands
+ app.add_command(profile, name="profile")
+ app.add_command(projects, name="project")
+ app.add_command(deployments, name="deployment")
+
+ # Add health check at root level
+ app.add_command(health_check, name="health")
+
+ # Add serve command at root level
+ app.add_command(serve, name="serve")
+
+
+ # Main entry point function (called by the script)
+ def main() -> None:
+     app()
+
+
+ if __name__ == "__main__":
+     app()
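Sub-commands are attached to the root group with `app.add_command`, so extending the CLI follows the same pattern. A minimal sketch of registering an additional group (the `status` group below is hypothetical, not something llamactl ships):

```python
# Sketch: registering an extra command group on the root CLI group.
# `status` and its sub-command are hypothetical examples.
import click

from llama_deploy.cli import app


@click.group(help="Inspect runtime status")
def status() -> None:
    """Inspect runtime status"""


@status.command(name="summary")
def status_summary() -> None:
    """Print a one-line status summary"""
    click.echo("all systems nominal")


app.add_command(status, name="status")
```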
@@ -0,0 +1,173 @@
+ import logging
+ from typing import List, Optional
+
+ import httpx
+ from llama_deploy.core.schema.deployments import (
+     DeploymentCreate,
+     DeploymentResponse,
+     DeploymentsListResponse,
+     DeploymentUpdate,
+ )
+ from llama_deploy.core.schema.git_validation import (
+     RepositoryValidationRequest,
+     RepositoryValidationResponse,
+ )
+ from llama_deploy.core.schema.projects import ProjectSummary, ProjectsListResponse
+ from rich.console import Console
+
+ from .config import config_manager
+
+
+ class LlamaDeployClient:
+     """HTTP client for communicating with the LlamaDeploy control plane API"""
+
+     def __init__(
+         self, base_url: Optional[str] = None, project_id: Optional[str] = None
+     ):
+         """Initialize the client with a configured profile"""
+         self.console = Console()
+
+         # Get profile data
+         profile = config_manager.get_current_profile()
+         if not profile:
+             self.console.print("\n[bold red]No profile configured![/bold red]")
+             self.console.print("\nTo get started, create a profile with:")
+             self.console.print("[cyan]llamactl profile create[/cyan]")
+             raise SystemExit(1)
+
+         # Use profile data with optional overrides
+         self.base_url = base_url or profile.api_url
+         self.project_id = project_id or profile.active_project_id
+
+         if not self.base_url:
+             raise ValueError("API URL is required")
+
+         if not self.project_id:
+             raise ValueError("Project ID is required")
+
+         self.base_url = self.base_url.rstrip("/")
+
+         # Create persistent client with event hooks
+         self.client = httpx.Client(
+             base_url=self.base_url, event_hooks={"response": [self._handle_response]}
+         )
+
+     def _handle_response(self, response: httpx.Response) -> None:
+         """Handle response middleware - warnings and error conversion"""
+         # Check for warnings in response headers
+         if "X-Warning" in response.headers:
+             self.console.print(
+                 f"[yellow]Warning: {response.headers['X-Warning']}[/yellow]"
+             )
+
+         # Convert httpx errors to our current exception format
+         try:
+             response.raise_for_status()
+         except httpx.HTTPStatusError as e:
+             # Try to parse JSON error response
+             try:
+                 response.read()  # need to collect streaming data before calling json
+                 error_data = e.response.json()
+                 if isinstance(error_data, dict) and "detail" in error_data:
+                     error_message = error_data["detail"]
+                 else:
+                     error_message = str(error_data)
+             except (ValueError, KeyError):
+                 # Fallback to raw response text
+                 error_message = e.response.text
+
+             raise Exception(f"HTTP {e.response.status_code}: {error_message}") from e
+         except httpx.RequestError as e:
+             raise Exception(f"Request failed: {e}") from e
+
+     # Health check
+     def health_check(self) -> dict:
+         """Check if the API server is healthy"""
+         response = self.client.get("/health")
+         return response.json()
+
+     # Projects
+     def list_projects(self) -> List[ProjectSummary]:
+         """List all projects with deployment counts"""
+         response = self.client.get("/projects/")
+         projects_response = ProjectsListResponse.model_validate(response.json())
+         return [project for project in projects_response.projects]
+
+     # Deployments
+     def list_deployments(self) -> List[DeploymentResponse]:
+         """List deployments for the configured project"""
+         response = self.client.get(f"/{self.project_id}/deployments/")
+         deployments_response = DeploymentsListResponse.model_validate(response.json())
+         return [deployment for deployment in deployments_response.deployments]
+
+     def get_deployment(self, deployment_id: str) -> DeploymentResponse:
+         """Get a specific deployment"""
+         response = self.client.get(f"/{self.project_id}/deployments/{deployment_id}")
+         deployment = DeploymentResponse.model_validate(response.json())
+         return deployment
+
+     def create_deployment(
+         self,
+         deployment_data: DeploymentCreate,
+     ) -> DeploymentResponse:
+         """Create a new deployment"""
+
+         response = self.client.post(
+             f"/{self.project_id}/deployments/",
+             json=deployment_data.model_dump(exclude_none=True),
+         )
+         deployment = DeploymentResponse.model_validate(response.json())
+         return deployment
+
+     def delete_deployment(self, deployment_id: str) -> None:
+         """Delete a deployment"""
+         self.client.delete(f"/{self.project_id}/deployments/{deployment_id}")
+
+     def update_deployment(
+         self,
+         deployment_id: str,
+         update_data: DeploymentUpdate,
+         force_git_sha_update: bool = False,
+     ) -> DeploymentResponse:
+         """Update an existing deployment"""
+
+         params = {}
+         if force_git_sha_update:
+             params["force_git_sha_update"] = True
+
+         response = self.client.patch(
+             f"/{self.project_id}/deployments/{deployment_id}",
+             json=update_data.model_dump(),
+             params=params,
+         )
+         deployment = DeploymentResponse.model_validate(response.json())
+         return deployment
+
+     def validate_repository(
+         self,
+         repo_url: str,
+         deployment_id: str | None = None,
+         pat: str | None = None,
+     ) -> RepositoryValidationResponse:
+         """Validate a repository URL"""
+         # Log the request, but never the PAT value itself
+         logging.info(
+             "Validating repository %s (deployment_id=%s, pat=%s)",
+             repo_url,
+             deployment_id,
+             "***" if pat else None,
+         )
+         response = self.client.post(
+             f"/{self.project_id}/deployments/validate-repository",
+             json=RepositoryValidationRequest(
+                 repository_url=repo_url,
+                 deployment_id=deployment_id,
+                 pat=pat,
+             ).model_dump(),
+         )
+         logging.info(f"Response: {response.json()}")
+         return RepositoryValidationResponse.model_validate(response.json())
+
+
+ # Global client factory function
+ def get_client(
+     base_url: Optional[str] = None, project_id: Optional[str] = None
+ ) -> LlamaDeployClient:
+     """Get a client instance with optional overrides"""
+     return LlamaDeployClient(base_url=base_url, project_id=project_id)
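Commands are expected to obtain a client through `get_client()`, which resolves the active profile and exits with a hint when none is configured. Because the response hook converts HTTP and transport failures into plain `Exception`s, callers typically wrap calls in a broad try/except. A minimal usage sketch, assuming the module is importable at the path below (the import path is an inference from this package's layout):

```python
# Sketch: how a command might consume the client. get_client() exits with a
# message if no profile is configured, and the response hook re-raises HTTP
# and transport errors as plain Exceptions, so a broad except is expected here.
from rich.console import Console

from llama_deploy.cli.client import get_client  # import path is an assumption

console = Console()

try:
    client = get_client()
    for deployment in client.list_deployments():
        console.print(deployment)
except Exception as exc:  # raised by _handle_response on HTTP/transport errors
    console.print(f"[red]{exc}[/red]")
```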