llamactl 0.2.7a1__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -0,0 +1,32 @@
+ import click
+ from .commands import projects, deployments, profile, health_check, serve
+ from .options import global_options
+
+
+ # Main CLI application
+ @click.group(help="LlamaDeploy CLI - Manage projects and deployments")
+ @global_options
+ def app():
+     """LlamaDeploy CLI - Manage projects and deployments"""
+     pass
+
+
+ # Add sub-commands
+ app.add_command(profile, name="profile")
+ app.add_command(projects, name="project")
+ app.add_command(deployments, name="deployment")
+
+ # Add health check at root level
+ app.add_command(health_check, name="health")
+
+ # Add serve command at root level
+ app.add_command(serve, name="serve")
+
+
+ # Main entry point function (called by the script)
+ def main() -> None:
+     app()
+
+
+ if __name__ == "__main__":
+     app()
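
For orientation, a minimal smoke test of the command group added above, using click's public CliRunner API. The llamactl.cli import path and the test itself are assumptions for illustration only; they are not part of this diff.

from click.testing import CliRunner

from llamactl.cli import app  # assumed module path; the diff does not show file names


def test_cli_help_lists_subcommands() -> None:
    runner = CliRunner()
    result = runner.invoke(app, ["--help"])
    assert result.exit_code == 0
    # Each name passed to add_command above should appear in the help output
    for name in ("profile", "project", "deployment", "health", "serve"):
        assert name in result.output
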
@@ -0,0 +1,173 @@
+ import logging
+ from typing import List, Optional
+
+ import httpx
+ from llama_deploy.core.schema.deployments import (
+     DeploymentCreate,
+     DeploymentResponse,
+     DeploymentsListResponse,
+     DeploymentUpdate,
+ )
+ from llama_deploy.core.schema.git_validation import (
+     RepositoryValidationRequest,
+     RepositoryValidationResponse,
+ )
+ from llama_deploy.core.schema.projects import ProjectSummary, ProjectsListResponse
+ from rich.console import Console
+
+ from .config import config_manager
+
+
+ class LlamaDeployClient:
+     """HTTP client for communicating with the LlamaDeploy control plane API"""
+
+     def __init__(
+         self, base_url: Optional[str] = None, project_id: Optional[str] = None
+     ):
+         """Initialize the client with a configured profile"""
+         self.console = Console()
+
+         # Get profile data
+         profile = config_manager.get_current_profile()
+         if not profile:
+             self.console.print("\n[bold red]No profile configured![/bold red]")
+             self.console.print("\nTo get started, create a profile with:")
+             self.console.print("[cyan]llamactl profile create[/cyan]")
+             raise SystemExit(1)
+
+         # Use profile data with optional overrides
+         self.base_url = base_url or profile.api_url
+         self.project_id = project_id or profile.active_project_id
+
+         if not self.base_url:
+             raise ValueError("API URL is required")
+
+         if not self.project_id:
+             raise ValueError("Project ID is required")
+
+         self.base_url = self.base_url.rstrip("/")
+
+         # Create persistent client with event hooks
+         self.client = httpx.Client(
+             base_url=self.base_url, event_hooks={"response": [self._handle_response]}
+         )
+
+     def _handle_response(self, response: httpx.Response) -> None:
+         """Handle response middleware - warnings and error conversion"""
+         # Check for warnings in response headers
+         if "X-Warning" in response.headers:
+             self.console.print(
+                 f"[yellow]Warning: {response.headers['X-Warning']}[/yellow]"
+             )
+
+         # Convert httpx errors to our current exception format
+         try:
+             response.raise_for_status()
+         except httpx.HTTPStatusError as e:
+             # Try to parse JSON error response
+             try:
+                 response.read()  # need to collect streaming data before calling json
+                 error_data = e.response.json()
+                 if isinstance(error_data, dict) and "detail" in error_data:
+                     error_message = error_data["detail"]
+                 else:
+                     error_message = str(error_data)
+             except (ValueError, KeyError):
+                 # Fallback to raw response text
+                 error_message = e.response.text
+
+             raise Exception(f"HTTP {e.response.status_code}: {error_message}") from e
+         except httpx.RequestError as e:
+             raise Exception(f"Request failed: {e}") from e
+
+     # Health check
+     def health_check(self) -> dict:
+         """Check if the API server is healthy"""
+         response = self.client.get("/health")
+         return response.json()
+
+     # Projects
+     def list_projects(self) -> List[ProjectSummary]:
+         """List all projects with deployment counts"""
+         response = self.client.get("/projects/")
+         projects_response = ProjectsListResponse.model_validate(response.json())
+         return [project for project in projects_response.projects]
+
+     # Deployments
+     def list_deployments(self) -> List[DeploymentResponse]:
+         """List deployments for the configured project"""
+         response = self.client.get(f"/{self.project_id}/deployments/")
+         deployments_response = DeploymentsListResponse.model_validate(response.json())
+         return [deployment for deployment in deployments_response.deployments]
+
+     def get_deployment(self, deployment_id: str) -> DeploymentResponse:
+         """Get a specific deployment"""
+         response = self.client.get(f"/{self.project_id}/deployments/{deployment_id}")
+         deployment = DeploymentResponse.model_validate(response.json())
+         return deployment
+
+     def create_deployment(
+         self,
+         deployment_data: DeploymentCreate,
+     ) -> DeploymentResponse:
+         """Create a new deployment"""
+
+         response = self.client.post(
+             f"/{self.project_id}/deployments/",
+             json=deployment_data.model_dump(exclude_none=True),
+         )
+         deployment = DeploymentResponse.model_validate(response.json())
+         return deployment
+
+     def delete_deployment(self, deployment_id: str) -> None:
+         """Delete a deployment"""
+         self.client.delete(f"/{self.project_id}/deployments/{deployment_id}")
+
+     def update_deployment(
+         self,
+         deployment_id: str,
+         update_data: DeploymentUpdate,
+         force_git_sha_update: bool = False,
+     ) -> DeploymentResponse:
+         """Update an existing deployment"""
+
+         params = {}
+         if force_git_sha_update:
+             params["force_git_sha_update"] = True
+
+         response = self.client.patch(
+             f"/{self.project_id}/deployments/{deployment_id}",
+             json=update_data.model_dump(),
+             params=params,
+         )
+         deployment = DeploymentResponse.model_validate(response.json())
+         return deployment
+
+     def validate_repository(
+         self,
+         repo_url: str,
+         deployment_id: str | None = None,
+         pat: str | None = None,
+     ) -> RepositoryValidationResponse:
+         """Validate a repository URL"""
+         logging.info(
+             f"Validating repository with params: {repo_url}, {deployment_id}, {pat}"
+         )
+         response = self.client.post(
+             f"/{self.project_id}/deployments/validate-repository",
+             json=RepositoryValidationRequest(
+                 repository_url=repo_url,
+                 deployment_id=deployment_id,
+                 pat=pat,
+             ).model_dump(),
+         )
+         logging.info(f"Response: {response.json()}")
+         return RepositoryValidationResponse.model_validate(response.json())
+
+
+ # Global client factory function
+ def get_client(
+     base_url: Optional[str] = None, project_id: Optional[str] = None
+ ) -> LlamaDeployClient:
+     """Get a client instance with optional overrides"""
+     return LlamaDeployClient(base_url=base_url, project_id=project_id)
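
A minimal usage sketch of the client defined above, assuming the module is importable as llamactl.client (the diff does not show file names) and that a profile with an API URL and an active project has already been created via llamactl profile create:

from llamactl.client import get_client  # assumed import path

client = get_client()          # resolves the API URL and project from the current profile
print(client.health_check())   # GET /health

# List projects and the current project's deployments via the endpoints shown above
for project in client.list_projects():
    print(project)
for deployment in client.list_deployments():
    print(deployment)
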