llamactl 0.3.0a9__py3-none-any.whl → 0.3.0a11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_deploy/cli/client.py +11 -252
- llama_deploy/cli/commands/aliased_group.py +3 -1
- llama_deploy/cli/commands/init.py +1 -1
- llama_deploy/cli/commands/serve.py +16 -2
- llama_deploy/cli/options.py +10 -4
- llama_deploy/cli/py.typed +0 -0
- llama_deploy/cli/textual/deployment_form.py +2 -1
- llama_deploy/cli/textual/deployment_monitor.py +3 -2
- llama_deploy/cli/textual/git_validation.py +3 -3
- llama_deploy/cli/textual/llama_loader.py +12 -1
- llama_deploy/cli/textual/profile_form.py +2 -1
- {llamactl-0.3.0a9.dist-info → llamactl-0.3.0a11.dist-info}/METADATA +3 -3
- llamactl-0.3.0a11.dist-info/RECORD +27 -0
- llamactl-0.3.0a9.dist-info/RECORD +0 -26
- {llamactl-0.3.0a9.dist-info → llamactl-0.3.0a11.dist-info}/WHEEL +0 -0
- {llamactl-0.3.0a9.dist-info → llamactl-0.3.0a11.dist-info}/entry_points.txt +0 -0
llama_deploy/cli/client.py
CHANGED
```diff
@@ -1,267 +1,29 @@
-import contextlib
-from typing import Iterator, List
-
-import httpx
-from llama_deploy.core.schema.base import LogEvent
-from llama_deploy.core.schema.deployments import (
-    DeploymentCreate,
-    DeploymentResponse,
-    DeploymentsListResponse,
-    DeploymentUpdate,
-)
-from llama_deploy.core.schema.git_validation import (
-    RepositoryValidationRequest,
-    RepositoryValidationResponse,
-)
-from llama_deploy.core.schema.projects import ProjectsListResponse, ProjectSummary
-from rich.console import Console
-
-from .config import config_manager
-
-
-class ClientError(Exception):
-    """Base class for client errors."""
-
-    def __init__(self, message: str) -> None:
-        super().__init__(message)
-
-
-class BaseClient:
-    def __init__(self, base_url: str, console: Console) -> None:
-        self.base_url = base_url.rstrip("/")
-        self.console = console
-        self.client = httpx.Client(
-            base_url=self.base_url,
-            event_hooks={"response": [self._handle_response]},
-        )
-        self.hookless_client = httpx.Client(base_url=self.base_url)
-
-    def _handle_response(self, response: httpx.Response) -> None:
-        try:
-            response.raise_for_status()
-        except httpx.HTTPStatusError as e:
-            try:
-                response.read()
-                error_data = e.response.json()
-                if isinstance(error_data, dict) and "detail" in error_data:
-                    error_message = error_data["detail"]
-                else:
-                    error_message = str(error_data)
-            except (ValueError, KeyError):
-                error_message = e.response.text
-            raise ClientError(f"HTTP {e.response.status_code}: {error_message}") from e
-        except httpx.RequestError as e:
-            raise ClientError(f"Request failed: {e}") from e
-
-
-class ControlPlaneClient(BaseClient):
-    """Unscoped client for non-project endpoints."""
-
-    def health_check(self) -> dict:
-        response = self.client.get("/health")
-        return response.json()
-
-    def server_version(self) -> dict:
-        response = self.client.get("/version")
-        return response.json()
-
-    def list_projects(self) -> List[ProjectSummary]:
-        response = self.client.get("/api/v1beta1/deployments/list-projects")
-        projects_response = ProjectsListResponse.model_validate(response.json())
-        return [project for project in projects_response.projects]
-
-
-class ProjectClient(BaseClient):
-    """Project-scoped client for deployment operations."""
-
-    def __init__(
-        self,
-        base_url: str | None = None,
-        project_id: str | None = None,
-        console: Console | None = None,
-    ) -> None:
-        # Allow default construction using active profile (for tests and convenience)
-        if base_url is None or project_id is None:
-            profile = config_manager.get_current_profile()
-            if not profile:
-                # Match previous behavior for missing profiles
-                (console or Console()).print(
-                    "\n[bold red]No profile configured![/bold red]"
-                )
-                (console or Console()).print("\nTo get started, create a profile with:")
-                (console or Console()).print("[cyan]llamactl profile create[/cyan]")
-                raise SystemExit(1)
-            base_url = base_url or profile.api_url or ""
-            project_id = project_id or profile.active_project_id
-        if not base_url:
-            raise ValueError("API URL is required")
-        if not project_id:
-            raise ValueError("Project ID is required")
-        resolved_console = console or Console()
-        super().__init__(base_url, resolved_console)
-        self.project_id = project_id
-
-    def list_deployments(self) -> List[DeploymentResponse]:
-        response = self.client.get(
-            "/api/v1beta1/deployments",
-            params={"project_id": self.project_id},
-        )
-        deployments_response = DeploymentsListResponse.model_validate(response.json())
-        return [deployment for deployment in deployments_response.deployments]
-
-    def get_deployment(
-        self, deployment_id: str, include_events: bool = False
-    ) -> DeploymentResponse:
-        response = self.client.get(
-            f"/api/v1beta1/deployments/{deployment_id}",
-            params={"project_id": self.project_id, "include_events": include_events},
-        )
-        return DeploymentResponse.model_validate(response.json())
-
-    def create_deployment(
-        self, deployment_data: DeploymentCreate
-    ) -> DeploymentResponse:
-        response = self.client.post(
-            "/api/v1beta1/deployments",
-            params={"project_id": self.project_id},
-            json=deployment_data.model_dump(exclude_none=True),
-        )
-        return DeploymentResponse.model_validate(response.json())
-
-    def delete_deployment(self, deployment_id: str) -> None:
-        self.client.delete(
-            f"/api/v1beta1/deployments/{deployment_id}",
-            params={"project_id": self.project_id},
-        )
-
-    def update_deployment(
-        self,
-        deployment_id: str,
-        update_data: DeploymentUpdate,
-    ) -> DeploymentResponse:
-        response = self.client.patch(
-            f"/api/v1beta1/deployments/{deployment_id}",
-            params={"project_id": self.project_id},
-            json=update_data.model_dump(),
-        )
-        return DeploymentResponse.model_validate(response.json())
-
-    def validate_repository(
-        self,
-        repo_url: str,
-        deployment_id: str | None = None,
-        pat: str | None = None,
-    ) -> RepositoryValidationResponse:
-        response = self.client.post(
-            "/api/v1beta1/deployments/validate-repository",
-            params={"project_id": self.project_id},
-            json=RepositoryValidationRequest(
-                repository_url=repo_url,
-                deployment_id=deployment_id,
-                pat=pat,
-            ).model_dump(),
-        )
-        return RepositoryValidationResponse.model_validate(response.json())
-
-    def stream_deployment_logs(
-        self,
-        deployment_id: str,
-        *,
-        include_init_containers: bool = False,
-        since_seconds: int | None = None,
-        tail_lines: int | None = None,
-    ) -> tuple["Closer", Iterator[LogEvent]]:
-        """Stream logs as LogEvent items from the control plane using SSE.
-
-        This yields `LogEvent` models until the stream ends (e.g. rollout).
-        """
-        # Use a separate client without response hooks so we don't consume the stream
-
-        params = {
-            "project_id": self.project_id,
-            "include_init_containers": include_init_containers,
-        }
-        if since_seconds is not None:
-            params["since_seconds"] = since_seconds
-        if tail_lines is not None:
-            params["tail_lines"] = tail_lines
-
-        url = f"/api/v1beta1/deployments/{deployment_id}/logs"
-        headers = {"Accept": "text/event-stream"}
-
-        stack = contextlib.ExitStack()
-        response = stack.enter_context(
-            self.hookless_client.stream(
-                "GET", url, params=params, headers=headers, timeout=None
-            )
-        )
-        try:
-            response.raise_for_status()
-        except Exception:
-            stack.close()
-            raise
-
-        return stack.close, _iterate_log_stream(response, stack.close)
-
-
-def _iterate_log_stream(
-    response: httpx.Response, closer: "Closer"
-) -> Iterator[LogEvent]:
-    event_name: str | None = None
-    data_lines: list[str] = []
-
-    try:
-        for line in response.iter_lines():
-            if line is None:
-                continue
-            line = line.decode() if isinstance(line, (bytes, bytearray)) else line
-            print("got line", line)
-            if line.startswith("event:"):
-                event_name = line[len("event:") :].strip()
-            elif line.startswith("data:"):
-                data_lines.append(line[len("data:") :].lstrip())
-            elif line.strip() == "":
-                if event_name == "log" and data_lines:
-                    data_str = "\n".join(data_lines)
-                    try:
-                        yield LogEvent.model_validate_json(data_str)
-                        print("yielded log event", data_str)
-                    except Exception:
-                        # If parsing fails, skip malformed event
-                        pass
-                # reset for next event
-                event_name = None
-                data_lines = []
-    finally:
-        try:
-            closer()
-        except Exception:
-            pass
+from llama_deploy.cli.config import config_manager
+from llama_deploy.core.client.manage_client import ControlPlaneClient, ProjectClient
+from rich import print as rprint


 def get_control_plane_client(base_url: str | None = None) -> ControlPlaneClient:
-    console = Console()
     profile = config_manager.get_current_profile()
     if not profile and not base_url:
-        console.print("\n[bold red]No profile configured![/bold red]")
-        console.print("\nTo get started, create a profile with:")
-        console.print("[cyan]llamactl profile create[/cyan]")
+        rprint("\n[bold red]No profile configured![/bold red]")
+        rprint("\nTo get started, create a profile with:")
+        rprint("[cyan]llamactl profile create[/cyan]")
         raise SystemExit(1)
     resolved_base_url = (base_url or (profile.api_url if profile else "")).rstrip("/")
     if not resolved_base_url:
         raise ValueError("API URL is required")
-    return ControlPlaneClient(resolved_base_url, console)
+    return ControlPlaneClient(resolved_base_url)


 def get_project_client(
     base_url: str | None = None, project_id: str | None = None
 ) -> ProjectClient:
-    console = Console()
     profile = config_manager.get_current_profile()
     if not profile:
-        console.print("\n[bold red]No profile configured![/bold red]")
-        console.print("\nTo get started, create a profile with:")
-        console.print("[cyan]llamactl profile create[/cyan]")
+        rprint("\n[bold red]No profile configured![/bold red]")
+        rprint("\nTo get started, create a profile with:")
+        rprint("[cyan]llamactl profile create[/cyan]")
         raise SystemExit(1)
     resolved_base_url = (base_url or profile.api_url or "").rstrip("/")
     if not resolved_base_url:
@@ -269,7 +31,4 @@ def get_project_client(
     resolved_project_id = project_id or profile.active_project_id
     if not resolved_project_id:
         raise ValueError("Project ID is required")
-    return ProjectClient(resolved_base_url, resolved_project_id, console)
-
-
-type Closer = callable[tuple[()], None]
+    return ProjectClient(resolved_base_url, resolved_project_id)
```
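The module now does profile resolution only; the HTTP plumbing (`BaseClient`, error handling, SSE log streaming) has moved to `llama_deploy.core.client.manage_client`. A minimal usage sketch, assuming an installed `llamactl` with a profile already configured; the methods available on the returned clients live in `manage_client` and are not shown in this diff:

```python
from llama_deploy.cli.client import get_control_plane_client, get_project_client

# Resolves api_url from the active profile when base_url is omitted.
control_plane = get_control_plane_client()

# Additionally resolves active_project_id from the profile.
project = get_project_client()

# Both factories print a "No profile configured!" hint and raise
# SystemExit(1) when no profile exists, and raise ValueError when the
# API URL or project ID cannot be resolved.
```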

llama_deploy/cli/commands/aliased_group.py
CHANGED
```diff
@@ -25,7 +25,9 @@ class AliasedGroup(click.Group):

         ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")

-    def resolve_command(self, ctx, args):
+    def resolve_command(
+        self, ctx: click.Context, args: list[str]
+    ) -> tuple[str, click.Command, list[str]]:
         # always return the full command name
         _, cmd, args = super().resolve_command(ctx, args)
         return cmd.name, cmd, args
```
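For context, `resolve_command` here is the second half of click's standard prefix-aliasing recipe. The `get_command` half is not part of this diff, so the sketch below reconstructs it from the visible `ctx.fail(...)` context line and the upstream click cookbook; treat it as an assumption, not the package's exact code:

```python
import click


class AliasedGroup(click.Group):
    def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:
        # Exact match first, then unique-prefix match (assumed body).
        rv = super().get_command(ctx, cmd_name)
        if rv is not None:
            return rv
        matches = [x for x in self.list_commands(ctx) if x.startswith(cmd_name)]
        if not matches:
            return None
        if len(matches) == 1:
            return super().get_command(ctx, matches[0])
        ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")

    def resolve_command(
        self, ctx: click.Context, args: list[str]
    ) -> tuple[str, click.Command, list[str]]:
        # Report the full command name, not the abbreviation the user typed.
        _, cmd, args = super().resolve_command(ctx, args)
        return cmd.name, cmd, args
```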

llama_deploy/cli/commands/init.py
CHANGED
```diff
@@ -26,7 +26,7 @@ options = [
         id="basic-ui",
         name="Basic UI",
         description="A basic starter workflow with a React Vite UI",
-        git_url="https://github.com/
+        git_url="https://github.com/run-llama/template-workflow-basic-ui",
     ),
     TemplateOption(
         id="extraction-review",
```

llama_deploy/cli/commands/serve.py
CHANGED
```diff
@@ -9,7 +9,6 @@ from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from rich import print as rprint

 from ..app import app
-from ..options import global_options


 @app.command(
@@ -36,7 +35,18 @@ from ..options import global_options
 )
 @click.option("--port", type=int, help="The port to run the API server on")
 @click.option("--ui-port", type=int, help="The port to run the UI proxy server on")
-@global_options
+@click.option(
+    "--log-level",
+    type=click.Choice(
+        ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+    ),
+    help="The log level to run the API server at",
+)
+@click.option(
+    "--log-format",
+    type=click.Choice(["console", "json"], case_sensitive=False),
+    help="The format to use for logging",
+)
 def serve(
     deployment_file: Path,
     no_install: bool,
@@ -45,6 +55,8 @@ def serve(
     preview: bool,
     port: int | None = None,
     ui_port: int | None = None,
+    log_level: str | None = None,
+    log_format: str | None = None,
 ) -> None:
     """Run llama_deploy API Server in the foreground. Reads the deployment configuration from the current directory. Can optionally specify a deployment file path."""
     if not deployment_file.exists():
@@ -64,6 +76,8 @@ def serve(
             open_browser=not no_open_browser,
             port=port,
             ui_port=ui_port,
+            log_level=log_level.upper() if log_level else None,
+            log_format=log_format.lower() if log_format else None,
         )

     except KeyboardInterrupt:
```
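A quick, hedged way to exercise the new flags without typing them at a shell, using click's test runner. It assumes `llamactl` is installed and that a deployment file exists at the default path; without one the command exits early at the `deployment_file.exists()` check:

```python
from click.testing import CliRunner

from llama_deploy.cli.commands.serve import serve

runner = CliRunner()
# case_sensitive=False on the Choice, plus the .upper()/.lower()
# normalization above, means "debug"/"DEBUG" and "json"/"JSON" are
# all accepted equivalently.
result = runner.invoke(serve, ["--log-level", "debug", "--log-format", "json"])
print(result.exit_code, result.output)
```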
llama_deploy/cli/options.py
CHANGED
```diff
@@ -1,20 +1,26 @@
 import logging
+from typing import Callable, ParamSpec, TypeVar

 import click

+P = ParamSpec("P")
+R = TypeVar("R")

-def global_options(f):
+
+def global_options(f: Callable[P, R]) -> Callable[P, R]:
     """Common decorator to add global options to command groups"""
     from .debug import setup_file_logging

-    def debug_callback(ctx, param, value):
+    def debug_callback(ctx: click.Context, param: click.Parameter, value: str) -> str:
         if value:
-            setup_file_logging(level=logging._nameToLevel[value])
+            setup_file_logging(level=logging._nameToLevel.get(value, logging.INFO))
         return value

     return click.option(
         "--log-level",
-        type=click.Choice(
+        type=click.Choice(
+            ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+        ),
         help="Enable debug logging to file",
         callback=debug_callback,
         expose_value=False,
```
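The `ParamSpec` change keeps the decorated command's own signature visible to type checkers instead of collapsing it to an untyped callable. A self-contained illustration of the same pattern; the `verbose_option` name is hypothetical and only mirrors the shape of `global_options`:

```python
from typing import Callable, ParamSpec, TypeVar

import click

P = ParamSpec("P")
R = TypeVar("R")


def verbose_option(f: Callable[P, R]) -> Callable[P, R]:
    """Hypothetical decorator mirroring the shape of global_options."""

    def callback(ctx: click.Context, param: click.Parameter, value: bool) -> bool:
        if value:
            click.echo("verbose mode on", err=True)
        return value

    # click.option returns a decorator; applying it to f leaves f's
    # parameter and return types intact for type checkers.
    return click.option(
        "--verbose",
        is_flag=True,
        help="Enable verbose output",
        callback=callback,
        expose_value=False,
    )(f)
```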

llama_deploy/cli/py.typed
ADDED
File without changes (new empty marker file)

llama_deploy/cli/textual/deployment_form.py
CHANGED
```diff
@@ -38,6 +38,7 @@ from llama_deploy.core.schema.deployments import (
     DeploymentResponse,
     DeploymentUpdate,
 )
+from textual import events
 from textual.app import App, ComposeResult
 from textual.containers import Container, HorizontalGroup, Widget
 from textual.content import Content
@@ -352,7 +353,7 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
     def on_mount(self) -> None:
         self.theme = "tokyo-night"

-    def on_key(self, event) -> None:
+    def on_key(self, event: events.Key) -> None:
         """Handle key events, including Ctrl+C"""
         if event.key == "ctrl+c":
             if self.current_state == "monitor" and self.saved_deployment is not None:
```
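The `events.Key` annotations here (and in `deployment_monitor.py` and `profile_form.py` below) don't change runtime behavior; Textual dispatches `on_key` by method name. A minimal sketch of the same typed-handler pattern, assuming textual is installed:

```python
from textual import events
from textual.app import App


class QuitOnCtrlC(App[None]):
    """Hypothetical app showing the typed on_key handler."""

    def on_key(self, event: events.Key) -> None:
        # event.key is the normalized key name, e.g. "ctrl+c"
        if event.key == "ctrl+c":
            self.exit(None)
```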

llama_deploy/cli/textual/deployment_monitor.py
CHANGED
```diff
@@ -9,11 +9,12 @@ import time
 from pathlib import Path
 from typing import Iterator

-from llama_deploy.cli.client import Closer
 from llama_deploy.cli.client import get_project_client as get_client
+from llama_deploy.core.client.manage_client import Closer
 from llama_deploy.core.schema.base import LogEvent
 from llama_deploy.core.schema.deployments import DeploymentResponse
 from rich.text import Text
+from textual import events
 from textual.app import App, ComposeResult
 from textual.containers import Container, HorizontalGroup, Widget
 from textual.content import Content
@@ -423,7 +424,7 @@ class DeploymentMonitorApp(App[None]):
     def on_monitor_close_message(self, _: MonitorCloseMessage) -> None:
         self.exit(None)

-    def on_key(self, event) -> None:
+    def on_key(self, event: events.Key) -> None:
         # Support Ctrl+C to exit, consistent with other screens and terminals
         if event.key == "ctrl+c":
             self.exit(None)
```

llama_deploy/cli/textual/git_validation.py
CHANGED
```diff
@@ -224,7 +224,7 @@ class GitValidationWidget(Widget):
             self.current_state = "options"
             self.error_message = ""
         elif event.button.id == "cancel_github_auth":
-            self._cancel_github_auth()
+            self.run_worker(self._cancel_github_auth())
         elif event.button.id == "recheck_github":
             self.run_worker(self._recheck_github_auth())
         elif event.button.id == "continue_success":
@@ -258,10 +258,10 @@ class GitValidationWidget(Widget):
         self.github_callback_server = GitHubCallbackServer()
         self.run_worker(self._wait_for_callback())

-    def _cancel_github_auth(self) -> None:
+    async def _cancel_github_auth(self) -> None:
         """Cancel GitHub authentication and return to options"""
         if self.github_callback_server:
-            self.github_callback_server.stop()
+            await self.github_callback_server.stop()
             self.github_callback_server = None
         self.current_state = "options"

```
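The cancel handler became a coroutine and is scheduled through `run_worker` because the callback server's `stop()` is now awaitable. A plain-asyncio sketch (no Textual, all names hypothetical) of why the call sites had to change:

```python
import asyncio


class FakeServer:
    """Stand-in for GitHubCallbackServer; hypothetical, for illustration."""

    async def stop(self) -> None:
        await asyncio.sleep(0)  # pretend to close sockets
        print("stopped")


async def main() -> None:
    server = FakeServer()
    server.stop()        # bug: creates a coroutine that never runs
    await server.stop()  # fix: actually executes; inside Textual, where a
                         # button handler can't await, run_worker(...) does
                         # the scheduling instead


asyncio.run(main())
```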

llama_deploy/cli/textual/llama_loader.py
CHANGED
```diff
@@ -1,12 +1,23 @@
 import re
+from typing import TypedDict

 from textual.widgets import Static


+class StaticKwargs(TypedDict, total=False):
+    expand: bool
+    shrink: bool
+    markup: bool
+    name: str | None
+    id: str | None
+    classes: str | None
+    disabled: bool
+
+
 class PixelLlamaLoader(Static):
     """Pixelated llama loading animation using block characters"""

-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs: StaticKwargs):
         self.frame = 0
         # Pixelated llama frames using Unicode block characters
         self.frames = [
```
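One caveat on the annotation above: `**kwargs: StaticKwargs` tells a type checker that each individual keyword *value* is a `StaticKwargs` dict. The PEP 692 spelling for "these keyword names with these types" is `Unpack`; a sketch under that assumption, with a hypothetical `make_loader` function that is not part of llamactl:

```python
from typing import TypedDict

from typing_extensions import Unpack  # typing.Unpack on Python 3.11+


class StaticKwargs(TypedDict, total=False):
    name: str | None
    id: str | None
    classes: str | None
    disabled: bool


def make_loader(**kwargs: Unpack[StaticKwargs]) -> None:
    """Hypothetical function; keyword names/types checked via Unpack."""
    print(kwargs.get("id"))


make_loader(id="loader", disabled=False)
```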

llama_deploy/cli/textual/profile_form.py
CHANGED
```diff
@@ -3,6 +3,7 @@
 from dataclasses import dataclass
 from pathlib import Path

+from textual import events
 from textual.app import App, ComposeResult
 from textual.containers import (
     Container,
@@ -45,7 +46,7 @@ class ProfileEditApp(App[ProfileForm | None]):
     def on_mount(self) -> None:
         self.theme = "tokyo-night"

-    def on_key(self, event) -> None:
+    def on_key(self, event: events.Key) -> None:
         """Handle key events, including Ctrl+C"""
         if event.key == "ctrl+c":
             self.exit(None)
```

{llamactl-0.3.0a9.dist-info → llamactl-0.3.0a11.dist-info}/METADATA
CHANGED
```diff
@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.3.0a9
+Version: 0.3.0a11
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core>=0.3.
-Requires-Dist: llama-deploy-appserver>=0.3.
+Requires-Dist: llama-deploy-core[client]>=0.3.0a11,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.0a11,<0.4.0
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0
```

llamactl-0.3.0a11.dist-info/RECORD
ADDED
```diff
@@ -0,0 +1,27 @@
+llama_deploy/cli/__init__.py,sha256=274c45e48048bf60668ab564ae8e7c5e6daf1d7779005f87d07ce9fa7d04936c,422
+llama_deploy/cli/app.py,sha256=5200b4ac01b0ad0c405ce841fc01a12ed32f7b6474472f00a7d6c75fe274ea45,2324
+llama_deploy/cli/client.py,sha256=f88cc30cf6df39fa68fb0aefe668634e4fd7216895d524b9af6f7c5727ba9da4,1510
+llama_deploy/cli/commands/aliased_group.py,sha256=bc41007c97b7b93981217dbd4d4591df2b6c9412a2d9ed045b0ec5655ed285f2,1066
+llama_deploy/cli/commands/deployment.py,sha256=7874f4a499ce1bfd6ae14833410cc75c4c954463d96064cfd045421358479d4c,8810
+llama_deploy/cli/commands/init.py,sha256=51b2de1e35ff34bc15c9dfec72fbad08aaf528c334df168896d36458a4e9401c,6307
+llama_deploy/cli/commands/profile.py,sha256=933d7a434c2684c7b47bfbd7340a09e4b34d56d20624886e15fdb4e0af97ce0b,6765
+llama_deploy/cli/commands/serve.py,sha256=4d47850397ba172944df56a934a51bedb52403cbd3f9b000b1ced90a31c75049,2721
+llama_deploy/cli/config.py,sha256=ebec8cf9e2112378ee6ecd626166711f3fba8cfa27cd1c931fe899c0b2a047b3,6241
+llama_deploy/cli/debug.py,sha256=e85a72d473bbe1645eb31772f7349bde703d45704166f767385895c440afc762,496
+llama_deploy/cli/env.py,sha256=6ebc24579815b3787829c81fd5bb9f31698a06e62c0128a788559f962b33a7af,1016
+llama_deploy/cli/interactive_prompts/utils.py,sha256=db78eba78bf347738feb89ac3eeb77a1d11f4003980f81cf3c13842f8d41afeb,2463
+llama_deploy/cli/options.py,sha256=e71d1a306e9e302b92ab55ace75f3a9273be267ae92a71226aac84c14271619d,823
+llama_deploy/cli/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+llama_deploy/cli/textual/deployment_form.py,sha256=33d4f3f0741aeaf0072f5f91368b3c0b0456c72370b72964363240d790b3fa06,20929
+llama_deploy/cli/textual/deployment_help.py,sha256=d43e9ff29db71a842cf8b491545763d581ede3132b8af518c73af85a40950046,2464
+llama_deploy/cli/textual/deployment_monitor.py,sha256=a2c4e48c5494f63a2b8dcc9ab1b11b4ce50d78ef94c0f54095448ce9d01fa59b,16782
+llama_deploy/cli/textual/git_validation.py,sha256=fcbe5477c99e8e669b31c563572d4894f61475ef7e968a59d9f172642d390cf7,13329
+llama_deploy/cli/textual/github_callback_server.py,sha256=dc74c510f8a98ef6ffaab0f6d11c7ea86ee77ca5adbc7725a2a29112bae24191,7556
+llama_deploy/cli/textual/llama_loader.py,sha256=33cb32a46dd40bcf889c553e44f2672c410e26bd1d4b17aa6cca6d0a5d59c2c4,1468
+llama_deploy/cli/textual/profile_form.py,sha256=747644895774e7416620d2071f6f054b06ec8e398ac0e7649386caa2a83fe2aa,5995
+llama_deploy/cli/textual/secrets_form.py,sha256=a43fbd81aad034d0d60906bfd917c107f9ace414648b0f63ac0b29eeba4050db,7061
+llama_deploy/cli/textual/styles.tcss,sha256=536cec7627d2a16dd03bf25bb9b6e4d53f1e0d18272b07ec0dc3bf76b0a7c2e0,3056
+llamactl-0.3.0a11.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llamactl-0.3.0a11.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
+llamactl-0.3.0a11.dist-info/METADATA,sha256=0d3518dd292be0addfdc766e4f909810dbcae7c31db2ac9243313a774186dc97,3177
+llamactl-0.3.0a11.dist-info/RECORD,,
```

llamactl-0.3.0a9.dist-info/RECORD
REMOVED
```diff
@@ -1,26 +0,0 @@
-llama_deploy/cli/__init__.py,sha256=274c45e48048bf60668ab564ae8e7c5e6daf1d7779005f87d07ce9fa7d04936c,422
-llama_deploy/cli/app.py,sha256=5200b4ac01b0ad0c405ce841fc01a12ed32f7b6474472f00a7d6c75fe274ea45,2324
-llama_deploy/cli/client.py,sha256=a648bfcd07b1d4e9ad1e7d9e3ef591066ea5f45bb6fe4b8ad2c495347563794a,10110
-llama_deploy/cli/commands/aliased_group.py,sha256=6e2457cdea51de83bb7f02b37abb77cb9b5bff0a61bdddd66c43240b66b13f13,986
-llama_deploy/cli/commands/deployment.py,sha256=7874f4a499ce1bfd6ae14833410cc75c4c954463d96064cfd045421358479d4c,8810
-llama_deploy/cli/commands/init.py,sha256=da6aecb5ebc8e4cf15421227fcafd98f573f601c52f8849c00d243b572c9f56a,6285
-llama_deploy/cli/commands/profile.py,sha256=933d7a434c2684c7b47bfbd7340a09e4b34d56d20624886e15fdb4e0af97ce0b,6765
-llama_deploy/cli/commands/serve.py,sha256=22227f383bb5a9d43de7c788139c685c7370e24f495c9b1929faae80b87d4ded,2232
-llama_deploy/cli/config.py,sha256=ebec8cf9e2112378ee6ecd626166711f3fba8cfa27cd1c931fe899c0b2a047b3,6241
-llama_deploy/cli/debug.py,sha256=e85a72d473bbe1645eb31772f7349bde703d45704166f767385895c440afc762,496
-llama_deploy/cli/env.py,sha256=6ebc24579815b3787829c81fd5bb9f31698a06e62c0128a788559f962b33a7af,1016
-llama_deploy/cli/interactive_prompts/utils.py,sha256=db78eba78bf347738feb89ac3eeb77a1d11f4003980f81cf3c13842f8d41afeb,2463
-llama_deploy/cli/options.py,sha256=38bb4a231ad0436d8b910c98ff659c0736f619efdf56c402d60bb3f755df38e0,598
-llama_deploy/cli/textual/deployment_form.py,sha256=5566e2545ef9548b14b7d3d2b0c1bda1dcd99aca814d34823da6b9da1903b8df,20890
-llama_deploy/cli/textual/deployment_help.py,sha256=d43e9ff29db71a842cf8b491545763d581ede3132b8af518c73af85a40950046,2464
-llama_deploy/cli/textual/deployment_monitor.py,sha256=0c641d9d0e29cc3c1ae44d4325d8a181ba4defb24ac84ac91741812b0d0adca1,16728
-llama_deploy/cli/textual/git_validation.py,sha256=44e359d16aa879f4566a0077d025fdd799f500862a8462b5ed3586e528f7a273,13300
-llama_deploy/cli/textual/github_callback_server.py,sha256=dc74c510f8a98ef6ffaab0f6d11c7ea86ee77ca5adbc7725a2a29112bae24191,7556
-llama_deploy/cli/textual/llama_loader.py,sha256=468213a504057f21838b01f48d51f52e60aa622d6f0fe5bb800d76ced846cea9,1245
-llama_deploy/cli/textual/profile_form.py,sha256=4410678edbd59b014f937ce760bafa51ae86f6dd58bec88f048a9eda273446aa,5956
-llama_deploy/cli/textual/secrets_form.py,sha256=a43fbd81aad034d0d60906bfd917c107f9ace414648b0f63ac0b29eeba4050db,7061
-llama_deploy/cli/textual/styles.tcss,sha256=536cec7627d2a16dd03bf25bb9b6e4d53f1e0d18272b07ec0dc3bf76b0a7c2e0,3056
-llamactl-0.3.0a9.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llamactl-0.3.0a9.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
-llamactl-0.3.0a9.dist-info/METADATA,sha256=251109a0de878a3e2599ca323c78039393c5cbfb4177aa3e8cf6331c9fc7d4c7,3166
-llamactl-0.3.0a9.dist-info/RECORD,,
```

{llamactl-0.3.0a9.dist-info → llamactl-0.3.0a11.dist-info}/WHEEL
RENAMED
File without changes

{llamactl-0.3.0a9.dist-info → llamactl-0.3.0a11.dist-info}/entry_points.txt
RENAMED
File without changes