llamactl 0.3.0a8__py3-none-any.whl → 0.3.0a10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_deploy/cli/client.py +4 -2
- llama_deploy/cli/commands/aliased_group.py +3 -1
- llama_deploy/cli/commands/init.py +1 -1
- llama_deploy/cli/commands/serve.py +16 -2
- llama_deploy/cli/options.py +10 -4
- llama_deploy/cli/py.typed +0 -0
- llama_deploy/cli/textual/deployment_form.py +2 -1
- llama_deploy/cli/textual/deployment_monitor.py +61 -15
- llama_deploy/cli/textual/git_validation.py +3 -3
- llama_deploy/cli/textual/llama_loader.py +12 -1
- llama_deploy/cli/textual/profile_form.py +2 -1
- {llamactl-0.3.0a8.dist-info → llamactl-0.3.0a10.dist-info}/METADATA +3 -3
- llamactl-0.3.0a10.dist-info/RECORD +27 -0
- llamactl-0.3.0a8.dist-info/RECORD +0 -26
- {llamactl-0.3.0a8.dist-info → llamactl-0.3.0a10.dist-info}/WHEEL +0 -0
- {llamactl-0.3.0a8.dist-info → llamactl-0.3.0a10.dist-info}/entry_points.txt +0 -0
llama_deploy/cli/client.py
CHANGED
@@ -109,10 +109,12 @@ class ProjectClient(BaseClient):
         deployments_response = DeploymentsListResponse.model_validate(response.json())
         return [deployment for deployment in deployments_response.deployments]
 
-    def get_deployment(
+    def get_deployment(
+        self, deployment_id: str, include_events: bool = False
+    ) -> DeploymentResponse:
         response = self.client.get(
             f"/api/v1beta1/deployments/{deployment_id}",
-            params={"project_id": self.project_id},
+            params={"project_id": self.project_id, "include_events": include_events},
         )
         return DeploymentResponse.model_validate(response.json())
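The new `include_events` flag is forwarded as-is as a query parameter, so callers opt in per request. A minimal usage sketch, assuming a configured project client (the `events` field and its `type`/`reason`/`message` attributes are taken from the monitor code later in this diff):

from llama_deploy.cli.client import get_project_client

client = get_project_client()
# Fetch one deployment together with its recent events (new in 0.3.0a10);
# omitting include_events keeps the previous behavior.
deployment = client.get_deployment("my-deployment-id", include_events=True)
for event in deployment.events or []:
    print(event.type, event.reason, event.message)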
llama_deploy/cli/commands/aliased_group.py
CHANGED

@@ -25,7 +25,9 @@ class AliasedGroup(click.Group):
 
         ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")
 
-    def resolve_command(
+    def resolve_command(
+        self, ctx: click.Context, args: list[str]
+    ) -> tuple[str, click.Command, list[str]]:
         # always return the full command name
         _, cmd, args = super().resolve_command(ctx, args)
         return cmd.name, cmd, args
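`resolve_command` is Click's hook for mapping a command-line token to a `Command`; returning `cmd.name` instead of the (possibly abbreviated) token the user typed makes help and error output show the canonical name. A standalone sketch of the prefix-matching group this class implements (the `get_command` body follows the standard Click aliasing pattern and is not copied from the package):

import click

class AliasedGroup(click.Group):
    def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:
        # Exact match first, then an unambiguous prefix match.
        rv = super().get_command(ctx, cmd_name)
        if rv is not None:
            return rv
        matches = [name for name in self.list_commands(ctx) if name.startswith(cmd_name)]
        if not matches:
            return None
        if len(matches) > 1:
            ctx.fail(f"Too many matches: {', '.join(sorted(matches))}")
        return super().get_command(ctx, matches[0])

    def resolve_command(
        self, ctx: click.Context, args: list[str]
    ) -> tuple[str, click.Command, list[str]]:
        # Always return the full command name, not the abbreviation typed.
        _, cmd, args = super().resolve_command(ctx, args)
        return cmd.name, cmd, args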
llama_deploy/cli/commands/init.py
CHANGED

@@ -26,7 +26,7 @@ options = [
         id="basic-ui",
         name="Basic UI",
         description="A basic starter workflow with a React Vite UI",
-        git_url="https://github.com/
+        git_url="https://github.com/run-llama/template-workflow-basic-ui",
     ),
     TemplateOption(
         id="extraction-review",
llama_deploy/cli/commands/serve.py
CHANGED

@@ -9,7 +9,6 @@ from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from rich import print as rprint
 
 from ..app import app
-from ..options import global_options
 
 
 @app.command(
@@ -36,7 +35,18 @@ from ..options import global_options
 )
 @click.option("--port", type=int, help="The port to run the API server on")
 @click.option("--ui-port", type=int, help="The port to run the UI proxy server on")
-@
+@click.option(
+    "--log-level",
+    type=click.Choice(
+        ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+    ),
+    help="The log level to run the API server at",
+)
+@click.option(
+    "--log-format",
+    type=click.Choice(["console", "json"], case_sensitive=False),
+    help="The format to use for logging",
+)
 def serve(
     deployment_file: Path,
     no_install: bool,
@@ -45,6 +55,8 @@ def serve(
     preview: bool,
     port: int | None = None,
     ui_port: int | None = None,
+    log_level: str | None = None,
+    log_format: str | None = None,
 ) -> None:
     """Run llama_deploy API Server in the foreground. Reads the deployment configuration from the current directory. Can optionally specify a deployment file path."""
     if not deployment_file.exists():
@@ -64,6 +76,8 @@ def serve(
             open_browser=not no_open_browser,
             port=port,
             ui_port=ui_port,
+            log_level=log_level.upper() if log_level else None,
+            log_format=log_format.lower() if log_format else None,
         )
 
     except KeyboardInterrupt:
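Both new options use `case_sensitive=False`, so `--log-level debug` is accepted at the command line; the explicit `.upper()` and `.lower()` calls before the values are handed to the app server keep the normalization independent of whatever casing Click returns. A self-contained sketch of the same pattern (the `demo` command is illustrative, not the package's `serve`):

import click

@click.command()
@click.option(
    "--log-level",
    type=click.Choice(
        ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
    ),
)
def demo(log_level: str | None) -> None:
    # Normalize explicitly rather than relying on Choice's casing behavior.
    click.echo(log_level.upper() if log_level else "no level set")

if __name__ == "__main__":
    demo()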
llama_deploy/cli/options.py
CHANGED
@@ -1,20 +1,26 @@
 import logging
+from typing import Callable, ParamSpec, TypeVar
 
 import click
 
+P = ParamSpec("P")
+R = TypeVar("R")
 
-def global_options(f):
+
+def global_options(f: Callable[P, R]) -> Callable[P, R]:
     """Common decorator to add global options to command groups"""
     from .debug import setup_file_logging
 
-    def debug_callback(ctx, param, value):
+    def debug_callback(ctx: click.Context, param: click.Parameter, value: str) -> str:
         if value:
-            setup_file_logging(level=logging._nameToLevel
+            setup_file_logging(level=logging._nameToLevel.get(value, logging.INFO))
         return value
 
     return click.option(
         "--log-level",
-        type=click.Choice(
+        type=click.Choice(
+            ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+        ),
         help="Enable debug logging to file",
         callback=debug_callback,
         expose_value=False,
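With `ParamSpec`, the decorator's annotation says "returns a callable with exactly the same parameters and return type as its argument", so decorating a command no longer erases its signature for type checkers. A minimal sketch of the idiom (the names here are illustrative, not the package's):

from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")

def passthrough(f: Callable[P, R]) -> Callable[P, R]:
    # Identity decorator: checkers see the wrapped function's exact
    # parameter list instead of (*args: Any, **kwargs: Any).
    return f

@passthrough
def greet(name: str, excited: bool = False) -> str:
    return f"Hello, {name}{'!' if excited else '.'}"

print(greet("world", excited=True))  # greet(42) would now be flagged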
llama_deploy/cli/py.typed
File without changes
llama_deploy/cli/textual/deployment_form.py
CHANGED

@@ -38,6 +38,7 @@ from llama_deploy.core.schema.deployments import (
     DeploymentResponse,
     DeploymentUpdate,
 )
+from textual import events
 from textual.app import App, ComposeResult
 from textual.containers import Container, HorizontalGroup, Widget
 from textual.content import Content
@@ -352,7 +353,7 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
     def on_mount(self) -> None:
         self.theme = "tokyo-night"
 
-    def on_key(self, event) -> None:
+    def on_key(self, event: events.Key) -> None:
         """Handle key events, including Ctrl+C"""
         if event.key == "ctrl+c":
             if self.current_state == "monitor" and self.saved_deployment is not None:
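Typing the handler parameter as `events.Key` gives checkers and editors the `key` attribute. A minimal Textual sketch of the same handler shape (the app itself is illustrative; Textual may also handle some keys through its own bindings):

from textual import events
from textual.app import App, ComposeResult
from textual.widgets import Static

class KeyDemo(App[None]):
    def compose(self) -> ComposeResult:
        yield Static("Press ctrl+c or q to exit")

    def on_key(self, event: events.Key) -> None:
        # event.key is a normalized key name such as "ctrl+c" or "q"
        if event.key in ("ctrl+c", "q"):
            self.exit(None)

if __name__ == "__main__":
    KeyDemo().run()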
llama_deploy/cli/textual/deployment_monitor.py
CHANGED

@@ -14,8 +14,10 @@ from llama_deploy.cli.client import get_project_client as get_client
 from llama_deploy.core.schema.base import LogEvent
 from llama_deploy.core.schema.deployments import DeploymentResponse
 from rich.text import Text
+from textual import events
 from textual.app import App, ComposeResult
 from textual.containers import Container, HorizontalGroup, Widget
+from textual.content import Content
 from textual.message import Message
 from textual.reactive import reactive
 from textual.widgets import Button, RichLog, Static
@@ -58,6 +60,16 @@ class DeploymentMonitorWidget(Widget):
         margin-top: 1;
     }
 
+    .status-line .status-main {
+        width: auto;
+    }
+
+    .status-line .status-right {
+        width: 1fr;
+        text-align: right;
+        min-width: 12;
+    }
+
 
     """
 
@@ -87,10 +99,12 @@ class DeploymentMonitorWidget(Widget):
         yield Static("", classes="error-message", id="error_message")
 
         # Single-line status bar with colored icon and deployment ID
-        with HorizontalGroup(classes="
+        with HorizontalGroup(classes="status-line"):
             yield Static(
-                self._render_status_line(), classes="status-
+                self._render_status_line(), classes="status-main", id="status_line"
             )
+            yield Static("", classes="status-right", id="last_event_status")
+        yield Static("", classes="last-event mb-1", id="last_event_details")
 
         yield Static("Logs", classes="secondary-message log-header")
         yield RichLog(
@@ -128,7 +142,9 @@ class DeploymentMonitorWidget(Widget):
     async def _fetch_deployment(self) -> None:
         try:
             client = get_client()
-            self.deployment = client.get_deployment(
+            self.deployment = client.get_deployment(
+                self.deployment_id, include_events=True
+            )
             # Clear any previous error on success
             self.error_message = ""
         except Exception as e:  # pragma: no cover - network errors
@@ -271,7 +287,7 @@ class DeploymentMonitorWidget(Widget):
             return "●", gray
 
     def _render_status_line(self) -> Text:
-        phase = self.deployment.status if self.deployment else "
+        phase = self.deployment.status if self.deployment else "Unknown"
         icon, style = self._status_icon_and_style(phase)
         line = Text()
         line.append(icon, style=style)
@@ -279,6 +295,34 @@ class DeploymentMonitorWidget(Widget):
         line.append(f"Status: {phase} — Deployment ID: {self.deployment_id or '-'}")
         return line
 
+    def _render_last_event_details(self) -> Content:
+        if not self.deployment or not self.deployment.events:
+            return Content()
+        latest = self.deployment.events[-1]
+        txt = Text(f" {latest.message}", style="dim")
+        return Content.from_rich_text(txt)
+
+    def _render_last_event_status(self) -> Content:
+        if not self.deployment or not self.deployment.events:
+            return Content()
+        txt = Text()
+        # Pick the most recent by last_timestamp
+        latest = self.deployment.events[-1]
+        ts = None
+        ts = (latest.last_timestamp or latest.first_timestamp).strftime(
+            "%Y-%m-%d %H:%M:%S"
+        )
+        parts: list[str] = []
+        if latest.type:
+            parts.append(latest.type)
+        if latest.reason:
+            parts.append(latest.reason)
+        kind = "/".join(parts) if parts else None
+        if kind:
+            txt.append(f"{kind} ", style="medium_purple3")
+        txt.append(f"{ts}", style="dim")
+        return Content.from_rich_text(txt)
+
     def on_unmount(self) -> None:
         # Attempt to stop the streaming loop
         self._stop_stream.set()
@@ -298,16 +342,16 @@ class DeploymentMonitorWidget(Widget):
     def watch_deployment(self, deployment: DeploymentResponse | None) -> None:
         if deployment is None:
             return
-
-
-
-
-
-        try:
-            widget = self.query_one("#status_line", Static)
-        except Exception:
-            return
+
+        widget = self.query_one("#status_line", Static)
+        ev_widget = self.query_one("#last_event_status", Static)
+        ev_details_widget = self.query_one("#last_event_details", Static)
+
         widget.update(self._render_status_line())
+        # Update last event line
+        ev_widget.update(self._render_last_event_status())
+        ev_details_widget.update(self._render_last_event_details())
+        ev_details_widget.display = bool(self.deployment and self.deployment.events)
 
     def watch_wrap_enabled(self, enabled: bool) -> None:
         try:
@@ -342,7 +386,9 @@ class DeploymentMonitorWidget(Widget):
         client = get_client()
         while not self._stop_stream.is_set():
             try:
-                self.deployment = client.get_deployment(
+                self.deployment = client.get_deployment(
+                    self.deployment_id, include_events=True
+                )
                 # Clear any previous error on success
                 if self.error_message:
                     self.error_message = ""
@@ -378,7 +424,7 @@ class DeploymentMonitorApp(App[None]):
     def on_monitor_close_message(self, _: MonitorCloseMessage) -> None:
         self.exit(None)
 
-    def on_key(self, event) -> None:
+    def on_key(self, event: events.Key) -> None:
        # Support Ctrl+C to exit, consistent with other screens and terminals
         if event.key == "ctrl+c":
             self.exit(None)
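The refresh loop keeps calling `get_deployment(..., include_events=True)` until `_stop_stream` is set from `on_unmount`. A stripped-down sketch of that shape, assuming an `Event`-style stop flag (the fetch callable stands in for the project client):

import threading
from typing import Callable

def poll(fetch: Callable[[], object], stop: threading.Event, interval: float = 2.0) -> None:
    # Re-fetch until asked to stop; errors are reported rather than fatal,
    # matching how the widget surfaces them as an error message.
    while not stop.is_set():
        try:
            print(fetch())
        except Exception as exc:
            print(f"fetch failed: {exc}")
        stop.wait(interval)

stop = threading.Event()
# poll(lambda: client.get_deployment("my-id", include_events=True), stop)
# ...later, e.g. from on_unmount: stop.set()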
llama_deploy/cli/textual/git_validation.py
CHANGED

@@ -224,7 +224,7 @@ class GitValidationWidget(Widget):
             self.current_state = "options"
             self.error_message = ""
         elif event.button.id == "cancel_github_auth":
-            self._cancel_github_auth()
+            self.run_worker(self._cancel_github_auth())
         elif event.button.id == "recheck_github":
             self.run_worker(self._recheck_github_auth())
         elif event.button.id == "continue_success":
@@ -258,10 +258,10 @@
         self.github_callback_server = GitHubCallbackServer()
         self.run_worker(self._wait_for_callback())
 
-    def _cancel_github_auth(self) -> None:
+    async def _cancel_github_auth(self) -> None:
         """Cancel GitHub authentication and return to options"""
         if self.github_callback_server:
-            self.github_callback_server.stop()
+            await self.github_callback_server.stop()
         self.github_callback_server = None
         self.current_state = "options"
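Once `_cancel_github_auth` is a coroutine, the button handler can no longer call it synchronously; handing it to `run_worker` lets the `await server.stop()` complete on the event loop. A minimal asyncio sketch of the same shape (the server class is a stand-in, not the package's `GitHubCallbackServer`):

import asyncio

class CallbackServer:
    async def stop(self) -> None:
        # Real servers await socket/task cleanup here.
        await asyncio.sleep(0)

server: CallbackServer | None = CallbackServer()

async def cancel_auth() -> None:
    global server
    if server:
        await server.stop()  # needs an async caller, hence run_worker
    server = None

asyncio.run(cancel_auth())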
llama_deploy/cli/textual/llama_loader.py
CHANGED

@@ -1,12 +1,23 @@
 import re
+from typing import TypedDict
 
 from textual.widgets import Static
 
 
+class StaticKwargs(TypedDict, total=False):
+    expand: bool
+    shrink: bool
+    markup: bool
+    name: str | None
+    id: str | None
+    classes: str | None
+    disabled: bool
+
+
 class PixelLlamaLoader(Static):
     """Pixelated llama loading animation using block characters"""
 
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs: StaticKwargs):
         self.frame = 0
         # Pixelated llama frames using Unicode block characters
         self.frames = [
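`total=False` makes every key optional, so any subset of `Static`'s constructor keywords can be passed through. Worth noting: PEP 692 spells per-key checking of `**kwargs` as `Unpack[StaticKwargs]`; a bare `**kwargs: StaticKwargs` annotation instead types each keyword's value as the TypedDict itself, so checkers read the two forms differently. A sketch of the `Unpack` form (illustrative names, not the package's code):

from typing import TypedDict, Unpack  # Unpack: Python 3.11+, else typing_extensions

class BoxKwargs(TypedDict, total=False):
    name: str | None
    id: str | None
    classes: str | None

def make_box(**kwargs: Unpack[BoxKwargs]) -> dict[str, object]:
    # Checkers verify each keyword: make_box(id="a") passes,
    # make_box(width=3) is flagged as an unknown key.
    return dict(kwargs)

print(make_box(name="loader", classes="centered"))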
llama_deploy/cli/textual/profile_form.py
CHANGED

@@ -3,6 +3,7 @@
 from dataclasses import dataclass
 from pathlib import Path
 
+from textual import events
 from textual.app import App, ComposeResult
 from textual.containers import (
     Container,
@@ -45,7 +46,7 @@ class ProfileEditApp(App[ProfileForm | None]):
     def on_mount(self) -> None:
         self.theme = "tokyo-night"
 
-    def on_key(self, event) -> None:
+    def on_key(self, event: events.Key) -> None:
         """Handle key events, including Ctrl+C"""
         if event.key == "ctrl+c":
             self.exit(None)
{llamactl-0.3.0a8.dist-info → llamactl-0.3.0a10.dist-info}/METADATA
CHANGED

@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.3.0a8
+Version: 0.3.0a10
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core>=0.3.
-Requires-Dist: llama-deploy-appserver>=0.3.
+Requires-Dist: llama-deploy-core>=0.3.0a10,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.0a10,<0.4.0
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0
llamactl-0.3.0a10.dist-info/RECORD
ADDED

@@ -0,0 +1,27 @@
+llama_deploy/cli/__init__.py,sha256=274c45e48048bf60668ab564ae8e7c5e6daf1d7779005f87d07ce9fa7d04936c,422
+llama_deploy/cli/app.py,sha256=5200b4ac01b0ad0c405ce841fc01a12ed32f7b6474472f00a7d6c75fe274ea45,2324
+llama_deploy/cli/client.py,sha256=a648bfcd07b1d4e9ad1e7d9e3ef591066ea5f45bb6fe4b8ad2c495347563794a,10110
+llama_deploy/cli/commands/aliased_group.py,sha256=bc41007c97b7b93981217dbd4d4591df2b6c9412a2d9ed045b0ec5655ed285f2,1066
+llama_deploy/cli/commands/deployment.py,sha256=7874f4a499ce1bfd6ae14833410cc75c4c954463d96064cfd045421358479d4c,8810
+llama_deploy/cli/commands/init.py,sha256=51b2de1e35ff34bc15c9dfec72fbad08aaf528c334df168896d36458a4e9401c,6307
+llama_deploy/cli/commands/profile.py,sha256=933d7a434c2684c7b47bfbd7340a09e4b34d56d20624886e15fdb4e0af97ce0b,6765
+llama_deploy/cli/commands/serve.py,sha256=4d47850397ba172944df56a934a51bedb52403cbd3f9b000b1ced90a31c75049,2721
+llama_deploy/cli/config.py,sha256=ebec8cf9e2112378ee6ecd626166711f3fba8cfa27cd1c931fe899c0b2a047b3,6241
+llama_deploy/cli/debug.py,sha256=e85a72d473bbe1645eb31772f7349bde703d45704166f767385895c440afc762,496
+llama_deploy/cli/env.py,sha256=6ebc24579815b3787829c81fd5bb9f31698a06e62c0128a788559f962b33a7af,1016
+llama_deploy/cli/interactive_prompts/utils.py,sha256=db78eba78bf347738feb89ac3eeb77a1d11f4003980f81cf3c13842f8d41afeb,2463
+llama_deploy/cli/options.py,sha256=e71d1a306e9e302b92ab55ace75f3a9273be267ae92a71226aac84c14271619d,823
+llama_deploy/cli/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+llama_deploy/cli/textual/deployment_form.py,sha256=33d4f3f0741aeaf0072f5f91368b3c0b0456c72370b72964363240d790b3fa06,20929
+llama_deploy/cli/textual/deployment_help.py,sha256=d43e9ff29db71a842cf8b491545763d581ede3132b8af518c73af85a40950046,2464
+llama_deploy/cli/textual/deployment_monitor.py,sha256=4a36103310b22d68c11e2fe1e5dc0c2af847772d42c8f15b32a61261aeaa4104,16767
+llama_deploy/cli/textual/git_validation.py,sha256=fcbe5477c99e8e669b31c563572d4894f61475ef7e968a59d9f172642d390cf7,13329
+llama_deploy/cli/textual/github_callback_server.py,sha256=dc74c510f8a98ef6ffaab0f6d11c7ea86ee77ca5adbc7725a2a29112bae24191,7556
+llama_deploy/cli/textual/llama_loader.py,sha256=33cb32a46dd40bcf889c553e44f2672c410e26bd1d4b17aa6cca6d0a5d59c2c4,1468
+llama_deploy/cli/textual/profile_form.py,sha256=747644895774e7416620d2071f6f054b06ec8e398ac0e7649386caa2a83fe2aa,5995
+llama_deploy/cli/textual/secrets_form.py,sha256=a43fbd81aad034d0d60906bfd917c107f9ace414648b0f63ac0b29eeba4050db,7061
+llama_deploy/cli/textual/styles.tcss,sha256=536cec7627d2a16dd03bf25bb9b6e4d53f1e0d18272b07ec0dc3bf76b0a7c2e0,3056
+llamactl-0.3.0a10.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llamactl-0.3.0a10.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
+llamactl-0.3.0a10.dist-info/METADATA,sha256=64f72086ae8fbbce81d6af9beff116a4a40cb5dce008dc548a656d54ec436c15,3169
+llamactl-0.3.0a10.dist-info/RECORD,,
llamactl-0.3.0a8.dist-info/RECORD
DELETED

@@ -1,26 +0,0 @@
-llama_deploy/cli/__init__.py,sha256=274c45e48048bf60668ab564ae8e7c5e6daf1d7779005f87d07ce9fa7d04936c,422
-llama_deploy/cli/app.py,sha256=5200b4ac01b0ad0c405ce841fc01a12ed32f7b6474472f00a7d6c75fe274ea45,2324
-llama_deploy/cli/client.py,sha256=03d73191a2b89e33573b7df81b9d7586227e871451891f737d9f601d3aa47ffe,10032
-llama_deploy/cli/commands/aliased_group.py,sha256=6e2457cdea51de83bb7f02b37abb77cb9b5bff0a61bdddd66c43240b66b13f13,986
-llama_deploy/cli/commands/deployment.py,sha256=7874f4a499ce1bfd6ae14833410cc75c4c954463d96064cfd045421358479d4c,8810
-llama_deploy/cli/commands/init.py,sha256=da6aecb5ebc8e4cf15421227fcafd98f573f601c52f8849c00d243b572c9f56a,6285
-llama_deploy/cli/commands/profile.py,sha256=933d7a434c2684c7b47bfbd7340a09e4b34d56d20624886e15fdb4e0af97ce0b,6765
-llama_deploy/cli/commands/serve.py,sha256=22227f383bb5a9d43de7c788139c685c7370e24f495c9b1929faae80b87d4ded,2232
-llama_deploy/cli/config.py,sha256=ebec8cf9e2112378ee6ecd626166711f3fba8cfa27cd1c931fe899c0b2a047b3,6241
-llama_deploy/cli/debug.py,sha256=e85a72d473bbe1645eb31772f7349bde703d45704166f767385895c440afc762,496
-llama_deploy/cli/env.py,sha256=6ebc24579815b3787829c81fd5bb9f31698a06e62c0128a788559f962b33a7af,1016
-llama_deploy/cli/interactive_prompts/utils.py,sha256=db78eba78bf347738feb89ac3eeb77a1d11f4003980f81cf3c13842f8d41afeb,2463
-llama_deploy/cli/options.py,sha256=38bb4a231ad0436d8b910c98ff659c0736f619efdf56c402d60bb3f755df38e0,598
-llama_deploy/cli/textual/deployment_form.py,sha256=5566e2545ef9548b14b7d3d2b0c1bda1dcd99aca814d34823da6b9da1903b8df,20890
-llama_deploy/cli/textual/deployment_help.py,sha256=d43e9ff29db71a842cf8b491545763d581ede3132b8af518c73af85a40950046,2464
-llama_deploy/cli/textual/deployment_monitor.py,sha256=1e1ea3381575d19e655a2a9eda8253c7e7fe9a02a2d637fd1fdce94500dde168,15044
-llama_deploy/cli/textual/git_validation.py,sha256=44e359d16aa879f4566a0077d025fdd799f500862a8462b5ed3586e528f7a273,13300
-llama_deploy/cli/textual/github_callback_server.py,sha256=dc74c510f8a98ef6ffaab0f6d11c7ea86ee77ca5adbc7725a2a29112bae24191,7556
-llama_deploy/cli/textual/llama_loader.py,sha256=468213a504057f21838b01f48d51f52e60aa622d6f0fe5bb800d76ced846cea9,1245
-llama_deploy/cli/textual/profile_form.py,sha256=4410678edbd59b014f937ce760bafa51ae86f6dd58bec88f048a9eda273446aa,5956
-llama_deploy/cli/textual/secrets_form.py,sha256=a43fbd81aad034d0d60906bfd917c107f9ace414648b0f63ac0b29eeba4050db,7061
-llama_deploy/cli/textual/styles.tcss,sha256=536cec7627d2a16dd03bf25bb9b6e4d53f1e0d18272b07ec0dc3bf76b0a7c2e0,3056
-llamactl-0.3.0a8.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llamactl-0.3.0a8.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
-llamactl-0.3.0a8.dist-info/METADATA,sha256=f4d04dd44460d1065bbc21979f97b7194b05b6bee247ee2748a2704d857d5ea5,3166
-llamactl-0.3.0a8.dist-info/RECORD,,
{llamactl-0.3.0a8.dist-info → llamactl-0.3.0a10.dist-info}/WHEEL
File without changes

{llamactl-0.3.0a8.dist-info → llamactl-0.3.0a10.dist-info}/entry_points.txt
File without changes