llamactl 0.3.11__py3-none-any.whl → 0.3.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_deploy/cli/auth/client.py +6 -2
- llama_deploy/cli/commands/init.py +128 -42
- llama_deploy/cli/commands/serve.py +12 -1
- llama_deploy/cli/options.py +47 -12
- {llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/METADATA +3 -4
- {llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/RECORD +8 -8
- {llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/WHEEL +0 -0
- {llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/entry_points.txt +0 -0
llama_deploy/cli/auth/client.py
CHANGED

@@ -9,6 +9,7 @@ import httpx
 import jwt
 from jwt.algorithms import RSAAlgorithm  # type: ignore[possibly-unbound-import]
 from llama_deploy.cli.config.schema import DeviceOIDC
+from llama_deploy.core.client.ssl_util import get_httpx_verify_param
 from pydantic import BaseModel

 logger = logging.getLogger(__name__)

@@ -57,10 +58,13 @@ class AuthMeResponse(BaseModel):
 class ClientContextManager(AsyncContextManager):
     def __init__(self, base_url: str | None, auth: httpx.Auth | None = None) -> None:
         self.base_url = base_url.rstrip("/") if base_url else None
+        verify = get_httpx_verify_param()
         if self.base_url:
-            self.client = httpx.AsyncClient(base_url=self.base_url, auth=auth)
+            self.client = httpx.AsyncClient(
+                base_url=self.base_url, auth=auth, verify=verify
+            )
         else:
-            self.client = httpx.AsyncClient(auth=auth)
+            self.client = httpx.AsyncClient(auth=auth, verify=verify)

     async def close(self) -> None:
         try:
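Both hunks thread the same `verify` value into every `httpx.AsyncClient` the auth module constructs. The helper, `get_httpx_verify_param()`, lives in `llama_deploy.core.client.ssl_util` (a separate package) and is not shown in this diff. A minimal sketch of the shape such a helper plausibly has, assuming it keys off the `LLAMA_DEPLOY_USE_TRUSTSTORE` variable set by the new `--native-tls` option in `options.py` below, and assuming the `truststore` package for OS trust-store access:

```python
# Not part of this diff: ssl_util ships in llama-deploy-core. A sketch only,
# under the assumptions named above.
import os
import ssl

import truststore  # assumed dependency; builds SSLContexts from the OS store


def get_httpx_verify_param() -> ssl.SSLContext | bool:
    """Return a value suitable for httpx's `verify=` argument."""
    if os.environ.get("LLAMA_DEPLOY_USE_TRUSTSTORE") == "1":
        # SSLContext backed by the platform trust store (Schannel, Security
        # framework, or OpenSSL's configured CA paths).
        return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    return True  # httpx default: verify against the bundled certifi CAs
```

Computing `verify` once in `__init__` and passing it to both constructor branches keeps the two clients consistent however the helper resolves.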
llama_deploy/cli/commands/init.py
CHANGED

@@ -9,12 +9,18 @@ from pathlib import Path

 import click
 import copier
+import httpx
 import questionary
 from click.exceptions import Exit
 from llama_deploy.cli.app import app
-from llama_deploy.cli.options import global_options
+from llama_deploy.cli.options import (
+    global_options,
+    interactive_option,
+)
 from llama_deploy.cli.styles import HEADER_COLOR_HEX
+from llama_deploy.core.client.ssl_util import get_httpx_verify_param
 from rich import print as rprint
+from rich.text import Text


 @app.command()

@@ -40,23 +46,24 @@ from rich import print as rprint
     help="Force overwrite the directory if it exists",
 )
 @global_options
+@interactive_option
 def init(
     update: bool,
     template: str | None,
     dir: Path | None,
     force: bool,
+    interactive: bool,
 ) -> None:
     """Create a new app repository from a template"""
     if update:
         _update()
     else:
-        _create(template, dir, force)
-
+        _create(template, dir, force, interactive)

-def _create(template: str | None, dir: Path | None, force: bool) -> None:
-    # defer loading to improve cli startup time
-    from vibe_llama.sdk import VibeLlamaStarter

+def _create(
+    template: str | None, dir: Path | None, force: bool, interactive: bool
+) -> None:
     @dataclass
     class TemplateOption:
         id: str

@@ -165,7 +172,23 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
         ),
     ]

-    if template is None:
+    # Initialize git repository if git is available
+    has_git = False
+    git_initialized = False
+    try:
+        subprocess.run(["git", "--version"], check=True, capture_output=True)
+        has_git = True
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        # git is not available or broken; continue without git
+        has_git = False
+
+    if not has_git:
+        rprint(
+            "git is required to initialize a template. Make sure you have it installed and available in your PATH."
+        )
+        raise Exit(1)
+
+    if template is None and interactive:
         rprint(
             "[bold]Select a template to start from.[/bold] Either with javascript frontend UI, or just a python workflow that can be used as an API."
         )

@@ -191,16 +214,26 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
             ),
         ).ask()
     if template is None:
-
+        options = [o.id for o in ui_options + headless_options]
+        rprint(
+            Text(
+                f"No template selected. Select a template or pass a template name with --template <{'|'.join(options)}>"
+            )
+        )
         raise Exit(1)
     if dir is None:
-
-
-
-
-
-
-
+        if interactive:
+            dir_str = questionary.text(
+                "Enter the directory to create the new app in", default=template
+            ).ask()
+            if dir_str:
+                dir = Path(dir_str)
+            else:
+                return
+        else:
+            rprint(f"[yellow]No directory provided. Defaulting to {template}[/]")
+            dir = Path(template)
+
     resolved_template: TemplateOption | None = next(
         (o for o in ui_options + headless_options if o.id == template), None
     )

@@ -208,49 +241,44 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
         rprint(f"Template {template} not found")
         raise Exit(1)
     if dir.exists():
-        is_ok = (
-
-
+        is_ok = force or (
+            interactive
+            and questionary.confirm("Directory exists. Overwrite?", default=False).ask()
         )
+
         if not is_ok:
+            rprint(
+                f"[yellow]Try again with another directory or pass --force to overwrite the existing directory '{str(dir)}'[/]"
+            )
             raise Exit(1)
         else:
             shutil.rmtree(dir, ignore_errors=True)

     copier.run_copy(
-        resolved_template.source.url,
-        dir,
-        quiet=True,
+        resolved_template.source.url, dir, quiet=True, defaults=not interactive
     )
-    # Initialize git repository if git is available
-    has_git = False
-    git_initialized = False
-    try:
-        subprocess.run(["git", "--version"], check=True, capture_output=True)
-        has_git = True
-    except (subprocess.CalledProcessError, FileNotFoundError):
-        # git is not available or broken; continue without git
-        has_git = False

     # Change to the new directory and initialize git repo
     original_cwd = Path.cwd()
     os.chdir(dir)

     try:
-        # Dump in a bunch of docs for AI agents
-
-
-
-
-            + (["LlamaCloud Services"] if resolved_template.llama_cloud else []),
+        # Dump in a bunch of docs for AI agents (best-effort)
+        docs_downloaded = asyncio.run(
+            _download_and_write_agents_md(
+                include_llama_cloud=resolved_template.llama_cloud
+            )
         )
-        asyncio.run(vibe_llama_starter.write_instructions(overwrite=True))
         # Create symlink for Claude.md to point to AGENTS.md
-
-
-
-
-
+        if docs_downloaded:
+            for alternate in [
+                "CLAUDE.md",
+                "GEMINI.md",
+            ]:  # don't support AGENTS.md (yet?)
+                claude_path = Path(alternate)  # not supported yet
+                agents_path = Path("AGENTS.md")
+                if agents_path.exists() and not claude_path.exists():
+                    claude_path.symlink_to("AGENTS.md")

         # Initialize a git repo (best-effort). If anything fails, show a friendly note and continue.
         if has_git:

@@ -369,3 +397,61 @@ def _update():
     except (subprocess.CalledProcessError, FileNotFoundError):
         # Git not available or not in a git repo - continue silently
         pass
+
+
+async def _download_and_write_agents_md(include_llama_cloud: bool) -> bool:
+    """Fetch a small set of reference docs and write AGENTS.md.
+
+    Replaces the previous vibe-llama usage with direct HTTP downloads.
+
+    Returns True if any documentation was fetched, False otherwise.
+    """
+    BASE_URL = "https://raw.githubusercontent.com/run-llama/vibe-llama/main"
+
+    services: dict[str, str] = {
+        "LlamaIndex": f"{BASE_URL}/documentation/llamaindex.md",
+        "LlamaCloud Services": f"{BASE_URL}/documentation/llamacloud.md",
+        "llama-index-workflows": f"{BASE_URL}/documentation/llama-index-workflows.md",
+        "LlamaDeploy": f"{BASE_URL}/documentation/llamadeploy.md",
+    }
+
+    selected_services: list[str] = [
+        "LlamaDeploy",
+        "LlamaIndex",
+        "llama-index-workflows",
+    ]
+    if include_llama_cloud:
+        selected_services.append("LlamaCloud Services")
+
+    urls: list[str] = [(s, u) for s in selected_services if (u := services.get(s))]
+
+    contents: list[str] = []
+    timeout = httpx.Timeout(5.0)
+    async with httpx.AsyncClient(
+        timeout=timeout, verify=get_httpx_verify_param()
+    ) as client:
+
+        async def get_docs(service: str, url: str) -> str | None:
+            try:
+                resp = await client.get(url)
+                resp.raise_for_status()
+                text = resp.text.strip()
+                if text:
+                    return text
+            except Exception:
+                # best-effort: skip failures
+                rprint(
+                    f"[yellow]Failed to fetch documentation for {service}, skipping[/]"
+                )
+                return None
+
+        results = await asyncio.gather(
+            *[get_docs(service, url) for service, url in urls]
+        )
+        contents = [r for r in results if r is not None]
+
+    if contents:
+        agents_md = "\n\n---\n\n".join(contents) + "\n"
+        Path("AGENTS.md").write_text(agents_md, encoding="utf-8")
+
+    return bool(contents)
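One small wrinkle in the new `_download_and_write_agents_md`: `urls` is annotated `list[str]`, but the walrus-filtered comprehension collects `(service, url)` tuples, which is exactly what the `asyncio.gather` call unpacks. A self-contained sketch of the same comprehension with an annotation that matches the runtime value (dict contents here are placeholders, not the real URLs):

```python
# Hypothetical standalone example; only the annotation differs from the diff.
services: dict[str, str] = {
    "LlamaDeploy": "https://example.invalid/llamadeploy.md",  # placeholder
}
selected_services: list[str] = ["LlamaDeploy", "LlamaIndex"]

# Each element is a (service, url) pair; the walrus filter also drops any
# service missing from the mapping ("LlamaIndex" here).
urls: list[tuple[str, str]] = [
    (s, u) for s in selected_services if (u := services.get(s))
]
print(urls)  # [('LlamaDeploy', 'https://example.invalid/llamadeploy.md')]
```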
llama_deploy/cli/commands/serve.py
CHANGED

@@ -10,7 +10,10 @@ from click.exceptions import Abort, Exit
 from llama_deploy.cli.commands.auth import validate_authenticated_profile
 from llama_deploy.cli.config.env_service import service
 from llama_deploy.cli.config.schema import Auth
-from llama_deploy.cli.options import interactive_option
+from llama_deploy.cli.options import (
+    interactive_option,
+    native_tls_option,
+)
 from llama_deploy.cli.styles import WARNING
 from llama_deploy.cli.utils.redact import redact_api_key
 from llama_deploy.core.client.manage_client import ControlPlaneClient

@@ -72,7 +75,13 @@ logger = logging.getLogger(__name__)
     type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
     help="The path to the sqlite database to use for the workflow server if using local persistence",
 )
+@click.option(
+    "--host",
+    type=str,
+    help="The host to run the API server on. Default is 127.0.0.1. Use 0.0.0.0 to allow remote access.",
+)
 @interactive_option
+@native_tls_option
 def serve(
     deployment_file: Path,
     no_install: bool,

@@ -85,6 +94,7 @@ def serve(
     log_format: str | None = None,
     persistence: Literal["memory", "local", "cloud"] | None = None,
     local_persistence_path: Path | None = None,
+    host: str | None = None,
     interactive: bool = False,
 ) -> None:
     """Run llama_deploy API Server in the foreground. Reads the deployment configuration from the current directory. Can optionally specify a deployment file path."""

@@ -141,6 +151,7 @@
             cloud_persistence_name=f"_public:serve_workflows_{deployment_config.name}"
             if persistence == "cloud"
             else None,
+            host=host,
         )

     except (Exit, Abort):
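The `--host` pass-through means `llamactl serve --host 0.0.0.0` exposes the API server beyond the local machine. How the appserver consumes `host` is not shown in this diff; a minimal sketch, assuming a uvicorn-backed server underneath (the app and port are illustrative, not taken from the package):

```python
# Sketch under stated assumptions: loopback vs. all-interfaces binding.
import uvicorn
from fastapi import FastAPI

app = FastAPI()


def run(host: str | None = None) -> None:
    # None falls back to loopback: reachable only from the same machine.
    # "0.0.0.0" binds every interface, so other hosts can connect.
    uvicorn.run(app, host=host or "127.0.0.1", port=4501)


if __name__ == "__main__":
    run(host="0.0.0.0")
```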
llama_deploy/cli/options.py
CHANGED

@@ -1,4 +1,5 @@
 import logging
+import os
 from typing import Callable, ParamSpec, TypeVar

 import click

@@ -13,6 +14,52 @@ R = TypeVar("R")
 def global_options(f: Callable[P, R]) -> Callable[P, R]:
     """Common decorator to add global options to command groups"""

+    return native_tls_option(file_logging(f))
+
+
+def interactive_option(f: Callable[P, R]) -> Callable[P, R]:
+    """Add an interactive option to the command"""
+
+    default = is_interactive_session()
+    return click.option(
+        "--interactive/--no-interactive",
+        help="Run in interactive mode. If not provided, will default to the current session's interactive state.",
+        is_flag=True,
+        default=default,
+    )(f)
+
+
+def native_tls_option(f: Callable[P, R]) -> Callable[P, R]:
+    """Enable native TLS to trust system configured trust store rather than python bundled trust stores.
+
+    When enabled, we set:
+    - UV_NATIVE_TLS=1 to instruct uv to use the platform trust store
+    - LLAMA_DEPLOY_USE_TRUSTSTORE=1 to use system certificate store for Python httpx clients
+    """
+
+    def _enable_native_tls(
+        ctx: click.Context, param: click.Parameter, value: bool
+    ) -> bool:
+        if value:
+            # Don't override if user explicitly set a value
+            os.environ.setdefault("UV_NATIVE_TLS", "1")
+            os.environ.setdefault("LLAMA_DEPLOY_USE_TRUSTSTORE", "1")
+        return value
+
+    return click.option(
+        "--native-tls",
+        is_flag=True,
+        help=(
+            "Enable native TLS mode to use system certificate store rather than runtime defaults. Can be set via LLAMACTL_NATIVE_TLS=1"
+        ),
+        callback=_enable_native_tls,
+        expose_value=False,
+        is_eager=True,
+        envvar=["LLAMACTL_NATIVE_TLS"],
+    )(f)
+
+
+def file_logging(f: Callable[P, R]) -> Callable[P, R]:
     def debug_callback(ctx: click.Context, param: click.Parameter, value: str) -> str:
         if value:
             setup_file_logging(level=logging._nameToLevel.get(value, logging.INFO))

@@ -29,15 +76,3 @@ def global_options(f: Callable[P, R]) -> Callable[P, R]:
         is_eager=True,
         hidden=True,
     )(f)
-
-
-def interactive_option(f: Callable[P, R]) -> Callable[P, R]:
-    """Add an interactive option to the command"""
-
-    default = is_interactive_session()
-    return click.option(
-        "--interactive/--no-interactive",
-        help="Run in interactive mode. If not provided, will default to the current session's interactive state.",
-        is_flag=True,
-        default=default,
-    )(f)
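The new `native_tls_option` is a compact example of a click idiom: an eager flag whose callback runs before non-eager parameters are processed, mutates `os.environ` as a side effect, and, with `expose_value=False`, never reaches the command function's signature. `os.environ.setdefault` means an explicitly exported variable wins over the flag. A standalone demonstration of the same pattern (the command here is hypothetical, not from the package):

```python
import os

import click


def native_tls_flag(f):
    def _cb(ctx: click.Context, param: click.Parameter, value: bool) -> bool:
        if value:
            # setdefault keeps any value the user already exported
            os.environ.setdefault("UV_NATIVE_TLS", "1")
        return value

    return click.option(
        "--native-tls",
        is_flag=True,
        callback=_cb,
        expose_value=False,  # not passed to the decorated function
        is_eager=True,  # processed before non-eager parameters
        envvar="LLAMACTL_NATIVE_TLS",  # env var can stand in for the flag
    )(f)


@click.command()
@native_tls_flag
def demo() -> None:
    click.echo(os.environ.get("UV_NATIVE_TLS", "<unset>"))


if __name__ == "__main__":
    demo()  # `demo --native-tls` (or LLAMACTL_NATIVE_TLS=1) prints "1"
```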
{llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/METADATA
CHANGED

@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.3.11
+Version: 0.3.13
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core[client]>=0.3.11,<0.4.0
-Requires-Dist: llama-deploy-appserver>=0.3.11,<0.4.0
+Requires-Dist: llama-deploy-core[client]>=0.3.13,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.13,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0

@@ -17,7 +17,6 @@ Requires-Dist: textual>=6.0.0
 Requires-Dist: aiohttp>=3.12.14
 Requires-Dist: copier>=9.9.0
 Requires-Dist: pyjwt[crypto]>=2.10.1
-Requires-Dist: vibe-llama>=0.4.4,<0.5.0
 Requires-Python: >=3.11, <4
 Description-Content-Type: text/markdown

{llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/RECORD
CHANGED

@@ -1,13 +1,13 @@
 llama_deploy/cli/__init__.py,sha256=116170a773d7377f2e61bc8b006999d463af24027867be54ff9b20132970490f,781
 llama_deploy/cli/app.py,sha256=9170e4f506c482522bd745eb1cdb700a198cfcfd7204c168c94e5ee2b6b43ffa,2199
-llama_deploy/cli/auth/client.py,sha256=
+llama_deploy/cli/auth/client.py,sha256=3ebd2526f65f8d576e17d304df1b8a163d07586b88b5628cb36c9fa487a23ef6,11841
 llama_deploy/cli/client.py,sha256=f4053b5183224cff55c1393e78887d1af2597219135379a851b742c676adc154,1727
 llama_deploy/cli/commands/aliased_group.py,sha256=bc41007c97b7b93981217dbd4d4591df2b6c9412a2d9ed045b0ec5655ed285f2,1066
 llama_deploy/cli/commands/auth.py,sha256=1381eee494c3a0c73253322b4a54af1a857d5b89e5f1685b8afa3422eecc5607,23937
 llama_deploy/cli/commands/deployment.py,sha256=46339e09135521c46ff90235ccf765c37b1a161cec11d92e92a54ceac6528b01,9883
 llama_deploy/cli/commands/env.py,sha256=36cb1b0abb9e3d1c5546d3e8a3c4c7839c4d6c2abf75763e39efb08376b3eae9,6808
-llama_deploy/cli/commands/init.py,sha256=
-llama_deploy/cli/commands/serve.py,sha256=
+llama_deploy/cli/commands/init.py,sha256=68d6dbfd21596cb0012009c32f023e3b6960415ef69363b2b328e15ae17029e6,15974
+llama_deploy/cli/commands/serve.py,sha256=e1e91f17e13dce31ebadb4a1b91cda9e2e3eeb45f89f1db0ae3fadd879e8a2ab,12871
 llama_deploy/cli/config/_config.py,sha256=654a4b6d06542e3503edab7023fc1c3148de510b3e3f6194e28cd4bd3e7c029a,14230
 llama_deploy/cli/config/_migrations.py,sha256=37055641970e1ea41abc583f270dc8a9dab03076224a02cd5fb08bbab2b9259f,2333
 llama_deploy/cli/config/auth_service.py,sha256=9e62ed2ea112e6142a5d384835568d4a926627eb58730af89bef9420f549d42e,5126

@@ -20,7 +20,7 @@ llama_deploy/cli/debug.py,sha256=e85a72d473bbe1645eb31772f7349bde703d45704166f76
 llama_deploy/cli/env.py,sha256=d4b83c1f12e07f90893fcc7388d769de37dc2b41d345eb6bc2041c39b4fb2c31,1057
 llama_deploy/cli/interactive_prompts/session_utils.py,sha256=b996f2eddf70d6c49636c4797d246d212fce0950fe7e9a3f59cf6a1bf7ae26f5,1142
 llama_deploy/cli/interactive_prompts/utils.py,sha256=594cc2a242cc3405d66d0e26a60647496cc5fcb4ce7d0500a4cfec4888c9a0fa,516
-llama_deploy/cli/options.py,sha256=
+llama_deploy/cli/options.py,sha256=1bddcaf69c0293b07ce8b73fa4ef92d62ea5d8eecd7f66b65e957d4a59381243,2479
 llama_deploy/cli/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
 llama_deploy/cli/styles.py,sha256=15901fb567b0d10470f56a06d863819c4ed00a9f90b2a8c46b4bc2fb1dbdf6c3,307
 llama_deploy/cli/textual/deployment_form.py,sha256=f153aa256bef51598432190604822d3f5295d1715696de6c0234dfe92fa5173d,27906

@@ -34,7 +34,7 @@ llama_deploy/cli/textual/styles.tcss,sha256=2536f52ea1a654ae1f8990a25d45c845cb31
 llama_deploy/cli/utils/env_inject.py,sha256=01911758bcc3cf22aad0db0d1ade56aece48d6ad6bdb7186ea213337c67f5a89,688
 llama_deploy/cli/utils/redact.py,sha256=1e768d76b4a6708230c34f7ce8a5a82ab52795bb3d6ab0387071ab4e8d7e7934,863
 llama_deploy/cli/utils/version.py,sha256=bf01a6dda948b868cc08c93701ed44cd36b487402404af8451d4c0996a2edb31,364
-llamactl-0.3.11.dist-info/WHEEL,sha256=
-llamactl-0.3.11.dist-info/entry_points.txt,sha256=
-llamactl-0.3.11.dist-info/METADATA,sha256=
-llamactl-0.3.11.dist-info/RECORD,,
+llamactl-0.3.13.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llamactl-0.3.13.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
+llamactl-0.3.13.dist-info/METADATA,sha256=110d516438520370097db9b543db32fedbbc9cef3a03a2508ac7b64339b258ce,3215
+llamactl-0.3.13.dist-info/RECORD,,

{llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/WHEEL
File without changes

{llamactl-0.3.11.dist-info → llamactl-0.3.13.dist-info}/entry_points.txt
File without changes