llamactl 0.3.16__py3-none-any.whl → 0.3.17__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their registry, and is provided for informational purposes only. In short, 0.3.17 adds a `pkg` command group whose `container` subcommand generates a Dockerfile and `.dockerignore` for a llama-deploy project, and bumps the `llama-deploy-core` and `llama-deploy-appserver` dependency pins from 0.3.16 to 0.3.17.
llama_deploy/cli/__init__.py
@@ -4,6 +4,7 @@ from llama_deploy.cli.commands.auth import auth
 from llama_deploy.cli.commands.deployment import deployments
 from llama_deploy.cli.commands.env import env_group
 from llama_deploy.cli.commands.init import init
+from llama_deploy.cli.commands.pkg import pkg
 from llama_deploy.cli.commands.serve import serve

 from .app import app
@@ -22,7 +23,7 @@ def main() -> None:
     app()


-__all__ = ["app", "deployments", "auth", "serve", "init", "env_group"]
+__all__ = ["app", "deployments", "auth", "serve", "init", "env_group", "pkg"]


 if __name__ == "__main__":
llama_deploy/cli/commands/pkg.py (new file)
@@ -0,0 +1,122 @@
+from pathlib import Path
+
+import click
+from llama_deploy.cli.pkg import (
+    DEFAULT_DOCKER_IGNORE,
+    build_dockerfile_content,
+    infer_python_version,
+    pkg_container_options,
+)
+from llama_deploy.core.deployment_config import (
+    read_deployment_config_from_git_root_or_cwd,
+)
+from rich import print as rprint
+
+from ..app import app
+
+SUPPORTED_FORMATS = ["Docker", "Podman"]
+SUPPORTED_FORMATS_STR = ", ".join(SUPPORTED_FORMATS)
+
+
+@app.group(
+    help=f"Package your application in different formats. Currently supported: {SUPPORTED_FORMATS_STR}",
+    no_args_is_help=True,
+    context_settings={"max_content_width": None},
+)
+def pkg() -> None:
+    """Package application in different formats (Dockerfile, Podman config, Nixpack...)"""
+    pass
+
+
+@pkg.command(
+    "container",
+    help="Generate a minimal, build-ready file to containerize your workflows through Docker or Podman (currently frontend is not supported).",
+)
+@pkg_container_options
+def create_container_file(
+    deployment_file: Path,
+    python_version: str | None = None,
+    port: int = 4501,
+    exclude: tuple[str, ...] | None = None,
+    output_file: str = "Dockerfile",
+    dockerignore_path: str = ".dockerignore",
+    overwrite: bool = False,
+):
+    _create_file_for_container(
+        deployment_file=deployment_file,
+        python_version=python_version,
+        port=port,
+        exclude=exclude,
+        output_file=output_file,
+        dockerignore_path=dockerignore_path,
+        overwrite=overwrite,
+    )
+
+
+def _check_deployment_config(deployment_file: Path) -> Path:
+    if not deployment_file.exists():
+        rprint(f"[red]Deployment file '{deployment_file}' not found[/red]")
+        raise click.Abort()
+
+    # Early check: appserver requires a pyproject.toml in the config directory
+    config_dir = deployment_file if deployment_file.is_dir() else deployment_file.parent
+    if not (config_dir / "pyproject.toml").exists():
+        rprint(
+            "[red]No pyproject.toml found at[/red] "
+            f"[bold]{config_dir}[/bold].\n"
+            "Add a pyproject.toml to your project and re-run 'llamactl serve'."
+        )
+        raise click.Abort()
+
+    try:
+        config = read_deployment_config_from_git_root_or_cwd(
+            Path.cwd(), deployment_file
+        )
+    except Exception:
+        rprint(
+            "[red]Error: Could not read a deployment config. This doesn't appear to be a valid llama-deploy project.[/red]"
+        )
+        raise click.Abort()
+    if config.ui:
+        rprint(
+            "[bold red]Containerized UI builds are currently not supported. Please remove the UI configuration from your deployment file if you wish to proceed.[/]"
+        )
+        raise click.Abort()
+    return config_dir
+
+
+def _create_file_for_container(
+    deployment_file: Path,
+    output_file: str = "Dockerfile",
+    python_version: str | None = None,
+    port: int = 4501,
+    exclude: tuple[str, ...] | None = None,
+    dockerignore_path: str = ".dockerignore",
+    overwrite: bool = False,
+):
+    config_dir = _check_deployment_config(deployment_file=deployment_file)
+
+    if not python_version:
+        python_version = infer_python_version(config_dir)
+
+    dockerignore_content = DEFAULT_DOCKER_IGNORE
+    if exclude:
+        for item in exclude:
+            dockerignore_content += "\n" + item
+
+    dockerfile_content = build_dockerfile_content(python_version, port)
+
+    if Path(output_file).exists() and not overwrite:
+        rprint(
+            f"[red bold]Error: {output_file} already exists. If you wish to overwrite the file, pass `--overwrite` as a flag to the command.[/]"
+        )
+        raise click.Abort()
+    with open(output_file, "w") as f:
+        f.write(dockerfile_content)
+    if Path(dockerignore_path).exists() and not overwrite:
+        rprint(
+            f"[red bold]Error: {dockerignore_path} already exists. If you wish to overwrite the file, pass `--overwrite` as a flag to the command.[/]"
+        )
+        raise click.Abort()
+    with open(dockerignore_path, "w") as f:
+        f.write(dockerignore_content)
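
The new `pkg container` subcommand routes straight into `_create_file_for_container`. A minimal sketch of exercising it with click's test runner; it assumes the working directory already holds a valid deployment config, a `pyproject.toml`, and a lockfile, since `_check_deployment_config` aborts otherwise:

    # Sketch only: drive the new subcommand through click's CliRunner.
    from click.testing import CliRunner

    from llama_deploy.cli.commands.pkg import pkg

    runner = CliRunner()
    result = runner.invoke(
        pkg,
        ["container", "--port", "8080", "--exclude", "tests/", "--overwrite"],
    )
    # Exit code 0 means a Dockerfile and .dockerignore were written.
    print(result.exit_code, result.output)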
llama_deploy/cli/pkg/__init__.py (new file)
@@ -0,0 +1,10 @@
+from .defaults import DEFAULT_DOCKER_IGNORE
+from .options import pkg_container_options
+from .utils import build_dockerfile_content, infer_python_version
+
+__all__ = [
+    "infer_python_version",
+    "build_dockerfile_content",
+    "DEFAULT_DOCKER_IGNORE",
+    "pkg_container_options",
+]
llama_deploy/cli/pkg/defaults.py (new file)
@@ -0,0 +1,11 @@
+DEFAULT_DOCKER_IGNORE = """
+.venv/
+.git/
+__pycache__/
+*.py[oc]
+build/
+dist/
+wheels/
+*.egg-info
+.env
+"""
llama_deploy/cli/pkg/options.py (new file)
@@ -0,0 +1,84 @@
+from pathlib import Path
+from typing import Callable, ParamSpec, TypeVar
+
+import click
+from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def _deployment_file_option(f: Callable[P, R]) -> Callable[P, R]:
+    return click.argument(
+        "deployment_file",
+        required=False,
+        default=DEFAULT_DEPLOYMENT_FILE_PATH,
+        type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
+    )(f)
+
+
+def _python_version_option(f: Callable[P, R]) -> Callable[P, R]:
+    return click.option(
+        "--python-version",
+        help="Python version for the base image. Default is inferred from the uv project configuration (.python-version or pyproject.toml). If no version can be inferred, python 3.12 is used.",
+        required=False,
+        default=None,
+    )(f)
+
+
+def _port_option(f: Callable[P, R]) -> Callable[P, R]:
+    return click.option(
+        "--port",
+        help="The port to run the API server on. Defaults to 4501.",
+        required=False,
+        default=4501,
+        type=int,
+    )(f)
+
+
+def _dockerignore_path_option(f: Callable[P, R]) -> Callable[P, R]:
+    return click.option(
+        "--dockerignore-path",
+        help="Path for the output .dockerignore file. Defaults to .dockerignore",
+        required=False,
+        default=".dockerignore",
+    )(f)
+
+
+def _output_file_option(f: Callable[P, R]) -> Callable[P, R]:
+    return click.option(
+        "--output-file",
+        help="Path for the output file to build the image. Defaults to Dockerfile",
+        required=False,
+        default="Dockerfile",
+    )(f)
+
+
+def _overwrite_option(f: Callable[P, R]) -> Callable[P, R]:
+    return click.option(
+        "--overwrite",
+        help="Overwrite output files",
+        is_flag=True,
+    )(f)
+
+
+def _exclude_option(f: Callable[P, R]) -> Callable[P, R]:
+    return click.option(
+        "--exclude",
+        help="Path to exclude from the build (will be appended to .dockerignore). Can be used multiple times.",
+        multiple=True,
+        required=False,
+        default=None,
+    )(f)
+
+
+def pkg_container_options(f: Callable[P, R]) -> Callable[P, R]:
+    return _deployment_file_option(
+        _python_version_option(
+            _port_option(
+                _dockerignore_path_option(
+                    _overwrite_option(_exclude_option(_output_file_option(f)))
+                )
+            )
+        )
+    )
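
Each helper wraps the command function, and Python applies stacked decorators innermost-first, so `pkg_container_options` bundles all seven parameters behind a single decorator. A sketch of reusing it on a hypothetical command (the command name and body are illustrative, not part of the package):

    import click

    from llama_deploy.cli.pkg.options import pkg_container_options

    @click.command("show-options")  # hypothetical command, for illustration
    @pkg_container_options
    def show_options(deployment_file, python_version, port, exclude,
                     output_file, dockerignore_path, overwrite):
        # click passes every bundled option/argument as a keyword argument.
        click.echo(f"{deployment_file=} {python_version=} {port=}")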
llama_deploy/cli/pkg/utils.py (new file)
@@ -0,0 +1,46 @@
+from pathlib import Path
+from tomllib import load as load_toml
+
+
+def _get_min_py_version(requires_python: str):
+    min_v = requires_python.split(",")[0].strip()
+    return (
+        min_v.replace("=", "")
+        .replace(">", "")
+        .replace("<", "")
+        .replace("~", "")
+        .strip()
+    )
+
+
+def infer_python_version(config_dir: Path):
+    if (config_dir / ".python-version").exists():
+        with open(config_dir / ".python-version", "r") as f:
+            content = f.read()
+        if content.strip():
+            py_version = content.strip()
+            return py_version
+    with open(config_dir / "pyproject.toml", "rb") as f:
+        data = load_toml(f)
+    return _get_min_py_version(data.get("project", {}).get("requires-python", "3.12"))
+
+
+def build_dockerfile_content(python_version: str | None = None, port: int = 4501):
+    return f"""
+FROM python:{python_version}-slim-trixie
+
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
+
+WORKDIR /app
+
+COPY . /app/
+
+ENV PATH=/root/.local/bin:$PATH
+
+RUN uv sync --locked
+RUN uv tool install llamactl
+
+EXPOSE {port}
+
+ENTRYPOINT [ "uv", "run", "llamactl", "serve", "--host", "0.0.0.0", "--port", "{port}" ]
+"""
llamactl-0.3.17.dist-info/METADATA
@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.3.16
+Version: 0.3.17
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core[client]>=0.3.16,<0.4.0
-Requires-Dist: llama-deploy-appserver>=0.3.16,<0.4.0
+Requires-Dist: llama-deploy-core[client]>=0.3.17,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.17,<0.4.0
 Requires-Dist: vibe-llama-core>=0.1.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0
llamactl-0.3.17.dist-info/RECORD
@@ -1,4 +1,4 @@
-llama_deploy/cli/__init__.py,sha256=116170a773d7377f2e61bc8b006999d463af24027867be54ff9b20132970490f,781
+llama_deploy/cli/__init__.py,sha256=ae22d4cdf686aeef367d80d71938c62d7570d9bddd454c44fa78b3354f25d4f5,834
 llama_deploy/cli/app.py,sha256=9170e4f506c482522bd745eb1cdb700a198cfcfd7204c168c94e5ee2b6b43ffa,2199
 llama_deploy/cli/auth/client.py,sha256=3ebd2526f65f8d576e17d304df1b8a163d07586b88b5628cb36c9fa487a23ef6,11841
 llama_deploy/cli/client.py,sha256=f4053b5183224cff55c1393e78887d1af2597219135379a851b742c676adc154,1727
@@ -7,6 +7,7 @@ llama_deploy/cli/commands/auth.py,sha256=1381eee494c3a0c73253322b4a54af1a857d5b8
 llama_deploy/cli/commands/deployment.py,sha256=abea89e94ed978821a998453792d127605831ba23108df776873852ddd19f97e,14526
 llama_deploy/cli/commands/env.py,sha256=36cb1b0abb9e3d1c5546d3e8a3c4c7839c4d6c2abf75763e39efb08376b3eae9,6808
 llama_deploy/cli/commands/init.py,sha256=348ec90fb54a6774062b109afe8cc9ac7f3c59144f941fd5697001ea52d5b946,14981
+llama_deploy/cli/commands/pkg.py,sha256=31049a8266fba71a45920187ef983988bb5ba3b9ad81ab4b7bca6042a071a810,4068
 llama_deploy/cli/commands/serve.py,sha256=e1e91f17e13dce31ebadb4a1b91cda9e2e3eeb45f89f1db0ae3fadd879e8a2ab,12871
 llama_deploy/cli/config/_config.py,sha256=654a4b6d06542e3503edab7023fc1c3148de510b3e3f6194e28cd4bd3e7c029a,14230
 llama_deploy/cli/config/_migrations.py,sha256=37055641970e1ea41abc583f270dc8a9dab03076224a02cd5fb08bbab2b9259f,2333
@@ -21,6 +22,10 @@ llama_deploy/cli/env.py,sha256=d4b83c1f12e07f90893fcc7388d769de37dc2b41d345eb6bc
 llama_deploy/cli/interactive_prompts/session_utils.py,sha256=b996f2eddf70d6c49636c4797d246d212fce0950fe7e9a3f59cf6a1bf7ae26f5,1142
 llama_deploy/cli/interactive_prompts/utils.py,sha256=594cc2a242cc3405d66d0e26a60647496cc5fcb4ce7d0500a4cfec4888c9a0fa,516
 llama_deploy/cli/options.py,sha256=1bddcaf69c0293b07ce8b73fa4ef92d62ea5d8eecd7f66b65e957d4a59381243,2479
+llama_deploy/cli/pkg/__init__.py,sha256=6e5ba5891b4d71c046fd4759202c1326ea686aeaaa54e2cbf4e78c86a80d6286,286
+llama_deploy/cli/pkg/defaults.py,sha256=3d315935352f5271e301fc907420f44e616630911e6e6fb6a33bf9f57adb57c3,104
+llama_deploy/cli/pkg/options.py,sha256=540c619a2a11f72161b8e41002446cf9d3f62de605d6026d262d5e72cb36bd82,2418
+llama_deploy/cli/pkg/utils.py,sha256=b25348ac0f9ddc984ef98edc930aef0648ed182437e660cc60b0daf56172b171,1197
 llama_deploy/cli/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
 llama_deploy/cli/styles.py,sha256=15901fb567b0d10470f56a06d863819c4ed00a9f90b2a8c46b4bc2fb1dbdf6c3,307
 llama_deploy/cli/textual/deployment_form.py,sha256=92602faca36ba453e7b2c4ab04644e4bf4c12f98ff60d619ebd2697b06a70692,28953
@@ -34,7 +39,7 @@ llama_deploy/cli/textual/styles.tcss,sha256=2536f52ea1a654ae1f8990a25d45c845cb31
 llama_deploy/cli/utils/env_inject.py,sha256=01911758bcc3cf22aad0db0d1ade56aece48d6ad6bdb7186ea213337c67f5a89,688
 llama_deploy/cli/utils/redact.py,sha256=1e768d76b4a6708230c34f7ce8a5a82ab52795bb3d6ab0387071ab4e8d7e7934,863
 llama_deploy/cli/utils/version.py,sha256=bf01a6dda948b868cc08c93701ed44cd36b487402404af8451d4c0996a2edb31,364
-llamactl-0.3.16.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llamactl-0.3.16.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
-llamactl-0.3.16.dist-info/METADATA,sha256=d0fc19ba6e842db8013553c960a0ec24c47f7d4812ef570a5686806b249f4b49,3217
-llamactl-0.3.16.dist-info/RECORD,,
+llamactl-0.3.17.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llamactl-0.3.17.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
+llamactl-0.3.17.dist-info/METADATA,sha256=53b3447ce6786b1e6e5258271157338fb6f3c39c9ba2cb1529b9427a2fe85f34,3217
+llamactl-0.3.17.dist-info/RECORD,,