llamactl 0.3.23__py3-none-any.whl → 0.3.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. llama_deploy/cli/app.py +7 -4
  2. llama_deploy/cli/auth/client.py +19 -2
  3. llama_deploy/cli/client.py +4 -1
  4. llama_deploy/cli/commands/aliased_group.py +11 -3
  5. llama_deploy/cli/commands/auth.py +105 -37
  6. llama_deploy/cli/commands/deployment.py +47 -17
  7. llama_deploy/cli/commands/dev.py +126 -11
  8. llama_deploy/cli/commands/env.py +30 -5
  9. llama_deploy/cli/commands/init.py +33 -10
  10. llama_deploy/cli/commands/pkg.py +2 -2
  11. llama_deploy/cli/commands/serve.py +21 -15
  12. llama_deploy/cli/config/_config.py +4 -4
  13. llama_deploy/cli/config/_migrations.py +7 -2
  14. llama_deploy/cli/config/auth_service.py +1 -1
  15. llama_deploy/cli/config/migrations/0001_init.sql +1 -1
  16. llama_deploy/cli/config/migrations/0002_add_auth_fields.sql +0 -2
  17. llama_deploy/cli/pkg/options.py +4 -1
  18. llama_deploy/cli/pkg/utils.py +8 -5
  19. llama_deploy/cli/textual/deployment_form.py +5 -3
  20. llama_deploy/cli/textual/deployment_help.py +8 -7
  21. llama_deploy/cli/textual/deployment_monitor.py +8 -5
  22. llama_deploy/cli/textual/git_validation.py +45 -8
  23. llama_deploy/cli/textual/github_callback_server.py +12 -12
  24. llama_deploy/cli/textual/llama_loader.py +25 -19
  25. llama_deploy/cli/textual/secrets_form.py +2 -1
  26. llama_deploy/cli/textual/styles.tcss +1 -1
  27. llama_deploy/cli/utils/retry.py +49 -0
  28. {llamactl-0.3.23.dist-info → llamactl-0.3.25.dist-info}/METADATA +7 -5
  29. llamactl-0.3.25.dist-info/RECORD +47 -0
  30. llamactl-0.3.23.dist-info/RECORD +0 -46
  31. {llamactl-0.3.23.dist-info → llamactl-0.3.25.dist-info}/WHEEL +0 -0
  32. {llamactl-0.3.23.dist-info → llamactl-0.3.25.dist-info}/entry_points.txt +0 -0
@@ -6,14 +6,6 @@ from pathlib import Path
6
6
 
7
7
  import click
8
8
  from click.exceptions import Abort, Exit
9
- from llama_deploy.appserver.app import prepare_server, start_preflight_in_target_venv
10
- from llama_deploy.appserver.deployment_config_parser import get_deployment_config
11
- from llama_deploy.appserver.settings import configure_settings, settings
12
- from llama_deploy.appserver.workflow_loader import (
13
- load_environment_variables,
14
- parse_environment_variables,
15
- validate_required_env_vars,
16
- )
17
9
  from llama_deploy.cli.commands.aliased_group import AliasedGroup
18
10
  from llama_deploy.cli.commands.serve import (
19
11
  _maybe_inject_llama_cloud_credentials,
@@ -29,6 +21,8 @@ from rich import print as rprint
29
21
 
30
22
  from ..app import app
31
23
 
24
+ _ClickPath = getattr(click, "Path")
25
+
32
26
 
33
27
  @app.group(
34
28
  name="dev",
@@ -52,7 +46,7 @@ dev.add_command(serve_command, name="serve")
52
46
  "deployment_file",
53
47
  required=False,
54
48
  default=DEFAULT_DEPLOYMENT_FILE_PATH,
55
- type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
49
+ type=_ClickPath(dir_okay=True, resolve_path=True, path_type=Path),
56
50
  )
57
51
  @interactive_option
58
52
  @global_options
@@ -85,6 +79,69 @@ def validate_command(deployment_file: Path, interactive: bool) -> None:
85
79
  rprint(f"[green]Validated workflows in {config_dir} successfully.[/green]")
86
80
 
87
81
 
82
+ @dev.command(
83
+ "export-json-graph",
84
+ help="Produce a JSON graph representation of registered workflows",
85
+ hidden=True, # perhaps expose if we have a built in visualization (mermaid, etc.)
86
+ )
87
+ @click.argument(
88
+ "deployment_file",
89
+ required=False,
90
+ default=DEFAULT_DEPLOYMENT_FILE_PATH,
91
+ type=_ClickPath(dir_okay=True, resolve_path=True, path_type=Path),
92
+ )
93
+ @click.option(
94
+ "--output",
95
+ help=(
96
+ "File where output JSON graph will be saved. "
97
+ "Defaults to workflows.json in the current directory."
98
+ ),
99
+ required=False,
100
+ default=None,
101
+ type=_ClickPath(dir_okay=True, resolve_path=True, path_type=Path),
102
+ )
103
+ @interactive_option
104
+ @global_options
105
+ def export_json_graph_command(
106
+ deployment_file: Path,
107
+ output: Path | None,
108
+ interactive: bool,
109
+ ) -> None:
110
+ """Export the configured workflows to a JSON document that may be used for graph visualization."""
111
+ if not deployment_file.exists():
112
+ rprint(f"[red]Deployment file '{deployment_file}' does not exist[/red]")
113
+ raise click.Abort()
114
+
115
+ _ensure_project_layout(
116
+ deployment_file, command_name="llamactl dev export-json-graph"
117
+ )
118
+ _maybe_inject_llama_cloud_credentials(
119
+ deployment_file, interactive, require_cloud=False
120
+ )
121
+
122
+ prepare_server(
123
+ deployment_file=deployment_file,
124
+ install=True,
125
+ build=False,
126
+ install_ui_deps=False,
127
+ )
128
+
129
+ wd = Path.cwd()
130
+ if output is None:
131
+ output = wd / "workflows.json"
132
+
133
+ try:
134
+ start_export_json_graph_in_target_venv(
135
+ cwd=wd,
136
+ deployment_file=deployment_file,
137
+ output=output,
138
+ )
139
+ except subprocess.CalledProcessError as exc:
140
+ rprint("[red]Workflow JSON graph export failed. See errors above.[/red]")
141
+ raise Exit(exc.returncode)
142
+ rprint(f"[green]Exported workflow JSON graph to {output}[/green]")
143
+
144
+
88
145
  @dev.command(
89
146
  "run",
90
147
  help=(
@@ -98,7 +155,7 @@ def validate_command(deployment_file: Path, interactive: bool) -> None:
98
155
  "deployment_file",
99
156
  "--deployment-file",
100
157
  default=DEFAULT_DEPLOYMENT_FILE_PATH,
101
- type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
158
+ type=_ClickPath(dir_okay=True, resolve_path=True, path_type=Path),
102
159
  help="The deployment file to use for the command",
103
160
  )
104
161
  @click.option(
@@ -159,6 +216,15 @@ def _ensure_project_layout(deployment_file: Path, *, command_name: str) -> Path:
159
216
  def _prepare_environment(
160
217
  deployment_file: Path, interactive: bool, *, require_cloud: bool
161
218
  ) -> tuple[DeploymentConfig, Path]:
219
+ from llama_deploy.appserver.deployment_config_parser import (
220
+ get_deployment_config,
221
+ )
222
+ from llama_deploy.appserver.settings import configure_settings, settings
223
+ from llama_deploy.appserver.workflow_loader import (
224
+ load_environment_variables,
225
+ validate_required_env_vars,
226
+ )
227
+
162
228
  _maybe_inject_llama_cloud_credentials(
163
229
  deployment_file, interactive, require_cloud=require_cloud
164
230
  )
@@ -173,4 +239,53 @@ def _prepare_environment(
173
239
  return config, config_parent
174
240
 
175
241
 
176
- __all__ = ["dev", "validate_command", "run_command"]
242
+ def prepare_server(
243
+ *, deployment_file: Path, install: bool, build: bool, install_ui_deps: bool
244
+ ) -> None:
245
+ """Thin wrapper so tests can monkeypatch `dev.prepare_server` without importing appserver at import time."""
246
+ from llama_deploy.appserver.app import prepare_server as _prepare_server
247
+
248
+ _prepare_server(
249
+ deployment_file=deployment_file,
250
+ install=install,
251
+ build=build,
252
+ install_ui_deps=install_ui_deps,
253
+ )
254
+
255
+
256
+ def start_preflight_in_target_venv(*, cwd: Path, deployment_file: Path) -> None:
257
+ """Thin wrapper so tests can monkeypatch `dev.start_preflight_in_target_venv`."""
258
+ from llama_deploy.appserver.app import (
259
+ start_preflight_in_target_venv as _start_preflight_in_target_venv,
260
+ )
261
+
262
+ _start_preflight_in_target_venv(cwd=cwd, deployment_file=deployment_file)
263
+
264
+
265
+ def start_export_json_graph_in_target_venv(
266
+ *, cwd: Path, deployment_file: Path, output: Path
267
+ ) -> None:
268
+ """Thin wrapper so tests can monkeypatch `dev.start_export_json_graph_in_target_venv`."""
269
+ from llama_deploy.appserver.app import (
270
+ start_export_json_graph_in_target_venv as _start_export_json_graph_in_target_venv,
271
+ )
272
+
273
+ _start_export_json_graph_in_target_venv(
274
+ cwd=cwd,
275
+ deployment_file=deployment_file,
276
+ output=output,
277
+ )
278
+
279
+
280
+ def parse_environment_variables(
281
+ config: DeploymentConfig, config_parent: Path
282
+ ) -> dict[str, str]:
283
+ """Wrapper used by tests; imports workflow loader lazily."""
284
+ from llama_deploy.appserver.workflow_loader import (
285
+ parse_environment_variables as _parse_environment_variables,
286
+ )
287
+
288
+ return _parse_environment_variables(config, config_parent)
289
+
290
+
291
+ __all__ = ["dev", "validate_command", "run_command", "export_json_graph_command"]
@@ -1,7 +1,9 @@
1
+ from __future__ import annotations
2
+
1
3
  from importlib import metadata as importlib_metadata
4
+ from typing import TYPE_CHECKING
2
5
 
3
6
  import click
4
- import questionary
5
7
  from llama_deploy.cli.config.schema import Environment
6
8
  from llama_deploy.cli.styles import (
7
9
  ACTIVE_INDICATOR,
@@ -16,10 +18,23 @@ from rich.table import Table
16
18
  from rich.text import Text
17
19
 
18
20
  from ..app import console
19
- from ..config.env_service import service
20
21
  from ..options import global_options, interactive_option
21
22
  from .auth import auth
22
23
 
24
+ if TYPE_CHECKING:
25
+ from llama_deploy.cli.config.env_service import EnvService
26
+
27
+
28
+ def _env_service() -> EnvService:
29
+ """Return the shared EnvService instance via a local import.
30
+
31
+ This keeps CLI startup light while remaining easy to patch in tests via
32
+ ``llama_deploy.cli.config.env_service.service``.
33
+ """
34
+ from ..config.env_service import service
35
+
36
+ return service
37
+
23
38
 
24
39
  @auth.group(
25
40
  name="env",
@@ -35,6 +50,7 @@ def env_group() -> None:
35
50
  @global_options
36
51
  def list_environments_cmd() -> None:
37
52
  try:
53
+ service = _env_service()
38
54
  envs = service.list_environments()
39
55
  current_env = service.get_current_environment()
40
56
 
@@ -67,11 +83,14 @@ def list_environments_cmd() -> None:
67
83
  @global_options
68
84
  def add_environment_cmd(api_url: str | None, interactive: bool) -> None:
69
85
  try:
86
+ service = _env_service()
70
87
  if not api_url:
71
88
  if not interactive:
72
89
  raise click.ClickException("API URL is required when not interactive")
73
90
  current_env = service.get_current_environment()
74
- entered = questionary.text(
91
+ from questionary import text
92
+
93
+ entered = text(
75
94
  "Enter control plane API URL", default=current_env.api_url
76
95
  ).ask()
77
96
  if not entered:
@@ -79,6 +98,8 @@ def add_environment_cmd(api_url: str | None, interactive: bool) -> None:
79
98
  return
80
99
  api_url = entered.strip()
81
100
 
101
+ if api_url is None:
102
+ raise click.ClickException("API URL is required")
82
103
  api_url = api_url.rstrip("/")
83
104
  env = service.probe_environment(api_url)
84
105
  service.create_or_update_environment(env)
@@ -97,6 +118,7 @@ def add_environment_cmd(api_url: str | None, interactive: bool) -> None:
97
118
  @global_options
98
119
  def delete_environment_cmd(api_url: str | None, interactive: bool) -> None:
99
120
  try:
121
+ service = _env_service()
100
122
  if not api_url:
101
123
  if not interactive:
102
124
  raise click.ClickException("API URL is required when not interactive")
@@ -111,6 +133,8 @@ def delete_environment_cmd(api_url: str | None, interactive: bool) -> None:
111
133
  return
112
134
  api_url = result.api_url
113
135
 
136
+ if api_url is None:
137
+ raise click.ClickException("API URL is required")
114
138
  api_url = api_url.rstrip("/")
115
139
  deleted = service.delete_environment(api_url)
116
140
  if not deleted:
@@ -129,6 +153,7 @@ def delete_environment_cmd(api_url: str | None, interactive: bool) -> None:
129
153
  @global_options
130
154
  def switch_environment_cmd(api_url: str | None, interactive: bool) -> None:
131
155
  try:
156
+ service = _env_service()
132
157
  selected_url = api_url
133
158
 
134
159
  if not selected_url and interactive:
@@ -193,12 +218,12 @@ def _select_environment(
193
218
  current_env: Environment,
194
219
  message: str = "Select environment",
195
220
  ) -> Environment | None:
196
- envs = service.list_environments()
197
- current_env = service.get_current_environment()
198
221
  if not envs:
199
222
  raise click.ClickException(
200
223
  "No environments found. This is a bug and shouldn't happen."
201
224
  )
225
+ import questionary
226
+
202
227
  return questionary.select(
203
228
  message,
204
229
  choices=[
@@ -6,10 +6,9 @@ import shutil
6
6
  import subprocess
7
7
  from dataclasses import dataclass
8
8
  from pathlib import Path
9
+ from typing import TYPE_CHECKING, cast
9
10
 
10
11
  import click
11
- import copier
12
- import questionary
13
12
  from click.exceptions import Exit
14
13
  from llama_deploy.cli.app import app
15
14
  from llama_deploy.cli.options import (
@@ -19,7 +18,12 @@ from llama_deploy.cli.options import (
19
18
  from llama_deploy.cli.styles import HEADER_COLOR_HEX
20
19
  from rich import print as rprint
21
20
  from rich.text import Text
22
- from vibe_llama_core.docs import get_agent_rules
21
+
22
+ if TYPE_CHECKING:
23
+ pass
24
+
25
+
26
+ _ClickPath = getattr(click, "Path")
23
27
 
24
28
 
25
29
  @app.command()
@@ -35,8 +39,12 @@ from vibe_llama_core.docs import get_agent_rules
35
39
  @click.option(
36
40
  "--dir",
37
41
  help="The directory to create the new app in",
38
- type=click.Path(
39
- file_okay=False, dir_okay=True, writable=True, resolve_path=True, path_type=Path
42
+ type=_ClickPath(
43
+ file_okay=False,
44
+ dir_okay=True,
45
+ writable=True,
46
+ resolve_path=True,
47
+ path_type=Path,
40
48
  ),
41
49
  )
42
50
  @click.option(
@@ -63,6 +71,8 @@ def init(
63
71
  def _create(
64
72
  template: str | None, dir: Path | None, force: bool, interactive: bool
65
73
  ) -> None:
74
+ import questionary
75
+
66
76
  @dataclass
67
77
  class TemplateOption:
68
78
  id: str
@@ -271,8 +281,15 @@ def _create(
271
281
  else:
272
282
  shutil.rmtree(dir, ignore_errors=True)
273
283
 
284
+ # Import copier lazily at call time to keep CLI startup light while still
285
+ # allowing tests to patch ``copier.run_copy`` directly.
286
+ import copier
287
+
274
288
  copier.run_copy(
275
- resolved_template.source.url, dir, quiet=True, defaults=not interactive
289
+ resolved_template.source.url,
290
+ dir,
291
+ quiet=True,
292
+ defaults=not interactive,
276
293
  )
277
294
 
278
295
  # Change to the new directory and initialize git repo
@@ -392,15 +409,19 @@ def _create(
392
409
  rprint("")
393
410
 
394
411
 
395
- def _update():
412
+ def _update() -> None:
396
413
  """Update the app to the latest version"""
397
414
  try:
415
+ # Import copier lazily so the init command remains lightweight when
416
+ # unused, while tests can patch ``copier.run_update`` directly.
417
+ import copier
418
+
398
419
  copier.run_update(
399
420
  overwrite=True,
400
421
  skip_answered=True,
401
422
  quiet=True,
402
423
  )
403
- except copier.UserMessageError as e:
424
+ except Exception as e: # scoped to copier errors; type opaque here
404
425
  rprint(f"{e}")
405
426
  raise Exit(1)
406
427
 
@@ -448,6 +469,8 @@ async def _download_and_write_agents_md(include_llama_cloud: bool) -> bool:
448
469
 
449
470
  Returns True if any documentation was fetched, False otherwise.
450
471
  """
472
+ from vibe_llama_core.docs import get_agent_rules
473
+ from vibe_llama_core.docs.utils import LibraryName
451
474
 
452
475
  selected_services: list[str] = [
453
476
  "LlamaDeploy",
@@ -463,10 +486,10 @@ async def _download_and_write_agents_md(include_llama_cloud: bool) -> bool:
463
486
  try:
464
487
  await get_agent_rules(
465
488
  agent="OpenAI Codex CLI",
466
- service=service,
489
+ service=cast(LibraryName, service),
467
490
  overwrite_files=False,
468
491
  verbose=False,
469
- ) # type: ignore
492
+ )
470
493
  except Exception:
471
494
  rprint(f"[yellow]Failed to fetch documentation for {service}, skipping[/]")
472
495
  else:
@@ -41,7 +41,7 @@ def create_container_file(
41
41
  output_file: str = "Dockerfile",
42
42
  dockerignore_path: str = ".dockerignore",
43
43
  overwrite: bool = False,
44
- ):
44
+ ) -> None:
45
45
  _create_file_for_container(
46
46
  deployment_file=deployment_file,
47
47
  python_version=python_version,
@@ -93,7 +93,7 @@ def _create_file_for_container(
93
93
  exclude: tuple[str, ...] | None = None,
94
94
  dockerignore_path: str = ".dockerignore",
95
95
  overwrite: bool = False,
96
- ):
96
+ ) -> None:
97
97
  config_dir = _check_deployment_config(deployment_file=deployment_file)
98
98
 
99
99
  if not python_version:
@@ -1,22 +1,20 @@
1
+ from __future__ import annotations
2
+
1
3
  import asyncio
2
4
  import logging
3
5
  import os
4
6
  from pathlib import Path
5
- from typing import Literal
7
+ from typing import TYPE_CHECKING, Literal
6
8
 
7
9
  import click
8
- import questionary
9
10
  from click.exceptions import Abort, Exit
10
11
  from llama_deploy.cli.commands.auth import validate_authenticated_profile
11
- from llama_deploy.cli.config.env_service import service
12
- from llama_deploy.cli.config.schema import Auth
13
12
  from llama_deploy.cli.options import (
14
13
  interactive_option,
15
14
  native_tls_option,
16
15
  )
17
16
  from llama_deploy.cli.styles import WARNING
18
17
  from llama_deploy.cli.utils.redact import redact_api_key
19
- from llama_deploy.core.client.manage_client import ControlPlaneClient
20
18
  from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
21
19
  from llama_deploy.core.deployment_config import (
22
20
  read_deployment_config_from_git_root_or_cwd,
@@ -26,7 +24,11 @@ from rich import print as rprint
26
24
 
27
25
  from ..app import app
28
26
 
27
+ if TYPE_CHECKING:
28
+ from llama_deploy.cli.config.schema import Auth
29
+
29
30
  logger = logging.getLogger(__name__)
31
+ _ClickPath = getattr(click, "Path")
30
32
 
31
33
 
32
34
  @app.command(
@@ -37,7 +39,7 @@ logger = logging.getLogger(__name__)
37
39
  "deployment_file",
38
40
  required=False,
39
41
  default=DEFAULT_DEPLOYMENT_FILE_PATH,
40
- type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
42
+ type=_ClickPath(dir_okay=True, resolve_path=True, path_type=Path),
41
43
  )
42
44
  @click.option(
43
45
  "--no-install", is_flag=True, help="Skip installing python and js dependencies"
@@ -72,7 +74,7 @@ logger = logging.getLogger(__name__)
72
74
  )
73
75
  @click.option(
74
76
  "--local-persistence-path",
75
- type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
77
+ type=_ClickPath(dir_okay=True, resolve_path=True, path_type=Path),
76
78
  help="The path to the sqlite database to use for the workflow server if using local persistence",
77
79
  )
78
80
  @click.option(
@@ -165,13 +167,13 @@ def serve(
165
167
  raise click.Abort()
166
168
 
167
169
 
168
- def _set_env_vars_from_profile(profile: Auth):
170
+ def _set_env_vars_from_profile(profile: Auth) -> None:
169
171
  if profile.api_key:
170
172
  _set_env_vars(profile.api_key, profile.api_url)
171
173
  _set_project_id_from_profile(profile)
172
174
 
173
175
 
174
- def _set_env_vars_from_env(env_vars: dict[str, str]):
176
+ def _set_env_vars_from_env(env_vars: dict[str, str]) -> None:
175
177
  key = env_vars.get("LLAMA_CLOUD_API_KEY")
176
178
  url = env_vars.get("LLAMA_CLOUD_BASE_URL", "https://api.cloud.llamaindex.ai")
177
179
  # Also propagate project id if present in the environment
@@ -180,7 +182,7 @@ def _set_env_vars_from_env(env_vars: dict[str, str]):
180
182
  _set_env_vars(key, url)
181
183
 
182
184
 
183
- def _set_env_vars(key: str, url: str):
185
+ def _set_env_vars(key: str, url: str) -> None:
184
186
  os.environ["LLAMA_CLOUD_API_KEY"] = key
185
187
  os.environ["LLAMA_CLOUD_BASE_URL"] = url
186
188
  # kludge for common web servers to inject local auth key
@@ -189,13 +191,13 @@ def _set_env_vars(key: str, url: str):
189
191
  os.environ[f"{prefix}LLAMA_CLOUD_BASE_URL"] = url
190
192
 
191
193
 
192
- def _set_project_id_from_env(env_vars: dict[str, str]):
194
+ def _set_project_id_from_env(env_vars: dict[str, str]) -> None:
193
195
  project_id = env_vars.get("LLAMA_DEPLOY_PROJECT_ID")
194
196
  if project_id:
195
197
  os.environ["LLAMA_DEPLOY_PROJECT_ID"] = project_id
196
198
 
197
199
 
198
- def _set_project_id_from_profile(profile: Auth):
200
+ def _set_project_id_from_profile(profile: Auth) -> None:
199
201
  if profile.project_id:
200
202
  os.environ["LLAMA_DEPLOY_PROJECT_ID"] = profile.project_id
201
203
 
@@ -211,6 +213,10 @@ def _maybe_inject_llama_cloud_credentials(
211
213
  - If no profile/api_key and session is interactive, prompt to log in and inject afterward.
212
214
  - If user declines or session is non-interactive, warn that deployment may not work.
213
215
  """
216
+ import questionary
217
+ from llama_deploy.appserver.workflow_loader import parse_environment_variables
218
+ from llama_deploy.cli.config.env_service import service
219
+
214
220
  # Read config directly to avoid cached global settings
215
221
  try:
216
222
  config = read_deployment_config_from_git_root_or_cwd(
@@ -225,9 +231,6 @@ def _maybe_inject_llama_cloud_credentials(
225
231
  if not config.llama_cloud and not require_cloud:
226
232
  return
227
233
 
228
- # Import lazily to avoid loading appserver dependencies on general CLI startup
229
- from llama_deploy.appserver.workflow_loader import parse_environment_variables
230
-
231
234
  vars = parse_environment_variables(
232
235
  config, deployment_file.parent if deployment_file.is_file() else deployment_file
233
236
  )
@@ -306,6 +309,9 @@ def _maybe_select_project_for_env_key() -> None:
306
309
 
307
310
  If more than one project exists, prompt the user to select one.
308
311
  """
312
+ import questionary
313
+ from llama_deploy.core.client.manage_client import ControlPlaneClient
314
+
309
315
  api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
310
316
  base_url = os.environ.get("LLAMA_CLOUD_BASE_URL", "https://api.cloud.llamaindex.ai")
311
317
  if not api_key:
@@ -67,11 +67,11 @@ class ConfigManager:
67
67
  config_dir = Path.home() / ".config" / "llamactl"
68
68
  return config_dir.expanduser()
69
69
 
70
- def _ensure_config_dir(self):
70
+ def _ensure_config_dir(self) -> None:
71
71
  """Create configuration directory if it doesn't exist"""
72
72
  self.config_dir.mkdir(parents=True, exist_ok=True)
73
73
 
74
- def _init_database(self):
74
+ def _init_database(self) -> None:
75
75
  """Initialize SQLite database and run migrations; then seed defaults."""
76
76
 
77
77
  with sqlite3.connect(self.db_path) as conn:
@@ -80,7 +80,7 @@ class ConfigManager:
80
80
 
81
81
  conn.commit()
82
82
 
83
- def destroy_database(self):
83
+ def destroy_database(self) -> None:
84
84
  """Destroy the database"""
85
85
  self.db_path.unlink()
86
86
  self._init_database()
@@ -89,7 +89,7 @@ class ConfigManager:
89
89
  ## Settings
90
90
  #############################################
91
91
 
92
- def set_settings_current_profile(self, name: str | None):
92
+ def set_settings_current_profile(self, name: str | None) -> None:
93
93
  """Set or clear the current active profile.
94
94
 
95
95
  If name is None, the setting is removed.
@@ -8,8 +8,13 @@ from __future__ import annotations
8
8
  import logging
9
9
  import re
10
10
  import sqlite3
11
+ import sys
11
12
  from importlib import import_module, resources
12
- from importlib.resources.abc import Traversable
13
+
14
+ if sys.version_info >= (3, 11):
15
+ from importlib.resources.abc import Traversable
16
+ else:
17
+ from importlib.abc import Traversable
13
18
 
14
19
  logger = logging.getLogger(__name__)
15
20
 
@@ -22,7 +27,7 @@ def _iter_migration_files() -> list[Traversable]:
22
27
  """Yield packaged SQL migration files in lexicographic order."""
23
28
  pkg = import_module(_MIGRATIONS_PKG)
24
29
  root = resources.files(pkg)
25
- files = (p for p in root.iterdir() if p.name.endswith(".sql"))
30
+ files: list[Traversable] = [p for p in root.iterdir() if p.name.endswith(".sql")]
26
31
  if not files:
27
32
  raise ValueError("No migration files found")
28
33
  return sorted(files, key=lambda p: p.name)
@@ -86,7 +86,7 @@ class AuthService:
86
86
  return asyncio.run(_fetch_server_version())
87
87
 
88
88
  def _validate_token_and_list_projects(self, api_key: str) -> list[ProjectSummary]:
89
- async def _run():
89
+ async def _run() -> list[ProjectSummary]:
90
90
  async with ControlPlaneClient.ctx(self.env.api_url, api_key) as client:
91
91
  return await client.list_projects()
92
92
 
@@ -32,4 +32,4 @@ SELECT DISTINCT api_url, 0 FROM profiles;
32
32
 
33
33
  -- 3) Ensure the default cloud environment exists with auth required
34
34
  INSERT OR IGNORE INTO environments (api_url, requires_auth, min_llamactl_version)
35
- VALUES ('https://api.cloud.llamaindex.ai', 1, NULL);
35
+ VALUES ('https://api.cloud.llamaindex.ai', 1, NULL);
@@ -20,5 +20,3 @@ WHERE id IS NULL;
20
20
 
21
21
  -- Ensure id values are unique
22
22
  CREATE UNIQUE INDEX IF NOT EXISTS idx_profiles_id ON profiles(id);
23
-
24
-
@@ -7,13 +7,16 @@ from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
7
7
  P = ParamSpec("P")
8
8
  R = TypeVar("R")
9
9
 
10
+ # hack around for mypy not letting you set path_type=Path on click.Path
11
+ _ClickPath = getattr(click, "Path")
12
+
10
13
 
11
14
  def _deployment_file_option(f: Callable[P, R]) -> Callable[P, R]:
12
15
  return click.argument(
13
16
  "deployment_file",
14
17
  required=False,
15
18
  default=DEFAULT_DEPLOYMENT_FILE_PATH,
16
- type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
19
+ type=_ClickPath(dir_okay=True, resolve_path=True, path_type=Path),
17
20
  )(f)
18
21
 
19
22
 
@@ -1,8 +1,9 @@
1
1
  from pathlib import Path
2
- from tomllib import load as load_toml
3
2
 
3
+ from llama_deploy.core._compat import load_toml_file
4
4
 
5
- def _get_min_py_version(requires_python: str):
5
+
6
+ def _get_min_py_version(requires_python: str) -> str:
6
7
  min_v = requires_python.split(",")[0].strip()
7
8
  return (
8
9
  min_v.replace("=", "")
@@ -13,7 +14,7 @@ def _get_min_py_version(requires_python: str):
13
14
  )
14
15
 
15
16
 
16
- def infer_python_version(config_dir: Path):
17
+ def infer_python_version(config_dir: Path) -> str:
17
18
  if (config_dir / ".python-version").exists():
18
19
  with open(config_dir / ".python-version", "r") as f:
19
20
  content = f.read()
@@ -21,11 +22,13 @@ def infer_python_version(config_dir: Path):
21
22
  py_version = content.strip()
22
23
  return py_version
23
24
  with open(config_dir / "pyproject.toml", "rb") as f:
24
- data = load_toml(f)
25
+ data = load_toml_file(f)
25
26
  return _get_min_py_version(data.get("project", {}).get("requires-python", "3.12"))
26
27
 
27
28
 
28
- def build_dockerfile_content(python_version: str | None = None, port: int = 4501):
29
+ def build_dockerfile_content(
30
+ python_version: str | None = None, port: int = 4501
31
+ ) -> str:
29
32
  return f"""
30
33
  FROM python:{python_version}-slim-trixie
31
34