llamactl 0.3.0a16__tar.gz → 0.3.0a18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/PKG-INFO +4 -4
  2. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/pyproject.toml +4 -4
  3. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/commands/auth.py +4 -5
  4. llamactl-0.3.0a18/src/llama_deploy/cli/commands/serve.py +199 -0
  5. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/config/_config.py +32 -3
  6. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/config/auth_service.py +1 -1
  7. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/config/env_service.py +2 -0
  8. llamactl-0.3.0a16/src/llama_deploy/cli/commands/serve.py +0 -88
  9. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/README.md +0 -0
  10. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/__init__.py +0 -0
  11. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/app.py +0 -0
  12. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/client.py +0 -0
  13. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/commands/aliased_group.py +0 -0
  14. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/commands/deployment.py +0 -0
  15. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/commands/env.py +0 -0
  16. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/commands/init.py +0 -0
  17. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/config/schema.py +0 -0
  18. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/debug.py +0 -0
  19. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/env.py +0 -0
  20. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/interactive_prompts/session_utils.py +0 -0
  21. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/interactive_prompts/utils.py +0 -0
  22. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/options.py +0 -0
  23. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/platform_client.py +0 -0
  24. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/py.typed +0 -0
  25. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/deployment_form.py +0 -0
  26. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/deployment_help.py +0 -0
  27. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/deployment_monitor.py +0 -0
  28. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/git_validation.py +0 -0
  29. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/github_callback_server.py +0 -0
  30. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/llama_loader.py +0 -0
  31. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/secrets_form.py +0 -0
  32. {llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/textual/styles.tcss +0 -0
{llamactl-0.3.0a16 → llamactl-0.3.0a18}/PKG-INFO
@@ -1,13 +1,13 @@
  Metadata-Version: 2.3
  Name: llamactl
- Version: 0.3.0a16
+ Version: 0.3.0a18
  Summary: A command-line interface for managing LlamaDeploy projects and deployments
  Author: Adrian Lyjak
  Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
  License: MIT
- Requires-Dist: llama-deploy-core[client]>=0.3.0a16,<0.4.0
- Requires-Dist: llama-deploy-appserver>=0.3.0a16,<0.4.0
- Requires-Dist: httpx>=0.24.0
+ Requires-Dist: llama-deploy-core[client]>=0.3.0a18,<0.4.0
+ Requires-Dist: llama-deploy-appserver>=0.3.0a18,<0.4.0
+ Requires-Dist: httpx>=0.24.0,<1.0.0
  Requires-Dist: rich>=13.0.0
  Requires-Dist: questionary>=2.0.0
  Requires-Dist: click>=8.2.1
{llamactl-0.3.0a16 → llamactl-0.3.0a18}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "llamactl"
- version = "0.3.0a16"
+ version = "0.3.0a18"
  description = "A command-line interface for managing LlamaDeploy projects and deployments"
  readme = "README.md"
  license = { text = "MIT" }
@@ -9,9 +9,9 @@ authors = [
  ]
  requires-python = ">=3.11, <4"
  dependencies = [
-     "llama-deploy-core[client]>=0.3.0a16,<0.4.0",
-     "llama-deploy-appserver>=0.3.0a16,<0.4.0",
-     "httpx>=0.24.0",
+     "llama-deploy-core[client]>=0.3.0a18,<0.4.0",
+     "llama-deploy-appserver>=0.3.0a18,<0.4.0",
+     "httpx>=0.24.0,<1.0.0",
      "rich>=13.0.0",
      "questionary>=2.0.0",
      "click>=8.2.1",
{llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/commands/auth.py
@@ -276,11 +276,10 @@ def validate_authenticated_profile(interactive: bool) -> Auth:


  def _prompt_for_api_key() -> str:
-     while True:
-         entered = questionary.password("Enter API key token").ask()
-         if entered:
-             return entered.strip()
-         rprint("[yellow]API key is required[/yellow]")
+     entered = questionary.password("Enter API key token").ask()
+     if entered:
+         return entered.strip()
+     raise click.ClickException("No API key entered")


  def _validate_token_and_list_projects(
llamactl-0.3.0a18/src/llama_deploy/cli/commands/serve.py (new file)
@@ -0,0 +1,199 @@
+ import os
+ from pathlib import Path
+
+ import click
+ import questionary
+ from llama_deploy.appserver.app import (
+     prepare_server,
+     start_server_in_target_venv,
+ )
+ from llama_deploy.appserver.workflow_loader import parse_environment_variables
+ from llama_deploy.cli.commands.auth import validate_authenticated_profile
+ from llama_deploy.cli.config.env_service import service
+ from llama_deploy.cli.config.schema import Auth
+ from llama_deploy.cli.options import interactive_option
+ from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
+ from llama_deploy.core.deployment_config import (
+     read_deployment_config_from_git_root_or_cwd,
+ )
+ from rich import print as rprint
+
+ from ..app import app
+
+
+ @app.command(
+     "serve",
+     help="Serve a LlamaDeploy app locally for development and testing",
+ )
+ @click.argument(
+     "deployment_file",
+     required=False,
+     default=DEFAULT_DEPLOYMENT_FILE_PATH,
+     type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
+ )
+ @click.option(
+     "--no-install", is_flag=True, help="Skip installing python and js dependencies"
+ )
+ @click.option(
+     "--no-reload", is_flag=True, help="Skip reloading the API server on code changes"
+ )
+ @click.option("--no-open-browser", is_flag=True, help="Skip opening the browser")
+ @click.option(
+     "--preview",
+     is_flag=True,
+     help="Preview mode pre-builds the UI to static files, like a production build",
+ )
+ @click.option("--port", type=int, help="The port to run the API server on")
+ @click.option("--ui-port", type=int, help="The port to run the UI proxy server on")
+ @click.option(
+     "--log-level",
+     type=click.Choice(
+         ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+     ),
+     help="The log level to run the API server at",
+ )
+ @click.option(
+     "--log-format",
+     type=click.Choice(["console", "json"], case_sensitive=False),
+     help="The format to use for logging",
+ )
+ @interactive_option
+ def serve(
+     deployment_file: Path,
+     no_install: bool,
+     no_reload: bool,
+     no_open_browser: bool,
+     preview: bool,
+     port: int | None = None,
+     ui_port: int | None = None,
+     log_level: str | None = None,
+     log_format: str | None = None,
+     interactive: bool = False,
+ ) -> None:
+     """Run llama_deploy API Server in the foreground. Reads the deployment configuration from the current directory. Can optionally specify a deployment file path."""
+     if not deployment_file.exists():
+         rprint(f"[red]Deployment file '{deployment_file}' not found[/red]")
+         raise click.Abort()
+
+     try:
+         # Pre-check: if the template requires llama cloud access, ensure credentials
+         _maybe_inject_llama_cloud_credentials(deployment_file, interactive)
+
+         prepare_server(
+             deployment_file=deployment_file,
+             install=not no_install,
+             build=preview,
+         )
+         start_server_in_target_venv(
+             cwd=Path.cwd(),
+             deployment_file=deployment_file,
+             proxy_ui=not preview,
+             reload=not no_reload,
+             open_browser=not no_open_browser,
+             port=port,
+             ui_port=ui_port,
+             log_level=log_level.upper() if log_level else None,
+             log_format=log_format.lower() if log_format else None,
+         )
+
+     except KeyboardInterrupt:
+         print("Shutting down...")
+
+     except Exception as e:
+         rprint(f"[red]Error: {e}[/red]")
+         raise click.Abort()
+
+
+ def _set_env_vars_from_profile(profile: Auth):
+     if profile.api_key:
+         _set_env_vars(profile.api_key, profile.api_url)
+
+
+ def _set_env_vars_from_env(env_vars: dict[str, str]):
+     key = env_vars.get("LLAMA_CLOUD_API_KEY")
+     url = env_vars.get("LLAMA_CLOUD_BASE_URL", "https://api.cloud.llamaindex.ai")
+     if key:
+         _set_env_vars(key, url)
+
+
+ def _set_env_vars(key: str, url: str):
+     print(f"Setting env vars: {key}, {url}")
+     os.environ["LLAMA_CLOUD_API_KEY"] = key
+     os.environ["LLAMA_CLOUD_BASE_URL"] = url
+     # kludge for common web servers to inject local auth key
+     for prefix in ["VITE_", "NEXT_PUBLIC_"]:
+         os.environ[f"{prefix}LLAMA_CLOUD_API_KEY"] = key
+         os.environ[f"{prefix}LLAMA_CLOUD_BASE_URL"] = url
+
+
+ def _maybe_inject_llama_cloud_credentials(
+     deployment_file: Path, interactive: bool
+ ) -> None:
+     """If the deployment config indicates Llama Cloud usage, ensure LLAMA_CLOUD_API_KEY is set.
+
+     Behavior:
+     - If LLAMA_CLOUD_API_KEY is already set, do nothing.
+     - Else, try to read current profile's api_key and inject.
+     - If no profile/api_key and session is interactive, prompt to log in and inject afterward.
+     - If user declines or session is non-interactive, warn that deployment may not work.
+     """
+     # Read config directly to avoid cached global settings
+     try:
+         config = read_deployment_config_from_git_root_or_cwd(
+             Path.cwd(), deployment_file
+         )
+     except Exception:
+         rprint(
+             "[red]Error: Could not read a deployment config. This doesn't appear to be a valid llama-deploy project.[/red]"
+         )
+         raise click.Abort()
+
+     if not config.llama_cloud:
+         return
+
+     vars = parse_environment_variables(
+         config, deployment_file.parent if deployment_file.is_file() else deployment_file
+     )
+
+     existing = os.environ.get("LLAMA_CLOUD_API_KEY") or vars.get("LLAMA_CLOUD_API_KEY")
+     if existing:
+         _set_env_vars_from_env({**os.environ, **vars})
+         return
+
+     env = service.get_current_environment()
+     if not env.requires_auth:
+         rprint(
+             "[yellow]Warning: This app requires Llama Cloud authentication, and no LLAMA_CLOUD_API_KEY is present. The app may not work.[/yellow]"
+         )
+         return
+
+     auth_svc = service.current_auth_service()
+     profile = auth_svc.get_current_profile()
+     if profile and profile.api_key:
+         _set_env_vars_from_profile(profile)
+         return
+
+     # No key available; consider prompting if interactive
+     if interactive:
+         should_login = questionary.confirm(
+             "This deployment requires Llama Cloud. Login now to inject credentials?",
+             default=True,
+         ).ask()
+         if should_login:
+             try:
+                 authed = validate_authenticated_profile(True)
+                 if authed.api_key:
+                     _set_env_vars_from_profile(authed)
+                     return
+             except Exception:
+                 # fall through to warning
+                 pass
+         rprint(
+             "[yellow]Warning: No Llama Cloud credentials configured. The app may not work.[/yellow]"
+         )
+         return
+
+     # Non-interactive session
+     rprint(
+         "[yellow]Warning: LLAMA_CLOUD_API_KEY is not set and no logged-in profile was found. The deployment may not work.[/yellow]"
+     )
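For orientation, the new command can be exercised with the flags defined above. The deployment file name and port numbers in this one-liner are illustrative, not defaults taken from the package:

    llamactl serve deployment.yml --port 4501 --ui-port 3000 --log-level DEBUG --log-format console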
{llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/config/_config.py
@@ -79,13 +79,40 @@ class ConfigManager:
      if "api_key" not in mig_columns:
          conn.execute("ALTER TABLE profiles ADD COLUMN api_key TEXT")

+     # Migration: change profiles primary key from name to composite (name, api_url)
+     # Only perform if the table exists with single-column PK on name
+     pk_info_cur = conn.execute("PRAGMA table_info(profiles)")
+     pk_info_rows = pk_info_cur.fetchall()
+     pk_columns = [row[1] for row in pk_info_rows if len(row) > 5 and row[5] > 0]
+     if pk_columns == ["name"] and "api_url" in columns:
+         conn.execute("ALTER TABLE profiles RENAME TO profiles_old")
+         conn.execute(
+             """
+             CREATE TABLE profiles (
+                 name TEXT NOT NULL,
+                 api_url TEXT NOT NULL,
+                 project_id TEXT NOT NULL,
+                 api_key TEXT,
+                 PRIMARY KEY (name, api_url)
+             )
+             """
+         )
+         conn.execute(
+             """
+             INSERT INTO profiles (name, api_url, project_id, api_key)
+             SELECT name, api_url, project_id, api_key FROM profiles_old
+             """
+         )
+         conn.execute("DROP TABLE profiles_old")
+
      # Create tables with new schema (this will only create if they don't exist)
      conn.execute("""
          CREATE TABLE IF NOT EXISTS profiles (
-             name TEXT PRIMARY KEY,
+             name TEXT NOT NULL,
              api_url TEXT NOT NULL,
              project_id TEXT NOT NULL,
-             api_key TEXT
+             api_key TEXT,
+             PRIMARY KEY (name, api_url)
          )
      """)

@@ -177,7 +204,9 @@ class ConfigManager:
          )
          conn.commit()
      except sqlite3.IntegrityError:
-         raise ValueError(f"Profile '{name}' already exists")
+         raise ValueError(
+             f"Profile '{name}' already exists for environment '{api_url}'"
+         )

      return profile
{llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/config/auth_service.py
@@ -64,5 +64,5 @@ def _auto_profile_name_from_token(api_key: str) -> str:
      cleaned = token.replace(" ", "")
      first = cleaned[:6]
      last = cleaned[-4:] if len(cleaned) > 10 else cleaned[-2:]
-     base = f"token-{first}-{last}"
+     base = f"{first}****{last}"
      return base
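To make the rename concrete, the derivation can be inlined as a standalone sketch. The key below is made up, and where the real helper uses a token value derived from api_key in context not shown in this hunk, the sketch uses api_key directly:

    def demo_profile_name(api_key: str) -> str:
        # Same steps as _auto_profile_name_from_token
        cleaned = api_key.replace(" ", "")
        first = cleaned[:6]
        last = cleaned[-4:] if len(cleaned) > 10 else cleaned[-2:]
        return f"{first}****{last}"

    print(demo_profile_name("llx-abcdef0123456789"))
    # old format: "token-llx-ab-6789"; new format: "llx-ab****6789"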
{llamactl-0.3.0a16 → llamactl-0.3.0a18}/src/llama_deploy/cli/config/env_service.py
@@ -30,6 +30,8 @@ class EnvService:
          self.config_manager.create_or_update_environment(
              env.api_url, env.requires_auth, env.min_llamactl_version
          )
+         self.config_manager.set_settings_current_environment(env.api_url)
+         self.config_manager.set_settings_current_profile(None)

      def delete_environment(self, api_url: str) -> bool:
          return self.config_manager.delete_environment(api_url)
llamactl-0.3.0a16/src/llama_deploy/cli/commands/serve.py (deleted)
@@ -1,88 +0,0 @@
- from pathlib import Path
-
- import click
- from llama_deploy.appserver.app import (
-     prepare_server,
-     start_server_in_target_venv,
- )
- from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
- from rich import print as rprint
-
- from ..app import app
-
-
- @app.command(
-     "serve",
-     help="Serve a LlamaDeploy app locally for development and testing",
- )
- @click.argument(
-     "deployment_file",
-     required=False,
-     default=DEFAULT_DEPLOYMENT_FILE_PATH,
-     type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
- )
- @click.option(
-     "--no-install", is_flag=True, help="Skip installing python and js dependencies"
- )
- @click.option(
-     "--no-reload", is_flag=True, help="Skip reloading the API server on code changes"
- )
- @click.option("--no-open-browser", is_flag=True, help="Skip opening the browser")
- @click.option(
-     "--preview",
-     is_flag=True,
-     help="Preview mode pre-builds the UI to static files, like a production build",
- )
- @click.option("--port", type=int, help="The port to run the API server on")
- @click.option("--ui-port", type=int, help="The port to run the UI proxy server on")
- @click.option(
-     "--log-level",
-     type=click.Choice(
-         ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
-     ),
-     help="The log level to run the API server at",
- )
- @click.option(
-     "--log-format",
-     type=click.Choice(["console", "json"], case_sensitive=False),
-     help="The format to use for logging",
- )
- def serve(
-     deployment_file: Path,
-     no_install: bool,
-     no_reload: bool,
-     no_open_browser: bool,
-     preview: bool,
-     port: int | None = None,
-     ui_port: int | None = None,
-     log_level: str | None = None,
-     log_format: str | None = None,
- ) -> None:
-     """Run llama_deploy API Server in the foreground. Reads the deployment configuration from the current directory. Can optionally specify a deployment file path."""
-     if not deployment_file.exists():
-         rprint(f"[red]Deployment file '{deployment_file}' not found[/red]")
-         raise click.Abort()
-
-     try:
-         prepare_server(
-             install=not no_install,
-             build=preview,
-         )
-         start_server_in_target_venv(
-             cwd=Path.cwd(),
-             deployment_file=deployment_file,
-             proxy_ui=not preview,
-             reload=not no_reload,
-             open_browser=not no_open_browser,
-             port=port,
-             ui_port=ui_port,
-             log_level=log_level.upper() if log_level else None,
-             log_format=log_format.lower() if log_format else None,
-         )
-
-     except KeyboardInterrupt:
-         print("Shutting down...")
-
-     except Exception as e:
-         rprint(f"[red]Error: {e}[/red]")
-         raise click.Abort()