llamactl 0.2.7a1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. llama_deploy/cli/__init__.py +9 -22
  2. llama_deploy/cli/app.py +69 -0
  3. llama_deploy/cli/auth/client.py +362 -0
  4. llama_deploy/cli/client.py +47 -170
  5. llama_deploy/cli/commands/aliased_group.py +33 -0
  6. llama_deploy/cli/commands/auth.py +696 -0
  7. llama_deploy/cli/commands/deployment.py +300 -0
  8. llama_deploy/cli/commands/env.py +211 -0
  9. llama_deploy/cli/commands/init.py +313 -0
  10. llama_deploy/cli/commands/serve.py +239 -0
  11. llama_deploy/cli/config/_config.py +390 -0
  12. llama_deploy/cli/config/_migrations.py +65 -0
  13. llama_deploy/cli/config/auth_service.py +130 -0
  14. llama_deploy/cli/config/env_service.py +67 -0
  15. llama_deploy/cli/config/migrations/0001_init.sql +35 -0
  16. llama_deploy/cli/config/migrations/0002_add_auth_fields.sql +24 -0
  17. llama_deploy/cli/config/migrations/__init__.py +7 -0
  18. llama_deploy/cli/config/schema.py +61 -0
  19. llama_deploy/cli/env.py +5 -3
  20. llama_deploy/cli/interactive_prompts/session_utils.py +37 -0
  21. llama_deploy/cli/interactive_prompts/utils.py +6 -72
  22. llama_deploy/cli/options.py +27 -5
  23. llama_deploy/cli/py.typed +0 -0
  24. llama_deploy/cli/styles.py +10 -0
  25. llama_deploy/cli/textual/deployment_form.py +263 -36
  26. llama_deploy/cli/textual/deployment_help.py +53 -0
  27. llama_deploy/cli/textual/deployment_monitor.py +466 -0
  28. llama_deploy/cli/textual/git_validation.py +20 -21
  29. llama_deploy/cli/textual/github_callback_server.py +17 -14
  30. llama_deploy/cli/textual/llama_loader.py +13 -1
  31. llama_deploy/cli/textual/secrets_form.py +28 -8
  32. llama_deploy/cli/textual/styles.tcss +49 -8
  33. llama_deploy/cli/utils/env_inject.py +23 -0
  34. {llamactl-0.2.7a1.dist-info → llamactl-0.3.0.dist-info}/METADATA +9 -6
  35. llamactl-0.3.0.dist-info/RECORD +38 -0
  36. {llamactl-0.2.7a1.dist-info → llamactl-0.3.0.dist-info}/WHEEL +1 -1
  37. llama_deploy/cli/commands.py +0 -549
  38. llama_deploy/cli/config.py +0 -173
  39. llama_deploy/cli/textual/profile_form.py +0 -171
  40. llamactl-0.2.7a1.dist-info/RECORD +0 -19
  41. {llamactl-0.2.7a1.dist-info → llamactl-0.3.0.dist-info}/entry_points.txt +0 -0
llama_deploy/cli/commands/init.py
@@ -0,0 +1,313 @@
+ from __future__ import annotations
+
+ import asyncio
+ import os
+ import shutil
+ import subprocess
+ from dataclasses import dataclass
+ from pathlib import Path
+
+ import click
+ import copier
+ import questionary
+ from click.exceptions import Exit
+ from llama_deploy.cli.app import app
+ from llama_deploy.cli.options import global_options
+ from llama_deploy.cli.styles import HEADER_COLOR_HEX
+ from rich import print as rprint
+ from vibe_llama.scaffold import create_scaffold
+ from vibe_llama.scaffold.scaffold import ProjectName
+ from vibe_llama.sdk import VibeLlamaStarter
+
+
+ @dataclass
+ class TemplateOption:
+     id: str
+     name: str
+     description: str
+     source: VibeLlamaTemplate | GithubTemplateRepo
+     llama_cloud: bool
+
+
+ @dataclass
+ class VibeLlamaTemplate:
+     name: ProjectName
+
+
+ @dataclass
+ class GithubTemplateRepo:
+     url: str
+
+
+ ui_options = [
+     TemplateOption(
+         id="basic-ui",
+         name="Basic UI",
+         description="A basic starter workflow with a React Vite UI",
+         source=GithubTemplateRepo(
+             url="https://github.com/run-llama/template-workflow-basic-ui"
+         ),
+         llama_cloud=False,
+     ),
+     TemplateOption(
+         id="extraction-review",
+         name="Extraction Agent with Review UI",
+         description="Extract data from documents using a custom schema and Llama Cloud. Includes a UI to review and correct the results",
+         source=GithubTemplateRepo(
+             url="https://github.com/run-llama/template-workflow-data-extraction"
+         ),
+         llama_cloud=True,
+     ),
+ ]
+ headless_options = [
+     TemplateOption(
+         id="basic",
+         name="Basic Workflow",
+         description="A base example that showcases usage patterns for workflows",
+         source=VibeLlamaTemplate(name="basic"),
+         llama_cloud=False,
+     ),
+     TemplateOption(
+         id="document_parsing",
+         name="Document Parser",
+         description="A workflow that, using LlamaParse, parses unstructured documents and returns their raw text content",
+         source=VibeLlamaTemplate(name="document_parsing"),
+         llama_cloud=True,
+     ),
+     TemplateOption(
+         id="human_in_the_loop",
+         name="Human in the Loop",
+         description="A workflow showcasing how to use human in the loop with LlamaIndex workflows",
+         source=VibeLlamaTemplate(name="human_in_the_loop"),
+         llama_cloud=False,
+     ),
+     TemplateOption(
+         id="invoice_extraction",
+         name="Invoice Extraction",
+         description="A workflow that, given an invoice, extracts several key details using LlamaExtract",
+         source=VibeLlamaTemplate(name="invoice_extraction"),
+         llama_cloud=True,
+     ),
+     TemplateOption(
+         id="rag",
+         name="RAG",
+         description="A workflow that embeds, indexes and queries your documents on the fly, providing you with a simple RAG pipeline",
+         source=VibeLlamaTemplate(name="rag"),
+         llama_cloud=False,
+     ),
+     TemplateOption(
+         id="web_scraping",
+         name="Web Scraping",
+         description="A workflow that, given several urls, scrapes and summarizes their content using Google's Gemini API",
+         source=VibeLlamaTemplate(name="web_scraping"),
+         llama_cloud=False,
+     ),
+ ]
+
+
+ @app.command()
+ @click.option(
+     "--update",
+     is_flag=True,
+     help="Instead of creating a new app, update the current app to the latest version. Other options will be ignored.",
+ )
+ @click.option(
+     "--template",
+     type=click.Choice([o.id for o in ui_options]),
+     help="The template to use for the new app",
+ )
+ @click.option(
+     "--dir",
+     help="The directory to create the new app in",
+     type=click.Path(
+         file_okay=False, dir_okay=True, writable=True, resolve_path=True, path_type=Path
+     ),
+ )
+ @click.option(
+     "--force",
+     is_flag=True,
+     help="Force overwrite the directory if it exists",
+ )
+ @global_options
+ def init(
+     update: bool,
+     template: str | None,
+     dir: Path | None,
+     force: bool,
+ ) -> None:
+     """Create a new app repository from a template"""
+     if update:
+         _update()
+     else:
+         _create(template, dir, force)
+
+
+ def _create(template: str | None, dir: Path | None, force: bool) -> None:
+     if template is None:
+         rprint(
+             "[bold]Select a template to start from.[/bold] Either with javascript frontend UI, or just a python workflow that can be used as an API."
+         )
+         template = questionary.select(
+             "",
+             choices=[questionary.Separator("------------ With UI -------------")]
+             + [
+                 questionary.Choice(title=o.name, value=o.id, description=o.description)
+                 for o in ui_options
+             ]
+             + [
+                 questionary.Separator(" "),
+                 questionary.Separator("--- Headless Workflows (No UI) ---"),
+             ]
+             + [
+                 questionary.Choice(title=o.name, value=o.id, description=o.description)
+                 for o in headless_options
+             ],
+             style=questionary.Style(
+                 [
+                     ("separator", f"fg:{HEADER_COLOR_HEX}"),
+                 ]
+             ),
+         ).ask()
+     if template is None:
+         rprint("No template selected")
+         raise Exit(1)
+     if dir is None:
+         dir_str = questionary.text(
+             "Enter the directory to create the new app in", default=template
+         ).ask()
+         if not dir_str:
+             rprint("No directory provided")
+             raise Exit(1)
+         dir = Path(dir_str)
+     resolved_template: TemplateOption | None = next(
+         (o for o in ui_options + headless_options if o.id == template), None
+     )
+     if resolved_template is None:
+         rprint(f"Template {template} not found")
+         raise Exit(1)
+     if dir.exists():
+         is_ok = (
+             force
+             or questionary.confirm("Directory exists. Overwrite?", default=False).ask()
+         )
+         if not is_ok:
+             raise Exit(1)
+         else:
+             shutil.rmtree(dir, ignore_errors=True)
+
+     if isinstance(resolved_template.source, GithubTemplateRepo):
+         copier.run_copy(
+             resolved_template.source.url,
+             dir,
+             quiet=True,
+         )
+     else:
+         asyncio.run(create_scaffold(resolved_template.source.name, str(dir)))
+     # Initialize a git repository if git is available
+     has_git = False
+     try:
+         subprocess.run(["git", "--version"], check=True, capture_output=True)
+         has_git = True
+     except (subprocess.CalledProcessError, FileNotFoundError):
+         # FileNotFoundError covers git not being installed at all
+         pass
+
+     # Change to the new directory and initialize the git repo
+     original_cwd = Path.cwd()
+     os.chdir(dir)
+
+     try:
+         # Dump in a bunch of docs for AI agents
+         vibe_llama_starter = VibeLlamaStarter(
+             agents=["OpenAI Codex CLI"],  # writes AGENTS.md, which Cursor also supports
+             services=["LlamaIndex", "llama-index-workflows"]
+             + (["LlamaCloud Services"] if resolved_template.llama_cloud else []),
+         )
+         asyncio.run(vibe_llama_starter.write_instructions(overwrite=True))
+         # Symlink CLAUDE.md and GEMINI.md to AGENTS.md, for agents that don't read AGENTS.md (yet?)
+         for alternate in ["CLAUDE.md", "GEMINI.md"]:
+             alternate_path = Path(alternate)
+             agents_path = Path("AGENTS.md")
+             if agents_path.exists() and not alternate_path.exists():
+                 alternate_path.symlink_to("AGENTS.md")
+         if has_git:
+             subprocess.run(["git", "init"], check=True, capture_output=True)
+             subprocess.run(["git", "add", "."], check=True, capture_output=True)
+             subprocess.run(
+                 ["git", "commit", "-m", "Initial commit"],
+                 check=True,
+                 capture_output=True,
+             )
+     finally:
+         os.chdir(original_cwd)
+
+     rprint(
+         f"Successfully created [blue]{dir}[/] using the [blue]{resolved_template.name}[/] template! 🎉 🦙 💾"
+     )
+     rprint("")
+     rprint("[bold]To run locally:[/]")
+     rprint(f"  [orange3]cd[/] {dir}")
+     rprint("  [orange3]uvx[/] llamactl serve")
+     rprint("")
+     rprint("[bold]To deploy:[/]")
+     if has_git:
+         rprint("  [orange3]git[/] init")
+         rprint("  [orange3]git[/] add .")
+         rprint("  [orange3]git[/] commit -m 'Initial commit'")
+         rprint("")
+         rprint("[dim](Create a new repo and add it as a remote)[/]")
+         rprint("")
+         rprint("  [orange3]git[/] remote add origin <your-repo-url>")
+         rprint("  [orange3]git[/] push -u origin main")
+         rprint("")
+     # rprint("  [orange3]uvx[/] llamactl login")
+     rprint("  [orange3]uvx[/] llamactl deploy")
+     rprint("")
+
+
+ def _update():
+     """Update the app to the latest version"""
+     try:
+         copier.run_update(
+             overwrite=True,
+             skip_answered=True,
+             quiet=True,
+         )
+     except copier.UserMessageError as e:
+         rprint(f"{e}")
+         raise Exit(1)
+
+     # Check git status and warn about conflicts
+     try:
+         result = subprocess.run(
+             ["git", "status", "--porcelain"],
+             check=True,
+             capture_output=True,
+             text=True,
+         )
+
+         if result.stdout.strip():
+             conflicted_files = []
+             modified_files = []
+
+             for line in result.stdout.strip().split("\n"):
+                 status = line[:2]
+                 filename = line[3:]
+
+                 if "UU" in status or "AA" in status or "DD" in status:
+                     conflicted_files.append(filename)
+                 elif status.strip():
+                     modified_files.append(filename)
+
+             if conflicted_files:
+                 rprint("")
+                 rprint("⚠️  [bold]Files with conflicts detected:[/]")
+                 for file in conflicted_files:
+                     rprint(f"  {file}")
+                 rprint("")
+                 rprint(
+                     "Please manually resolve conflicts with a merge editor before proceeding."
+                 )
+
+     except (subprocess.CalledProcessError, FileNotFoundError):
+         # Git not available or not in a git repo - continue silently
+         pass
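
Taken together, the new init.py registers an `init` command that scaffolds an app from either a GitHub template (via copier) or a vibe-llama scaffold, writes agent docs, and optionally initializes a git repo. A minimal sketch of driving it without a TTY, using Click's test runner (the import path is the one added in this diff; the assumption is that passing --template, --dir, and --force bypasses every questionary prompt):

    from click.testing import CliRunner

    from llama_deploy.cli.commands.init import init  # command added in this diff

    runner = CliRunner()
    # "basic-ui" is one of the ids declared in ui_options; --force skips the overwrite prompt
    result = runner.invoke(init, ["--template", "basic-ui", "--dir", "demo-app", "--force"])
    print(result.exit_code, result.output)

Note that `--template` only accepts ids from `ui_options` (`click.Choice([o.id for o in ui_options])`), so the headless template ids can currently be reached only through the interactive selector.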
llama_deploy/cli/commands/serve.py
@@ -0,0 +1,239 @@
+ import logging
+ import os
+ from pathlib import Path
+ from typing import Literal
+
+ import click
+ import questionary
+ from llama_deploy.appserver.app import (
+     prepare_server,
+     start_server_in_target_venv,
+ )
+ from llama_deploy.appserver.deployment_config_parser import get_deployment_config
+ from llama_deploy.appserver.workflow_loader import parse_environment_variables
+ from llama_deploy.cli.commands.auth import validate_authenticated_profile
+ from llama_deploy.cli.config.env_service import service
+ from llama_deploy.cli.config.schema import Auth
+ from llama_deploy.cli.options import interactive_option
+ from llama_deploy.cli.styles import WARNING
+ from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
+ from llama_deploy.core.deployment_config import (
+     read_deployment_config_from_git_root_or_cwd,
+ )
+ from rich import print as rprint
+
+ from ..app import app
+
+ logger = logging.getLogger(__name__)
+
+
+ @app.command(
+     "serve",
+     help="Serve a LlamaDeploy app locally for development and testing",
+ )
+ @click.argument(
+     "deployment_file",
+     required=False,
+     default=DEFAULT_DEPLOYMENT_FILE_PATH,
+     type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
+ )
+ @click.option(
+     "--no-install", is_flag=True, help="Skip installing python and js dependencies"
+ )
+ @click.option(
+     "--no-reload", is_flag=True, help="Skip reloading the API server on code changes"
+ )
+ @click.option("--no-open-browser", is_flag=True, help="Skip opening the browser")
+ @click.option(
+     "--preview",
+     is_flag=True,
+     help="Preview mode pre-builds the UI to static files, like a production build",
+ )
+ @click.option("--port", type=int, help="The port to run the API server on")
+ @click.option("--ui-port", type=int, help="The port to run the UI proxy server on")
+ @click.option(
+     "--log-level",
+     type=click.Choice(
+         ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], case_sensitive=False
+     ),
+     help="The log level to run the API server at",
+ )
+ @click.option(
+     "--log-format",
+     type=click.Choice(["console", "json"], case_sensitive=False),
+     help="The format to use for logging",
+ )
+ @click.option(
+     "--persistence",
+     type=click.Choice(["memory", "local", "cloud"]),
+     help="The persistence mode to use for the workflow server",
+ )
+ @click.option(
+     "--local-persistence-path",
+     type=click.Path(dir_okay=True, resolve_path=True, path_type=Path),
+     help="The path to the sqlite database to use for the workflow server if using local persistence",
+ )
+ @interactive_option
+ def serve(
+     deployment_file: Path,
+     no_install: bool,
+     no_reload: bool,
+     no_open_browser: bool,
+     preview: bool,
+     port: int | None = None,
+     ui_port: int | None = None,
+     log_level: str | None = None,
+     log_format: str | None = None,
+     persistence: Literal["memory", "local", "cloud"] | None = None,
+     local_persistence_path: Path | None = None,
+     interactive: bool = False,
+ ) -> None:
+     """Run the llama_deploy API server in the foreground. Reads the deployment configuration from the current directory; a deployment file path can optionally be given."""
+     if not deployment_file.exists():
+         rprint(f"[red]Deployment file '{deployment_file}' not found[/red]")
+         raise click.Abort()
+
+     try:
+         # Pre-check: if the template requires llama cloud access, ensure credentials
+         _maybe_inject_llama_cloud_credentials(
+             deployment_file, interactive, require_cloud=persistence == "cloud"
+         )
+
+         prepare_server(
+             deployment_file=deployment_file,
+             install=not no_install,
+             build=preview,
+         )
+         deployment_config = get_deployment_config()
+         start_server_in_target_venv(
+             cwd=Path.cwd(),
+             deployment_file=deployment_file,
+             proxy_ui=not preview,
+             reload=not no_reload,
+             open_browser=not no_open_browser,
+             port=port,
+             ui_port=ui_port,
+             log_level=log_level.upper() if log_level else None,
+             log_format=log_format.lower() if log_format else None,
+             persistence=persistence if persistence else "local",
+             local_persistence_path=str(local_persistence_path)
+             if local_persistence_path and persistence == "local"
+             else None,
+             cloud_persistence_name=f"_public:serve_workflows_{deployment_config.name}"
+             if persistence == "cloud"
+             else None,
+         )
+
+     except KeyboardInterrupt:
+         logger.debug("Shutting down...")
+
+     except Exception as e:
+         rprint(f"[red]Error: {e}[/red]")
+         raise click.Abort()
+
+
+ def _set_env_vars_from_profile(profile: Auth):
+     if profile.api_key:
+         _set_env_vars(profile.api_key, profile.api_url)
+     _set_project_id_from_profile(profile)
+
+
+ def _set_env_vars_from_env(env_vars: dict[str, str]):
+     key = env_vars.get("LLAMA_CLOUD_API_KEY")
+     url = env_vars.get("LLAMA_CLOUD_BASE_URL", "https://api.cloud.llamaindex.ai")
+     # Also propagate the project id if present in the environment
+     _set_project_id_from_env(env_vars)
+     if key:
+         _set_env_vars(key, url)
+
+
+ def _set_env_vars(key: str, url: str):
+     os.environ["LLAMA_CLOUD_API_KEY"] = key
+     os.environ["LLAMA_CLOUD_BASE_URL"] = url
+     # kludge for common web servers to inject the local auth key
+     for prefix in ["VITE_", "NEXT_PUBLIC_"]:
+         os.environ[f"{prefix}LLAMA_CLOUD_API_KEY"] = key
+         os.environ[f"{prefix}LLAMA_CLOUD_BASE_URL"] = url
+
+
+ def _set_project_id_from_env(env_vars: dict[str, str]):
+     project_id = env_vars.get("LLAMA_DEPLOY_PROJECT_ID")
+     if project_id:
+         os.environ["LLAMA_DEPLOY_PROJECT_ID"] = project_id
+
+
+ def _set_project_id_from_profile(profile: Auth):
+     if profile.project_id:
+         os.environ["LLAMA_DEPLOY_PROJECT_ID"] = profile.project_id
+
+
+ def _maybe_inject_llama_cloud_credentials(
+     deployment_file: Path, interactive: bool, require_cloud: bool
+ ) -> None:
+     """If the deployment config indicates Llama Cloud usage, ensure LLAMA_CLOUD_API_KEY is set.
+
+     Behavior:
+     - If LLAMA_CLOUD_API_KEY is already set, do nothing.
+     - Else, try to read the current profile's api_key and inject it.
+     - If there is no profile/api_key and the session is interactive, prompt to log in and inject afterward.
+     - If the user declines or the session is non-interactive, warn that the deployment may not work.
+     """
+     # Read the config directly to avoid cached global settings
+     try:
+         config = read_deployment_config_from_git_root_or_cwd(
+             Path.cwd(), deployment_file
+         )
+     except Exception:
+         rprint(
+             "[red]Error: Could not read a deployment config. This doesn't appear to be a valid llama-deploy project.[/red]"
+         )
+         raise click.Abort()
+
+     if not config.llama_cloud and not require_cloud:
+         return
+
+     env_vars = parse_environment_variables(
+         config, deployment_file.parent if deployment_file.is_file() else deployment_file
+     )
+
+     # Ensure the project id is available to the app and UI processes
+     _set_project_id_from_env({**os.environ, **env_vars})
+
+     existing = os.environ.get("LLAMA_CLOUD_API_KEY") or env_vars.get("LLAMA_CLOUD_API_KEY")
+     if existing:
+         _set_env_vars_from_env({**os.environ, **env_vars})
+         return
+
+     env = service.get_current_environment()
+     if not env.requires_auth:
+         rprint(
+             f"[{WARNING}]Warning: This app requires Llama Cloud authentication, and no LLAMA_CLOUD_API_KEY is present. The app may not work.[/]"
+         )
+         return
+
+     auth_svc = service.current_auth_service()
+     profile = auth_svc.get_current_profile()
+     if profile and profile.api_key:
+         _set_env_vars_from_profile(profile)
+         return
+
+     # No key available; prompt if interactive
+     if interactive:
+         should_login = questionary.confirm(
+             "This deployment requires Llama Cloud. Login now to inject credentials? Otherwise the app may not work.",
+             default=True,
+         ).ask()
+         if should_login:
+             authed = validate_authenticated_profile(True)
+             if authed.api_key:
+                 _set_env_vars_from_profile(authed)
+                 return
+         rprint(
+             f"[{WARNING}]Warning: No Llama Cloud credentials configured. The app may not work.[/]"
+         )
+         return
+
+     # Non-interactive session
+     rprint(
+         f"[{WARNING}]Warning: LLAMA_CLOUD_API_KEY is not set and no logged-in profile was found. The app may not work.[/]"
+     )
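
The credential helpers above layer as follows: an existing LLAMA_CLOUD_API_KEY (from the process environment or the parsed deployment env) wins, then the stored profile's key, then an interactive login, and finally a warning. A small sketch of the `_set_env_vars` kludge, which mirrors the key and base URL into VITE_- and NEXT_PUBLIC_-prefixed variables so browser bundles can pick them up (the key value is hypothetical, and `_set_env_vars` is a private helper imported here only for illustration):

    import os

    from llama_deploy.cli.commands.serve import _set_env_vars  # private helper from this diff

    _set_env_vars("llx-example-key", "https://api.cloud.llamaindex.ai")

    # The plain variables and their framework-prefixed mirrors are now all set:
    assert os.environ["LLAMA_CLOUD_API_KEY"] == "llx-example-key"
    assert os.environ["VITE_LLAMA_CLOUD_API_KEY"] == "llx-example-key"
    assert os.environ["NEXT_PUBLIC_LLAMA_CLOUD_BASE_URL"] == "https://api.cloud.llamaindex.ai"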