llamactl-0.3.8-py3-none-any.whl → llamactl-0.3.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llama_deploy/cli/commands/init.py

@@ -55,8 +55,6 @@ def init(
 
 def _create(template: str | None, dir: Path | None, force: bool) -> None:
     # defer loading to improve cli startup time
-    from vibe_llama.scaffold import create_scaffold
-    from vibe_llama.scaffold.scaffold import ProjectName
     from vibe_llama.sdk import VibeLlamaStarter
 
     @dataclass
@@ -64,13 +62,9 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
         id: str
         name: str
        description: str
-        source: VibeLlamaTemplate | GithubTemplateRepo
+        source: GithubTemplateRepo
        llama_cloud: bool
 
-    @dataclass
-    class VibeLlamaTemplate:
-        name: ProjectName
-
     @dataclass
     class GithubTemplateRepo:
         url: str
@@ -110,42 +104,54 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
             id="basic",
             name="Basic Workflow",
             description="A base example that showcases usage patterns for workflows",
-            source=VibeLlamaTemplate(name="basic"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-basic"
+            ),
             llama_cloud=False,
         ),
         TemplateOption(
             id="document_parsing",
             name="Document Parser",
             description="A workflow that, using LlamaParse, parses unstructured documents and returns their raw text content",
-            source=VibeLlamaTemplate(name="document_parsing"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-document-parsing"
+            ),
             llama_cloud=True,
         ),
         TemplateOption(
             id="human_in_the_loop",
             name="Human in the Loop",
             description="A workflow showcasing how to use human in the loop with LlamaIndex workflows",
-            source=VibeLlamaTemplate(name="human_in_the_loop"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-human-in-the-loop"
+            ),
             llama_cloud=False,
         ),
         TemplateOption(
             id="invoice_extraction",
             name="Invoice Extraction",
             description="A workflow that, given an invoice, extracts several key details using LlamaExtract",
-            source=VibeLlamaTemplate(name="invoice_extraction"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-invoice-extraction"
+            ),
             llama_cloud=True,
         ),
         TemplateOption(
             id="rag",
             name="RAG",
             description="A workflow that embeds, indexes and queries your documents on the fly, providing you with a simple RAG pipeline",
-            source=VibeLlamaTemplate(name="rag"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-rag"
+            ),
             llama_cloud=False,
         ),
         TemplateOption(
             id="web_scraping",
             name="Web Scraping",
             description="A workflow that, given several urls, scrapes and summarizes their content using Google's Gemini API",
-            source=VibeLlamaTemplate(name="web_scraping"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-web-scraping"
+            ),
             llama_cloud=False,
         ),
     ]
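
Since a template's source is now always a GitHub repository, scaffolding no longer branches on the source type. A minimal, self-contained sketch (dataclasses reproduced from the hunks above) of what a resolved option looks like:

    from dataclasses import dataclass

    @dataclass
    class GithubTemplateRepo:
        url: str

    @dataclass
    class TemplateOption:
        id: str
        name: str
        description: str
        source: GithubTemplateRepo
        llama_cloud: bool

    basic = TemplateOption(
        id="basic",
        name="Basic Workflow",
        description="A base example that showcases usage patterns for workflows",
        source=GithubTemplateRepo(
            url="https://github.com/run-llama/template-workflow-basic"
        ),
        llama_cloud=False,
    )
    # Every source now carries a repo URL that can go straight to copier,
    # with no isinstance() branch for the removed VibeLlamaTemplate case.
    print(basic.source.url)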
@@ -202,21 +208,19 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
     else:
         shutil.rmtree(dir, ignore_errors=True)
 
-    if isinstance(resolved_template.source, GithubTemplateRepo):
-        copier.run_copy(
-            resolved_template.source.url,
-            dir,
-            quiet=True,
-        )
-    else:
-        asyncio.run(create_scaffold(resolved_template.source.name, str(dir)))
+    copier.run_copy(
+        resolved_template.source.url,
+        dir,
+        quiet=True,
+    )
 
     # Initialize git repository if git is available
     has_git = False
     try:
         subprocess.run(["git", "--version"], check=True, capture_output=True)
         has_git = True
-    except subprocess.CalledProcessError:
-        pass
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        # git is not available or broken; continue without git
+        has_git = False
 
     # Change to the new directory and initialize git repo
     original_cwd = Path.cwd()
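
A note on the widened except clause above: subprocess.run raises FileNotFoundError when the executable itself is missing, while CalledProcessError (with check=True) only fires when git exists but exits nonzero, so the old handler would crash on machines without git at all. A standalone sketch of the probe pattern, using a hypothetical helper name:

    import subprocess

    def executable_available(cmd: str) -> bool:
        """Return True if `cmd --version` runs successfully."""
        try:
            # check=True raises CalledProcessError on a nonzero exit code;
            # a missing executable raises FileNotFoundError instead.
            subprocess.run([cmd, "--version"], check=True, capture_output=True)
            return True
        except (subprocess.CalledProcessError, FileNotFoundError):
            return False

    print(executable_available("git"))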
@@ -227,7 +231,7 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
 
         vibe_llama_starter = VibeLlamaStarter(
             agents=["OpenAI Codex CLI"],  # AGENTS.md, supported by Cursor,
-            services=["LlamaIndex", "llama-index-workflows"]
+            services=["LlamaDeploy", "LlamaIndex", "llama-index-workflows"]
             + (["LlamaCloud Services"] if resolved_template.llama_cloud else []),
         )
         asyncio.run(vibe_llama_starter.write_instructions(overwrite=True))
@@ -237,7 +241,10 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
         agents_path = Path("AGENTS.md")
         if agents_path.exists() and not claude_path.exists():
             claude_path.symlink_to("AGENTS.md")
-        if has_git:
+
+        # Initialize a git repo (best-effort). If anything fails, show a friendly note and continue.
+        if has_git:
+            try:
                 subprocess.run(["git", "init"], check=True, capture_output=True)
                 subprocess.run(["git", "add", "."], check=True, capture_output=True)
                 subprocess.run(
@@ -245,9 +252,40 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
                     check=True,
                     capture_output=True,
                 )
+            except (subprocess.CalledProcessError, FileNotFoundError) as e:
+                # Extract a short error message if present
+                err_msg = ""
+                if isinstance(e, subprocess.CalledProcessError):
+                    stderr = getattr(e, "stderr", b"")
+                    if isinstance(stderr, (bytes, bytearray)):
+                        try:
+                            stderr = stderr.decode("utf-8", "ignore")
+                        except Exception:
+                            stderr = ""
+                    if isinstance(stderr, str) and stderr.strip():
+                        err_msg = stderr.strip().split("\n")[-1]
+                elif isinstance(e, FileNotFoundError):
+                    err_msg = "git executable not found"
+
+                rprint("")
+                rprint("⚠️ [bold]Skipping git initialization due to an error.[/]")
+                if err_msg:
+                    rprint(f" {err_msg}")
+                rprint(" You can initialize it manually:")
+                rprint(" git init && git add . && git commit -m 'Initial commit'")
+                rprint("")
     finally:
         os.chdir(original_cwd)
 
+    # If git is not available at all, let the user know how to proceed
+    if not has_git:
+        rprint("")
+        rprint("⚠️ [bold]Skipping git initialization due to an error.[/]")
+        rprint(" git executable not found")
+        rprint(" You can initialize it manually:")
+        rprint(" git init && git add . && git commit -m 'Initial commit'")
+        rprint("")
+
     rprint(
         f"Successfully created [blue]{dir}[/] using the [blue]{resolved_template.name}[/] template! 🎉 🦙 💾"
     )

llama_deploy/cli/commands/serve.py

@@ -1,3 +1,4 @@
+import asyncio
 import logging
 import os
 from pathlib import Path
@@ -5,15 +6,19 @@ from typing import Literal
 
 import click
 import questionary
+from click.exceptions import Abort, Exit
 from llama_deploy.cli.commands.auth import validate_authenticated_profile
 from llama_deploy.cli.config.env_service import service
 from llama_deploy.cli.config.schema import Auth
 from llama_deploy.cli.options import interactive_option
 from llama_deploy.cli.styles import WARNING
+from llama_deploy.cli.utils.redact import redact_api_key
+from llama_deploy.core.client.manage_client import ControlPlaneClient
 from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from llama_deploy.core.deployment_config import (
     read_deployment_config_from_git_root_or_cwd,
 )
+from llama_deploy.core.schema.projects import ProjectSummary
 from rich import print as rprint
 
 from ..app import app
@@ -108,6 +113,7 @@ def serve(
             build=preview,
         )
         deployment_config = get_deployment_config()
+        _print_connection_summary()
         start_server_in_target_venv(
             cwd=Path.cwd(),
             deployment_file=deployment_file,
@@ -127,6 +133,9 @@ def serve(
                 else None,
             )
 
+    except (Exit, Abort):
+        raise
+
     except KeyboardInterrupt:
         logger.debug("Shutting down...")
 
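
The new `except (Exit, Abort): raise` clause re-raises click's control-flow exceptions so they reach click's own machinery (which turns them into exit codes) instead of being swallowed by a broader handler further down. A minimal sketch of the ordering, assuming click is installed; run_command and fails are hypothetical names:

    from click.exceptions import Abort, Exit

    def run_command(fn) -> None:
        try:
            fn()
        except (Exit, Abort):
            # Let click's control-flow exceptions propagate untouched.
            raise
        except Exception as exc:
            # Only genuinely unexpected errors land here.
            print(f"unexpected error: {exc}")

    def fails() -> None:
        raise ValueError("boom")

    run_command(fails)  # prints: unexpected error: boom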
@@ -207,6 +216,32 @@ def _maybe_inject_llama_cloud_credentials(
 
     existing = os.environ.get("LLAMA_CLOUD_API_KEY") or vars.get("LLAMA_CLOUD_API_KEY")
     if existing:
+        # If interactive, allow choosing between env var and configured profile
+        if interactive:
+            choice = questionary.select(
+                "LLAMA_CLOUD_API_KEY detected in environment. Which credentials do you want to use?",
+                choices=[
+                    questionary.Choice(
+                        title=f"Use environment variable - {redact_api_key(existing)}",
+                        value="env",
+                    ),
+                    questionary.Choice(title="Use configured profile", value="profile"),
+                ],
+            ).ask()
+            if choice is None:
+                raise Exit(0)
+            if choice == "profile":
+                # Ensure we have an authenticated profile and inject from it
+                authed = validate_authenticated_profile(True)
+                _set_env_vars_from_profile(authed)
+                return
+            # Default to env var path when cancelled or explicitly chosen
+            _set_env_vars_from_env({**os.environ, **vars})
+            # If no project id provided, try to detect and select one using the env API key
+            if not os.environ.get("LLAMA_DEPLOY_PROJECT_ID"):
+                _maybe_select_project_for_env_key()
+            return
+        # Non-interactive: trust current environment variables
         _set_env_vars_from_env({**os.environ, **vars})
         return
 
@@ -243,3 +278,56 @@ def _maybe_inject_llama_cloud_credentials(
     rprint(
         f"[{WARNING}]Warning: LLAMA_CLOUD_API_KEY is not set and no logged-in profile was found. The app may not work.[/]"
     )
+
+
+def _maybe_select_project_for_env_key() -> None:
+    """When using an env API key, ensure LLAMA_DEPLOY_PROJECT_ID is set.
+
+    If more than one project exists, prompt the user to select one.
+    """
+    api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+    base_url = os.environ.get("LLAMA_CLOUD_BASE_URL", "https://api.cloud.llamaindex.ai")
+    if not api_key:
+        return
+    try:
+
+        async def _run() -> list[ProjectSummary]:
+            async with ControlPlaneClient.ctx(base_url, api_key, None) as client:
+                return await client.list_projects()
+
+        projects = asyncio.run(_run())
+        if not projects:
+            return
+        if len(projects) == 1:
+            os.environ["LLAMA_DEPLOY_PROJECT_ID"] = projects[0].project_id
+            return
+        # Multiple: prompt selection
+        choice = questionary.select(
+            "Select a project",
+            choices=[
+                questionary.Choice(
+                    title=f"{p.project_name} ({p.deployment_count} deployments)",
+                    value=p.project_id,
+                )
+                for p in projects
+            ],
+        ).ask()
+        if choice:
+            os.environ["LLAMA_DEPLOY_PROJECT_ID"] = choice
+    except Exception:
+        # Best-effort; if we fail to list, do nothing
+        pass
+
+
+def _print_connection_summary() -> None:
+    base_url = os.environ.get("LLAMA_CLOUD_BASE_URL")
+    project_id = os.environ.get("LLAMA_DEPLOY_PROJECT_ID")
+    api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+    if not base_url and not project_id and not api_key:
+        return
+    redacted = redact_api_key(api_key)
+    env_text = base_url or "-"
+    proj_text = project_id or "-"
+    rprint(
+        f"Connecting to environment: [bold]{env_text}[/], project: [bold]{proj_text}[/], api key: [bold]{redacted}[/]"
+    )
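
For illustration, with hypothetical environment values like the ones below, the summary line printed before the server starts would read as follows (the key is masked by redact_api_key, and rich renders the [bold] markup):

    import os

    # Hypothetical values, for illustration only.
    os.environ["LLAMA_CLOUD_BASE_URL"] = "https://api.cloud.llamaindex.ai"
    os.environ["LLAMA_DEPLOY_PROJECT_ID"] = "proj-1234"
    os.environ["LLAMA_CLOUD_API_KEY"] = "llx-abcdefghijklmnop"

    # _print_connection_summary() would then print:
    # Connecting to environment: https://api.cloud.llamaindex.ai, project: proj-1234, api key: llx-ab****mnop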

llama_deploy/cli/config/auth_service.py

@@ -3,6 +3,7 @@ import asyncio
 from llama_deploy.cli.auth.client import PlatformAuthClient, RefreshMiddleware
 from llama_deploy.cli.config._config import Auth, ConfigManager, Environment
 from llama_deploy.cli.config.schema import DeviceOIDC
+from llama_deploy.cli.utils.redact import redact_api_key
 from llama_deploy.core.client.manage_client import ControlPlaneClient, httpx
 from llama_deploy.core.schema import VersionResponse
 from llama_deploy.core.schema.projects import ProjectSummary
@@ -123,8 +124,4 @@ class AuthService:
 
 def _auto_profile_name_from_token(api_key: str) -> str:
     token = api_key or "token"
-    cleaned = token.replace(" ", "")
-    first = cleaned[:6]
-    last = cleaned[-4:] if len(cleaned) > 10 else cleaned[-2:]
-    base = f"{first}****{last}"
-    return base
+    return redact_api_key(token)

llama_deploy/cli/utils/redact.py (new file)

@@ -0,0 +1,29 @@
+from __future__ import annotations
+
+
+def redact_api_key(
+    token: str | None,
+    visible_prefix: int = 6,
+    visible_suffix_long: int = 4,
+    visible_suffix_short: int = 2,
+    long_threshold: int = 10,
+    mask: str = "****",
+) -> str:
+    """Redact an API key for display.
+
+    Shows a prefix and suffix with a mask in the middle. If token is short,
+    reduces the suffix length to keep at least two trailing characters visible.
+
+    This mirrors the masking behavior used for profile names.
+    """
+    if not token:
+        return "-"
+    cleaned = token.replace(" ", "")
+    if len(cleaned) <= 0:
+        return "-"
+    first = cleaned[:visible_prefix]
+    last_len = (
+        visible_suffix_long if len(cleaned) > long_threshold else visible_suffix_short
+    )
+    last = cleaned[-last_len:] if last_len > 0 else ""
+    return f"{first}{mask}{last}"
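
A few illustrative calls, with outputs derived from the implementation above:

    from llama_deploy.cli.utils.redact import redact_api_key

    # Tokens longer than 10 chars keep a 6-char prefix and a 4-char suffix.
    print(redact_api_key("llx-abcdefghijklmnop"))  # llx-ab****mnop

    # Shorter tokens keep only a 2-char suffix.
    print(redact_api_key("abcdefgh"))  # abcdef****gh

    # Missing or empty tokens render as a placeholder dash.
    print(redact_api_key(None))  # -

For non-empty keys this produces the same masking that _auto_profile_name_from_token previously inlined, so auto-generated profile names are unchanged by the refactor.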

llamactl-0.3.10.dist-info/METADATA

@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.3.8
+Version: 0.3.10
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core[client]>=0.3.8,<0.4.0
-Requires-Dist: llama-deploy-appserver>=0.3.8,<0.4.0
+Requires-Dist: llama-deploy-core[client]>=0.3.10,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.10,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0
@@ -17,7 +17,7 @@ Requires-Dist: textual>=6.0.0
 Requires-Dist: aiohttp>=3.12.14
 Requires-Dist: copier>=9.9.0
 Requires-Dist: pyjwt[crypto]>=2.10.1
-Requires-Dist: vibe-llama>=0.4.2,<0.5.0
+Requires-Dist: vibe-llama>=0.4.4,<0.5.0
 Requires-Python: >=3.11, <4
 Description-Content-Type: text/markdown
 

llamactl-0.3.10.dist-info/RECORD

@@ -6,11 +6,11 @@ llama_deploy/cli/commands/aliased_group.py,sha256=bc41007c97b7b93981217dbd4d4591
 llama_deploy/cli/commands/auth.py,sha256=1381eee494c3a0c73253322b4a54af1a857d5b89e5f1685b8afa3422eecc5607,23937
 llama_deploy/cli/commands/deployment.py,sha256=46339e09135521c46ff90235ccf765c37b1a161cec11d92e92a54ceac6528b01,9883
 llama_deploy/cli/commands/env.py,sha256=36cb1b0abb9e3d1c5546d3e8a3c4c7839c4d6c2abf75763e39efb08376b3eae9,6808
-llama_deploy/cli/commands/init.py,sha256=20c6f7d8ff60858df97e08d0a6c06ef3008a2b41cab76fdf3a4734d42c0a164a,10836
-llama_deploy/cli/commands/serve.py,sha256=985a8c7c27caf46878841de4452aff05b69b13c669227337665431e0d48f5fbc,8688
+llama_deploy/cli/commands/init.py,sha256=48032051263023082354d0260656d0422c3c659ebbaed70a530195beaeb28f82,12679
+llama_deploy/cli/commands/serve.py,sha256=fc88f80bda980752bcf163b69db6c36d4dd5d5c570983477d7e942de80c8f1f7,12165
 llama_deploy/cli/config/_config.py,sha256=654a4b6d06542e3503edab7023fc1c3148de510b3e3f6194e28cd4bd3e7c029a,14230
 llama_deploy/cli/config/_migrations.py,sha256=37055641970e1ea41abc583f270dc8a9dab03076224a02cd5fb08bbab2b9259f,2333
-llama_deploy/cli/config/auth_service.py,sha256=8a61110e18c752bbec5fbca23cd5d35d4ec232a4371f8c8291ba07ad83d30c6c,5208
+llama_deploy/cli/config/auth_service.py,sha256=9e62ed2ea112e6142a5d384835568d4a926627eb58730af89bef9420f549d42e,5126
 llama_deploy/cli/config/env_service.py,sha256=cd51a68f1e9aad0bdd49cd76351cd54cea612a7f669512484c42e2876fea0458,2650
 llama_deploy/cli/config/migrations/0001_init.sql,sha256=aaffcb1fd0a00398ecf0af2d98ae26479c91519ec938efa99270f2d98dfdd1f4,1091
 llama_deploy/cli/config/migrations/0002_add_auth_fields.sql,sha256=31bd109e5fa0a9ad563a205b4c0e8110db4df3b4b3956704a0c0cdf345002daa,724
@@ -32,7 +32,8 @@ llama_deploy/cli/textual/llama_loader.py,sha256=33cb32a46dd40bcf889c553e44f2672c
 llama_deploy/cli/textual/secrets_form.py,sha256=df6699de29d2bc2cbcaddd41ad2495ce0e622cdccaadbc8369a6ee09a9e79d34,7251
 llama_deploy/cli/textual/styles.tcss,sha256=c8fa0eec00a97fa6907d223faaad82c6add1ea3f60009f1630be19282ea77e3b,3271
 llama_deploy/cli/utils/env_inject.py,sha256=01911758bcc3cf22aad0db0d1ade56aece48d6ad6bdb7186ea213337c67f5a89,688
-llamactl-0.3.8.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llamactl-0.3.8.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
-llamactl-0.3.8.dist-info/METADATA,sha256=163a9428bd0648a9b48cbb6af32ea2a13ae4b29da524d5501b3c109359271040,3252
-llamactl-0.3.8.dist-info/RECORD,,
+llama_deploy/cli/utils/redact.py,sha256=1e768d76b4a6708230c34f7ce8a5a82ab52795bb3d6ab0387071ab4e8d7e7934,863
+llamactl-0.3.10.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llamactl-0.3.10.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
+llamactl-0.3.10.dist-info/METADATA,sha256=c8f61d981b0dd97d1b6508154e2a8ab45da30316f1c110f034e0a2b9c8fb61b4,3255
+llamactl-0.3.10.dist-info/RECORD,,