llamactl 0.3.9__tar.gz → 0.3.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {llamactl-0.3.9 → llamactl-0.3.10}/PKG-INFO +3 -3
  2. {llamactl-0.3.9 → llamactl-0.3.10}/pyproject.toml +3 -3
  3. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/commands/init.py +24 -21
  4. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/commands/serve.py +88 -0
  5. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/auth_service.py +2 -5
  6. llamactl-0.3.10/src/llama_deploy/cli/utils/redact.py +29 -0
  7. {llamactl-0.3.9 → llamactl-0.3.10}/README.md +0 -0
  8. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/__init__.py +0 -0
  9. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/app.py +0 -0
  10. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/auth/client.py +0 -0
  11. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/client.py +0 -0
  12. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/commands/aliased_group.py +0 -0
  13. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/commands/auth.py +0 -0
  14. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/commands/deployment.py +0 -0
  15. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/commands/env.py +0 -0
  16. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/_config.py +0 -0
  17. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/_migrations.py +0 -0
  18. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/env_service.py +0 -0
  19. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/migrations/0001_init.sql +0 -0
  20. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/migrations/0002_add_auth_fields.sql +0 -0
  21. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/migrations/__init__.py +0 -0
  22. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/config/schema.py +0 -0
  23. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/debug.py +0 -0
  24. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/env.py +0 -0
  25. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/interactive_prompts/session_utils.py +0 -0
  26. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/interactive_prompts/utils.py +0 -0
  27. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/options.py +0 -0
  28. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/py.typed +0 -0
  29. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/styles.py +0 -0
  30. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/deployment_form.py +0 -0
  31. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/deployment_help.py +0 -0
  32. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/deployment_monitor.py +0 -0
  33. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/git_validation.py +0 -0
  34. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/github_callback_server.py +0 -0
  35. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/llama_loader.py +0 -0
  36. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/secrets_form.py +0 -0
  37. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/textual/styles.tcss +0 -0
  38. {llamactl-0.3.9 → llamactl-0.3.10}/src/llama_deploy/cli/utils/env_inject.py +0 -0
--- llamactl-0.3.9/PKG-INFO
+++ llamactl-0.3.10/PKG-INFO
@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.3.9
+Version: 0.3.10
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core[client]>=0.3.9,<0.4.0
-Requires-Dist: llama-deploy-appserver>=0.3.9,<0.4.0
+Requires-Dist: llama-deploy-core[client]>=0.3.10,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.10,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0
--- llamactl-0.3.9/pyproject.toml
+++ llamactl-0.3.10/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "llamactl"
-version = "0.3.9"
+version = "0.3.10"
 description = "A command-line interface for managing LlamaDeploy projects and deployments"
 readme = "README.md"
 license = { text = "MIT" }
@@ -9,8 +9,8 @@ authors = [
 ]
 requires-python = ">=3.11, <4"
 dependencies = [
-    "llama-deploy-core[client]>=0.3.9,<0.4.0",
-    "llama-deploy-appserver>=0.3.9,<0.4.0",
+    "llama-deploy-core[client]>=0.3.10,<0.4.0",
+    "llama-deploy-appserver>=0.3.10,<0.4.0",
     "httpx>=0.24.0,<1.0.0",
     "rich>=13.0.0",
     "questionary>=2.0.0",
--- llamactl-0.3.9/src/llama_deploy/cli/commands/init.py
+++ llamactl-0.3.10/src/llama_deploy/cli/commands/init.py
@@ -55,8 +55,6 @@ def init(
 
 def _create(template: str | None, dir: Path | None, force: bool) -> None:
     # defer loading to improve cli startup time
-    from vibe_llama.scaffold import create_scaffold
-    from vibe_llama.scaffold.scaffold import ProjectName
     from vibe_llama.sdk import VibeLlamaStarter
 
     @dataclass
@@ -64,13 +62,9 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
         id: str
         name: str
         description: str
-        source: VibeLlamaTemplate | GithubTemplateRepo
+        source: GithubTemplateRepo
         llama_cloud: bool
 
-    @dataclass
-    class VibeLlamaTemplate:
-        name: ProjectName
-
     @dataclass
     class GithubTemplateRepo:
         url: str
@@ -110,42 +104,54 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
             id="basic",
             name="Basic Workflow",
             description="A base example that showcases usage patterns for workflows",
-            source=VibeLlamaTemplate(name="basic"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-basic"
+            ),
             llama_cloud=False,
         ),
         TemplateOption(
             id="document_parsing",
             name="Document Parser",
             description="A workflow that, using LlamaParse, parses unstructured documents and returns their raw text content",
-            source=VibeLlamaTemplate(name="document_parsing"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-document-parsing"
+            ),
             llama_cloud=True,
         ),
         TemplateOption(
             id="human_in_the_loop",
             name="Human in the Loop",
             description="A workflow showcasing how to use human in the loop with LlamaIndex workflows",
-            source=VibeLlamaTemplate(name="human_in_the_loop"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-human-in-the-loop"
+            ),
             llama_cloud=False,
         ),
         TemplateOption(
             id="invoice_extraction",
             name="Invoice Extraction",
             description="A workflow that, given an invoice, extracts several key details using LlamaExtract",
-            source=VibeLlamaTemplate(name="invoice_extraction"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-invoice-extraction"
+            ),
             llama_cloud=True,
         ),
         TemplateOption(
             id="rag",
             name="RAG",
             description="A workflow that embeds, indexes and queries your documents on the fly, providing you with a simple RAG pipeline",
-            source=VibeLlamaTemplate(name="rag"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-rag"
+            ),
             llama_cloud=False,
         ),
         TemplateOption(
             id="web_scraping",
             name="Web Scraping",
             description="A workflow that, given several urls, scrapes and summarizes their content using Google's Gemini API",
-            source=VibeLlamaTemplate(name="web_scraping"),
+            source=GithubTemplateRepo(
+                url="https://github.com/run-llama/template-workflow-web-scraping"
+            ),
             llama_cloud=False,
         ),
     ]
@@ -202,14 +208,11 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
         else:
             shutil.rmtree(dir, ignore_errors=True)
 
-    if isinstance(resolved_template.source, GithubTemplateRepo):
-        copier.run_copy(
-            resolved_template.source.url,
-            dir,
-            quiet=True,
-        )
-    else:
-        asyncio.run(create_scaffold(resolved_template.source.name, str(dir)))
+    copier.run_copy(
+        resolved_template.source.url,
+        dir,
+        quiet=True,
+    )
     # Initialize git repository if git is available
     has_git = False
     try:
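Net effect: init now sources every template from a GitHub repository and materializes it with copier; the vibe_llama scaffold path is gone. A minimal standalone sketch of the same copier call, assuming copier is installed (the destination directory name here is hypothetical):

import copier

# Clone the basic workflow template into ./my-workflow, suppressing
# copier's progress output, matching the call in _create above.
copier.run_copy(
    "https://github.com/run-llama/template-workflow-basic",
    "my-workflow",
    quiet=True,
)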
--- llamactl-0.3.9/src/llama_deploy/cli/commands/serve.py
+++ llamactl-0.3.10/src/llama_deploy/cli/commands/serve.py
@@ -1,3 +1,4 @@
+import asyncio
 import logging
 import os
 from pathlib import Path
@@ -5,15 +6,19 @@ from typing import Literal
 
 import click
 import questionary
+from click.exceptions import Abort, Exit
 from llama_deploy.cli.commands.auth import validate_authenticated_profile
 from llama_deploy.cli.config.env_service import service
 from llama_deploy.cli.config.schema import Auth
 from llama_deploy.cli.options import interactive_option
 from llama_deploy.cli.styles import WARNING
+from llama_deploy.cli.utils.redact import redact_api_key
+from llama_deploy.core.client.manage_client import ControlPlaneClient
 from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from llama_deploy.core.deployment_config import (
     read_deployment_config_from_git_root_or_cwd,
 )
+from llama_deploy.core.schema.projects import ProjectSummary
 from rich import print as rprint
 
 from ..app import app
@@ -108,6 +113,7 @@ def serve(
             build=preview,
         )
         deployment_config = get_deployment_config()
+        _print_connection_summary()
         start_server_in_target_venv(
             cwd=Path.cwd(),
             deployment_file=deployment_file,
@@ -127,6 +133,9 @@
             else None,
         )
 
+    except (Exit, Abort):
+        raise
+
     except KeyboardInterrupt:
         logger.debug("Shutting down...")
 
@@ -207,6 +216,32 @@ def _maybe_inject_llama_cloud_credentials(
 
     existing = os.environ.get("LLAMA_CLOUD_API_KEY") or vars.get("LLAMA_CLOUD_API_KEY")
     if existing:
+        # If interactive, allow choosing between env var and configured profile
+        if interactive:
+            choice = questionary.select(
+                "LLAMA_CLOUD_API_KEY detected in environment. Which credentials do you want to use?",
+                choices=[
+                    questionary.Choice(
+                        title=f"Use environment variable - {redact_api_key(existing)}",
+                        value="env",
+                    ),
+                    questionary.Choice(title="Use configured profile", value="profile"),
+                ],
+            ).ask()
+            if choice is None:
+                raise Exit(0)
+            if choice == "profile":
+                # Ensure we have an authenticated profile and inject from it
+                authed = validate_authenticated_profile(True)
+                _set_env_vars_from_profile(authed)
+                return
+            # Default to env var path when cancelled or explicitly chosen
+            _set_env_vars_from_env({**os.environ, **vars})
+            # If no project id provided, try to detect and select one using the env API key
+            if not os.environ.get("LLAMA_DEPLOY_PROJECT_ID"):
+                _maybe_select_project_for_env_key()
+            return
+        # Non-interactive: trust current environment variables
         _set_env_vars_from_env({**os.environ, **vars})
         return
 
@@ -243,3 +278,56 @@
         rprint(
             f"[{WARNING}]Warning: LLAMA_CLOUD_API_KEY is not set and no logged-in profile was found. The app may not work.[/]"
         )
+
+
+def _maybe_select_project_for_env_key() -> None:
+    """When using an env API key, ensure LLAMA_DEPLOY_PROJECT_ID is set.
+
+    If more than one project exists, prompt the user to select one.
+    """
+    api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+    base_url = os.environ.get("LLAMA_CLOUD_BASE_URL", "https://api.cloud.llamaindex.ai")
+    if not api_key:
+        return
+    try:
+
+        async def _run() -> list[ProjectSummary]:
+            async with ControlPlaneClient.ctx(base_url, api_key, None) as client:
+                return await client.list_projects()
+
+        projects = asyncio.run(_run())
+        if not projects:
+            return
+        if len(projects) == 1:
+            os.environ["LLAMA_DEPLOY_PROJECT_ID"] = projects[0].project_id
+            return
+        # Multiple: prompt selection
+        choice = questionary.select(
+            "Select a project",
+            choices=[
+                questionary.Choice(
+                    title=f"{p.project_name} ({p.deployment_count} deployments)",
+                    value=p.project_id,
+                )
+                for p in projects
+            ],
+        ).ask()
+        if choice:
+            os.environ["LLAMA_DEPLOY_PROJECT_ID"] = choice
+    except Exception:
+        # Best-effort; if we fail to list, do nothing
+        pass
+
+
+def _print_connection_summary() -> None:
+    base_url = os.environ.get("LLAMA_CLOUD_BASE_URL")
+    project_id = os.environ.get("LLAMA_DEPLOY_PROJECT_ID")
+    api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
+    if not base_url and not project_id and not api_key:
+        return
+    redacted = redact_api_key(api_key)
+    env_text = base_url or "-"
+    proj_text = project_id or "-"
+    rprint(
+        f"Connecting to environment: [bold]{env_text}[/], project: [bold]{proj_text}[/], api key: [bold]{redacted}[/]"
+    )
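Net effect: serve can now prompt between an environment API key and a configured profile, auto-select a project when only the env key is set, and print a connection summary before starting the app server. A sketch of the summary line, using invented placeholder values (not real credentials or project ids):

import os

# Hypothetical environment, for illustration only.
os.environ["LLAMA_CLOUD_BASE_URL"] = "https://api.cloud.llamaindex.ai"
os.environ["LLAMA_DEPLOY_PROJECT_ID"] = "proj-1234"
os.environ["LLAMA_CLOUD_API_KEY"] = "llx-abcdefghijklmnop"

# _print_connection_summary() would then render (rich bold markup omitted):
#   Connecting to environment: https://api.cloud.llamaindex.ai, project: proj-1234, api key: llx-ab****mnop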
--- llamactl-0.3.9/src/llama_deploy/cli/config/auth_service.py
+++ llamactl-0.3.10/src/llama_deploy/cli/config/auth_service.py
@@ -3,6 +3,7 @@ import asyncio
 from llama_deploy.cli.auth.client import PlatformAuthClient, RefreshMiddleware
 from llama_deploy.cli.config._config import Auth, ConfigManager, Environment
 from llama_deploy.cli.config.schema import DeviceOIDC
+from llama_deploy.cli.utils.redact import redact_api_key
 from llama_deploy.core.client.manage_client import ControlPlaneClient, httpx
 from llama_deploy.core.schema import VersionResponse
 from llama_deploy.core.schema.projects import ProjectSummary
@@ -123,8 +124,4 @@ class AuthService:
 
 def _auto_profile_name_from_token(api_key: str) -> str:
     token = api_key or "token"
-    cleaned = token.replace(" ", "")
-    first = cleaned[:6]
-    last = cleaned[-4:] if len(cleaned) > 10 else cleaned[-2:]
-    base = f"{first}****{last}"
-    return base
+    return redact_api_key(token)
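The inline masking in _auto_profile_name_from_token is replaced by the shared helper, whose defaults reproduce the removed logic, so auto-generated profile names should be unchanged. An illustrative check with invented tokens:

from llama_deploy.cli.utils.redact import redact_api_key

def _old_mask(token: str) -> str:
    # The logic removed above, reproduced for comparison.
    cleaned = token.replace(" ", "")
    last = cleaned[-4:] if len(cleaned) > 10 else cleaned[-2:]
    return f"{cleaned[:6]}****{last}"

for t in ["llx-abcdefghijklmnop", "token"]:
    assert _old_mask(t) == redact_api_key(t)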
--- /dev/null
+++ llamactl-0.3.10/src/llama_deploy/cli/utils/redact.py
@@ -0,0 +1,29 @@
+from __future__ import annotations
+
+
+def redact_api_key(
+    token: str | None,
+    visible_prefix: int = 6,
+    visible_suffix_long: int = 4,
+    visible_suffix_short: int = 2,
+    long_threshold: int = 10,
+    mask: str = "****",
+) -> str:
+    """Redact an API key for display.
+
+    Shows a prefix and suffix with a mask in the middle. If token is short,
+    reduces the suffix length to keep at least two trailing characters visible.
+
+    This mirrors the masking behavior used for profile names.
+    """
+    if not token:
+        return "-"
+    cleaned = token.replace(" ", "")
+    if len(cleaned) <= 0:
+        return "-"
+    first = cleaned[:visible_prefix]
+    last_len = (
+        visible_suffix_long if len(cleaned) > long_threshold else visible_suffix_short
+    )
+    last = cleaned[-last_len:] if last_len > 0 else ""
+    return f"{first}{mask}{last}"
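A few worked examples of the new helper with its defaults (token values invented):

from llama_deploy.cli.utils.redact import redact_api_key

print(redact_api_key("llx-abcdefghijklmnop"))  # 20 chars > long_threshold -> "llx-ab****mnop"
print(redact_api_key("abcdef"))                # short token -> "abcdef****ef"
print(redact_api_key(None))                    # missing key -> "-"

Note that a token of visible_prefix characters or fewer is echoed in full by the prefix slice; the helper assumes real API keys are considerably longer.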