llamactl 0.3.0a19__py3-none-any.whl → 0.3.0a21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_deploy/cli/auth/client.py +362 -0
- llama_deploy/cli/client.py +14 -5
- llama_deploy/cli/commands/auth.py +300 -33
- llama_deploy/cli/commands/deployment.py +32 -38
- llama_deploy/cli/commands/env.py +19 -14
- llama_deploy/cli/commands/init.py +137 -34
- llama_deploy/cli/commands/serve.py +29 -12
- llama_deploy/cli/config/_config.py +178 -202
- llama_deploy/cli/config/_migrations.py +65 -0
- llama_deploy/cli/config/auth_service.py +64 -2
- llama_deploy/cli/config/env_service.py +15 -14
- llama_deploy/cli/config/migrations/0001_init.sql +35 -0
- llama_deploy/cli/config/migrations/0002_add_auth_fields.sql +24 -0
- llama_deploy/cli/config/migrations/__init__.py +7 -0
- llama_deploy/cli/config/schema.py +30 -0
- llama_deploy/cli/env.py +2 -1
- llama_deploy/cli/styles.py +10 -0
- llama_deploy/cli/textual/deployment_form.py +63 -7
- llama_deploy/cli/textual/deployment_monitor.py +71 -108
- llama_deploy/cli/textual/github_callback_server.py +4 -4
- llama_deploy/cli/textual/secrets_form.py +4 -0
- llama_deploy/cli/textual/styles.tcss +7 -5
- {llamactl-0.3.0a19.dist-info → llamactl-0.3.0a21.dist-info}/METADATA +5 -3
- llamactl-0.3.0a21.dist-info/RECORD +37 -0
- llama_deploy/cli/platform_client.py +0 -52
- llamactl-0.3.0a19.dist-info/RECORD +0 -32
- {llamactl-0.3.0a19.dist-info → llamactl-0.3.0a21.dist-info}/WHEEL +0 -0
- {llamactl-0.3.0a19.dist-info → llamactl-0.3.0a21.dist-info}/entry_points.txt +0 -0
llama_deploy/cli/commands/env.py
CHANGED
@@ -3,6 +3,13 @@ from importlib import metadata as importlib_metadata
 import click
 import questionary
 from llama_deploy.cli.config.schema import Environment
+from llama_deploy.cli.styles import (
+    ACTIVE_INDICATOR,
+    HEADER_COLOR,
+    MUTED_COL,
+    PRIMARY_COL,
+    WARNING,
+)
 from packaging import version as packaging_version
 from rich import print as rprint
 from rich.table import Table
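The hunk above swaps hard-coded Rich style strings for shared constants from the new llama_deploy/cli/styles.py module. A minimal sketch of the pattern, assuming the constants are plain Rich style names (their real values live in styles.py and are not shown in this diff):

from rich import print as rprint

# Placeholder values; the actual constants are defined in
# llama_deploy/cli/styles.py and may differ.
WARNING = "yellow"
HEADER_COLOR = "cyan"

def warn(message: str) -> None:
    # Rich parses the interpolated markup tag at render time;
    # "[/]" closes the most recently opened style.
    rprint(f"[{WARNING}]{message}[/]")

warn("No environments found")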
@@ -32,23 +39,21 @@ def list_environments_cmd() -> None:
         current_env = service.get_current_environment()

         if not envs:
-            rprint("[
+            rprint(f"[{WARNING}]No environments found[/]")
             return

-        table = Table(show_edge=False, box=None, header_style="bold
-        table.add_column("  API URL")
-        table.add_column("Requires Auth")
+        table = Table(show_edge=False, box=None, header_style=f"bold {HEADER_COLOR}")
+        table.add_column("  API URL", style=PRIMARY_COL)
+        table.add_column("Requires Auth", style=MUTED_COL)

         for env in envs:
             text = Text()
             if env == current_env:
-                text.append("* ", style=
+                text.append("* ", style=ACTIVE_INDICATOR)
             else:
                 text.append("  ")
             text.append(env.api_url)
-            table.add_row(
-                text, Text("true" if env.requires_auth else "false", style="grey46")
-            )
+            table.add_row(text, Text("true" if env.requires_auth else "false"))

         console.print(table)
     except Exception as e:
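The rewritten table moves styling from individual cells to the column definitions, so each add_row call stays plain. A runnable sketch with placeholder style values and sample data:

from rich.console import Console
from rich.table import Table
from rich.text import Text

# Placeholder styles standing in for the styles.py constants.
HEADER_COLOR, PRIMARY_COL, MUTED_COL, ACTIVE_INDICATOR = "cyan", "white", "grey46", "green"

table = Table(show_edge=False, box=None, header_style=f"bold {HEADER_COLOR}")
table.add_column("  API URL", style=PRIMARY_COL)    # column-level style applies to every cell,
table.add_column("Requires Auth", style=MUTED_COL)  # so per-cell Text(..., style=...) is unnecessary

for api_url, is_current, requires_auth in [
    ("https://api.cloud.llamaindex.ai", True, True),
    ("http://localhost:4501", False, False),
]:
    text = Text()
    if is_current:
        text.append("* ", style=ACTIVE_INDICATOR)
    else:
        text.append("  ")
    text.append(api_url)
    table.add_row(text, Text("true" if requires_auth else "false"))

Console().print(table)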
@@ -70,7 +75,7 @@ def add_environment_cmd(api_url: str | None, interactive: bool) -> None:
             "Enter control plane API URL", default=current_env.api_url
         ).ask()
         if not entered:
-            rprint("[
+            rprint(f"[{WARNING}]No environment entered[/]")
             return
         api_url = entered.strip()

@@ -102,7 +107,7 @@ def delete_environment_cmd(api_url: str | None, interactive: bool) -> None:
         )

         if not result:
-            rprint("[
+            rprint(f"[{WARNING}]No environment selected[/]")
             return
         api_url = result.api_url

@@ -133,13 +138,13 @@ def switch_environment_cmd(api_url: str | None, interactive: bool) -> None:
             "Select environment",
         )
         if not result:
-            rprint("[
+            rprint(f"[{WARNING}]No environment selected[/]")
             return
         selected_url = result.api_url

     if not selected_url:
         if interactive:
-            rprint("[
+            rprint(f"[{WARNING}]No environment selected[/]")
             return
         raise click.ClickException("API URL is required when not interactive")

@@ -150,7 +155,7 @@ def switch_environment_cmd(api_url: str | None, interactive: bool) -> None:
     try:
         env = service.auto_update_env(env)
     except Exception as e:
-        rprint(f"[
+        rprint(f"[{WARNING}]Failed to resolve environment: {e}[/]")
         return
     service.current_auth_service().select_any_profile()
     rprint(f"[green]Switched to environment[/green] {env.api_url}")
@@ -176,7 +181,7 @@ def _maybe_warn_min_version(min_required: str | None) -> None:
     try:
         if packaging_version.parse(current) < packaging_version.parse(min_required):
             rprint(
-                f"[
+                f"[{WARNING}]Warning:[/] This environment requires llamactl >= [bold]{min_required}[/bold], you have [bold]{current}[/bold]."
             )
     except Exception:
         # If packaging is not available or parsing fails, skip strict comparison
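_maybe_warn_min_version relies on packaging ordering pre-releases correctly when comparing the installed CLI against the server-advertised minimum. A reduced, runnable sketch of the comparison:

from packaging import version as packaging_version

def needs_upgrade(current: str, min_required: str) -> bool:
    try:
        # packaging orders pre-releases below the final release,
        # e.g. 0.3.0a19 < 0.3.0a21 < 0.3.0.
        return packaging_version.parse(current) < packaging_version.parse(min_required)
    except Exception:
        # Mirror the CLI: skip the strict check if parsing fails.
        return False

assert needs_upgrade("0.3.0a19", "0.3.0a21")
assert not needs_upgrade("0.3.0", "0.3.0a21")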
llama_deploy/cli/commands/init.py
CHANGED

@@ -1,3 +1,6 @@
+from __future__ import annotations
+
+import asyncio
 import os
 import shutil
 import subprocess
@@ -10,7 +13,11 @@ import questionary
 from click.exceptions import Exit
 from llama_deploy.cli.app import app
 from llama_deploy.cli.options import global_options
+from llama_deploy.cli.styles import HEADER_COLOR_HEX
 from rich import print as rprint
+from vibe_llama.scaffold import create_scaffold
+from vibe_llama.scaffold.scaffold import ProjectName
+from vibe_llama.sdk import VibeLlamaStarter


 @dataclass
|
|
18
25
|
id: str
|
|
19
26
|
name: str
|
|
20
27
|
description: str
|
|
21
|
-
|
|
28
|
+
source: VibeLlamaTemplate | GithubTemplateRepo
|
|
29
|
+
llama_cloud: bool
|
|
30
|
+
|
|
22
31
|
|
|
32
|
+
@dataclass
|
|
33
|
+
class VibeLlamaTemplate:
|
|
34
|
+
name: ProjectName
|
|
23
35
|
|
|
24
|
-
|
|
36
|
+
|
|
37
|
+
@dataclass
|
|
38
|
+
class GithubTemplateRepo:
|
|
39
|
+
url: str
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
ui_options = [
|
|
25
43
|
TemplateOption(
|
|
26
44
|
id="basic-ui",
|
|
27
45
|
name="Basic UI",
|
|
28
46
|
description="A basic starter workflow with a React Vite UI",
|
|
29
|
-
|
|
47
|
+
source=GithubTemplateRepo(
|
|
48
|
+
url="https://github.com/run-llama/template-workflow-basic-ui"
|
|
49
|
+
),
|
|
50
|
+
llama_cloud=False,
|
|
30
51
|
),
|
|
31
52
|
TemplateOption(
|
|
32
53
|
id="extraction-review",
|
|
33
54
|
name="Extraction Agent with Review UI",
|
|
34
55
|
description="Extract data from documents using a custom schema and Llama Cloud. Includes a UI to review and correct the results",
|
|
35
|
-
|
|
56
|
+
source=GithubTemplateRepo(
|
|
57
|
+
url="https://github.com/run-llama/template-workflow-data-extraction"
|
|
58
|
+
),
|
|
59
|
+
llama_cloud=True,
|
|
60
|
+
),
|
|
61
|
+
]
|
|
62
|
+
headless_options = [
|
|
63
|
+
TemplateOption(
|
|
64
|
+
id="basic",
|
|
65
|
+
name="Basic Workflow",
|
|
66
|
+
description="A base example that showcases usage patterns for workflows",
|
|
67
|
+
source=VibeLlamaTemplate(name="basic"),
|
|
68
|
+
llama_cloud=False,
|
|
69
|
+
),
|
|
70
|
+
TemplateOption(
|
|
71
|
+
id="document_parsing",
|
|
72
|
+
name="Document Parser",
|
|
73
|
+
description="A workflow that, using LlamaParse, parses unstructured documents and returns their raw text content",
|
|
74
|
+
source=VibeLlamaTemplate(name="document_parsing"),
|
|
75
|
+
llama_cloud=True,
|
|
76
|
+
),
|
|
77
|
+
TemplateOption(
|
|
78
|
+
id="human_in_the_loop",
|
|
79
|
+
name="Human in the Loop",
|
|
80
|
+
description="A workflow showcasing how to use human in the loop with LlamaIndex workflows",
|
|
81
|
+
source=VibeLlamaTemplate(name="human_in_the_loop"),
|
|
82
|
+
llama_cloud=False,
|
|
83
|
+
),
|
|
84
|
+
TemplateOption(
|
|
85
|
+
id="invoice_extraction",
|
|
86
|
+
name="Invoice Extraction",
|
|
87
|
+
description="A workflow that, given an invoice, extracts several key details using LlamaExtract",
|
|
88
|
+
source=VibeLlamaTemplate(name="invoice_extraction"),
|
|
89
|
+
llama_cloud=True,
|
|
90
|
+
),
|
|
91
|
+
TemplateOption(
|
|
92
|
+
id="rag",
|
|
93
|
+
name="RAG",
|
|
94
|
+
description="A workflow that embeds, indexes and queries your documents on the fly, providing you with a simple RAG pipeline",
|
|
95
|
+
source=VibeLlamaTemplate(name="rag"),
|
|
96
|
+
llama_cloud=False,
|
|
97
|
+
),
|
|
98
|
+
TemplateOption(
|
|
99
|
+
id="web_scraping",
|
|
100
|
+
name="Web Scraping",
|
|
101
|
+
description="A workflow that, given several urls, scrapes and summarizes their content using Google's Gemini API",
|
|
102
|
+
source=VibeLlamaTemplate(name="web_scraping"),
|
|
103
|
+
llama_cloud=False,
|
|
36
104
|
),
|
|
37
105
|
]
|
|
38
106
|
|
|
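TemplateOption.source is now a two-variant union of small dataclasses, and the create path later dispatches on its concrete type. A self-contained sketch of that shape; the print calls stand in for the real copier.run_copy / create_scaffold calls:

from __future__ import annotations

from dataclasses import dataclass

@dataclass
class VibeLlamaTemplate:
    name: str  # the real field is typed as vibe_llama's ProjectName

@dataclass
class GithubTemplateRepo:
    url: str

@dataclass
class TemplateOption:
    id: str
    name: str
    description: str
    source: VibeLlamaTemplate | GithubTemplateRepo
    llama_cloud: bool

def materialize(option: TemplateOption, dest: str) -> None:
    # isinstance picks the copy strategy for the chosen template.
    if isinstance(option.source, GithubTemplateRepo):
        print(f"copy {option.source.url} -> {dest}")
    else:
        print(f"scaffold {option.source.name} -> {dest}")

materialize(
    TemplateOption(
        id="rag",
        name="RAG",
        description="Simple RAG pipeline",
        source=VibeLlamaTemplate(name="rag"),
        llama_cloud=False,
    ),
    "./my-app",
)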
@@ -45,7 +113,7 @@ options = [
 )
 @click.option(
     "--template",
-    type=click.Choice([o.id for o in
+    type=click.Choice([o.id for o in ui_options]),
     help="The template to use for the new app",
 )
 @click.option(
@@ -76,12 +144,29 @@ def init(

 def _create(template: str | None, dir: Path | None, force: bool) -> None:
     if template is None:
+        rprint(
+            "[bold]Select a template to start from.[/bold] Either with javascript frontend UI, or just a python workflow that can be used as an API."
+        )
         template = questionary.select(
-            "
-            choices=[
+            "",
+            choices=[questionary.Separator("------------ With UI -------------")]
+            + [
+                questionary.Choice(title=o.name, value=o.id, description=o.description)
+                for o in ui_options
+            ]
+            + [
+                questionary.Separator(" "),
+                questionary.Separator("--- Headless Workflows (No UI) ---"),
+            ]
+            + [
                 questionary.Choice(title=o.name, value=o.id, description=o.description)
-                for o in
+                for o in headless_options
             ],
+            style=questionary.Style(
+                [
+                    ("separator", f"fg:{HEADER_COLOR_HEX}"),
+                ]
+            ),
         ).ask()
     if template is None:
         rprint("No template selected")
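The picker groups choices under questionary.Separator headings and recolors those headings with a questionary.Style rule keyed on the "separator" class. A runnable sketch with placeholder entries and a placeholder hex value for HEADER_COLOR_HEX:

import questionary

HEADER_COLOR_HEX = "#00aaaa"  # placeholder; the real value lives in styles.py

answer = questionary.select(
    "",
    choices=[questionary.Separator("------------ With UI -------------")]
    + [
        questionary.Choice(title="Basic UI", value="basic-ui", description="React Vite UI"),
    ]
    + [
        questionary.Separator(" "),
        questionary.Separator("--- Headless Workflows (No UI) ---"),
    ]
    + [
        questionary.Choice(title="RAG", value="rag", description="Simple RAG pipeline"),
    ],
    # The "separator" style class targets every Separator in the list.
    style=questionary.Style([("separator", f"fg:{HEADER_COLOR_HEX}")]),
).ask()
print(answer)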
@@ -94,7 +179,9 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
             rprint("No directory provided")
             raise Exit(1)
         dir = Path(dir_str)
-    resolved_template = next(
+    resolved_template: TemplateOption | None = next(
+        (o for o in ui_options + headless_options if o.id == template), None
+    )
     if resolved_template is None:
         rprint(f"Template {template} not found")
         raise Exit(1)
@@ -107,35 +194,51 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
             raise Exit(1)
         else:
             shutil.rmtree(dir, ignore_errors=True)
-
-
-
-
-
+
+    if isinstance(resolved_template.source, GithubTemplateRepo):
+        copier.run_copy(
+            resolved_template.source.url,
+            dir,
+            quiet=True,
+        )
+    else:
+        asyncio.run(create_scaffold(resolved_template.source.name, str(dir)))
     # Initialize git repository if git is available
-
+    has_git = False
     try:
         subprocess.run(["git", "--version"], check=True, capture_output=True)
+        has_git = True
+    except subprocess.CalledProcessError:
+        pass

-
-
-
-
-    try:
-        subprocess.run(["git", "init"], check=True, capture_output=True)
-        subprocess.run(["git", "add", "."], check=True, capture_output=True)
-        subprocess.run(
-            ["git", "commit", "-m", "Initial commit"],
-            check=True,
-            capture_output=True,
-        )
-        is_git_initialized = True
-    finally:
-        os.chdir(original_cwd)
+    # Change to the new directory and initialize git repo
+    original_cwd = Path.cwd()
+    os.chdir(dir)

-
-    #
-
+    try:
+        # Dump in a bunch of docs for AI agents
+        vibe_llama_starter = VibeLlamaStarter(
+            agents=["OpenAI Codex CLI"],  # AGENTS.md, supported by Cursor,
+            services=["LlamaIndex", "llama-index-workflows"]
+            + (["LlamaCloud Services"] if resolved_template.llama_cloud else []),
+        )
+        asyncio.run(vibe_llama_starter.write_instructions(overwrite=True))
+        # Create symlink for Claude.md to point to AGENTS.md
+        for alternate in ["CLAUDE.md", "GEMINI.md"]:  # don't support AGENTS.md (yet?)
+            claude_path = Path(alternate)  # not supported yet
+            agents_path = Path("AGENTS.md")
+            if agents_path.exists() and not claude_path.exists():
+                claude_path.symlink_to("AGENTS.md")
+        if has_git:
+            subprocess.run(["git", "init"], check=True, capture_output=True)
+            subprocess.run(["git", "add", "."], check=True, capture_output=True)
+            subprocess.run(
+                ["git", "commit", "-m", "Initial commit"],
+                check=True,
+                capture_output=True,
+            )
+    finally:
+        os.chdir(original_cwd)

     rprint(
         f"Successfully created [blue]{dir}[/] using the [blue]{resolved_template.name}[/] template! 🎉 🦙 💾"
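Two details of the rewritten flow are worth calling out: git is probed once up front and the result cached in has_git, and CLAUDE.md/GEMINI.md are created as relative symlinks to the generated AGENTS.md. A condensed sketch; the widened except clause is an assumption here, since a missing git binary raises FileNotFoundError rather than CalledProcessError:

import subprocess
from pathlib import Path

def detect_git() -> bool:
    try:
        subprocess.run(["git", "--version"], check=True, capture_output=True)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        # The diff above catches only CalledProcessError; catching
        # FileNotFoundError as well covers git being absent from PATH.
        return False

def link_agent_docs(project_dir: Path) -> None:
    agents = project_dir / "AGENTS.md"
    for alternate in ("CLAUDE.md", "GEMINI.md"):
        target = project_dir / alternate
        if agents.exists() and not target.exists():
            # Relative link target, so the project stays relocatable.
            target.symlink_to("AGENTS.md")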
@@ -146,7 +249,7 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
     rprint("  [orange3]uvx[/] llamactl serve")
     rprint("")
     rprint("[bold]To deploy:[/]")
-    if
+    if has_git:
         rprint("  [orange3]git[/] init")
         rprint("  [orange3]git[/] add .")
         rprint("  [orange3]git[/] commit -m 'Initial commit'")
llama_deploy/cli/commands/serve.py
CHANGED

@@ -1,3 +1,4 @@
+import logging
 import os
 from pathlib import Path

@@ -12,6 +13,7 @@ from llama_deploy.cli.commands.auth import validate_authenticated_profile
 from llama_deploy.cli.config.env_service import service
 from llama_deploy.cli.config.schema import Auth
 from llama_deploy.cli.options import interactive_option
+from llama_deploy.cli.styles import WARNING
 from llama_deploy.core.config import DEFAULT_DEPLOYMENT_FILE_PATH
 from llama_deploy.core.deployment_config import (
     read_deployment_config_from_git_root_or_cwd,
@@ -20,6 +22,8 @@ from rich import print as rprint

 from ..app import app

+logger = logging.getLogger(__name__)
+

 @app.command(
     "serve",
@@ -97,7 +101,7 @@ def serve(
         )

     except KeyboardInterrupt:
-
+        logger.debug("Shutting down...")

     except Exception as e:
         rprint(f"[red]Error: {e}[/red]")
@@ -107,11 +111,14 @@ def serve(
 def _set_env_vars_from_profile(profile: Auth):
     if profile.api_key:
         _set_env_vars(profile.api_key, profile.api_url)
+    _set_project_id_from_profile(profile)


 def _set_env_vars_from_env(env_vars: dict[str, str]):
     key = env_vars.get("LLAMA_CLOUD_API_KEY")
     url = env_vars.get("LLAMA_CLOUD_BASE_URL", "https://api.cloud.llamaindex.ai")
+    # Also propagate project id if present in the environment
+    _set_project_id_from_env(env_vars)
     if key:
         _set_env_vars(key, url)

@@ -125,6 +132,17 @@ def _set_env_vars(key: str, url: str):
     os.environ[f"{prefix}LLAMA_CLOUD_BASE_URL"] = url


+def _set_project_id_from_env(env_vars: dict[str, str]):
+    project_id = env_vars.get("LLAMA_DEPLOY_PROJECT_ID")
+    if project_id:
+        os.environ["LLAMA_DEPLOY_PROJECT_ID"] = project_id
+
+
+def _set_project_id_from_profile(profile: Auth):
+    if profile.project_id:
+        os.environ["LLAMA_DEPLOY_PROJECT_ID"] = profile.project_id
+
+
 def _maybe_inject_llama_cloud_credentials(
     deployment_file: Path, interactive: bool
 ) -> None:
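Both helpers funnel LLAMA_DEPLOY_PROJECT_ID into os.environ so child app and UI processes inherit it. At the call site (next hunk) the deployment file's variables are merged over the current environment, so a value from the file wins. A small runnable sketch of that merge order:

import os

def set_project_id_from_env(env_vars: dict[str, str]) -> None:
    # Only export the variable when a value is actually present.
    project_id = env_vars.get("LLAMA_DEPLOY_PROJECT_ID")
    if project_id:
        os.environ["LLAMA_DEPLOY_PROJECT_ID"] = project_id

# Hypothetical parsed deployment-file variables; merged last, they
# take precedence over anything already in os.environ.
file_vars = {"LLAMA_DEPLOY_PROJECT_ID": "proj-123"}
set_project_id_from_env({**os.environ, **file_vars})
assert os.environ["LLAMA_DEPLOY_PROJECT_ID"] == "proj-123"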
@@ -154,6 +172,9 @@ def _maybe_inject_llama_cloud_credentials(
         config, deployment_file.parent if deployment_file.is_file() else deployment_file
     )

+    # Ensure project id is available to the app and UI processes
+    _set_project_id_from_env({**os.environ, **vars})
+
     existing = os.environ.get("LLAMA_CLOUD_API_KEY") or vars.get("LLAMA_CLOUD_API_KEY")
     if existing:
         _set_env_vars_from_env({**os.environ, **vars})
@@ -162,7 +183,7 @@ def _maybe_inject_llama_cloud_credentials(
     env = service.get_current_environment()
     if not env.requires_auth:
         rprint(
-            "[
+            f"[{WARNING}]Warning: This app requires Llama Cloud authentication, and no LLAMA_CLOUD_API_KEY is present. The app may not work.[/]"
         )
         return

@@ -179,20 +200,16 @@ def _maybe_inject_llama_cloud_credentials(
             default=True,
         ).ask()
         if should_login:
-
-
-
-
-            return
-        except Exception:
-            # fall through to warning
-            pass
+            authed = validate_authenticated_profile(True)
+            if authed.api_key:
+                _set_env_vars_from_profile(authed)
+            return
         rprint(
-            "[
+            f"[{WARNING}]Warning: No Llama Cloud credentials configured. The app may not work.[/]"
         )
         return

     # Non-interactive session
     rprint(
-        "[
+        f"[{WARNING}]Warning: LLAMA_CLOUD_API_KEY is not set and no logged-in profile was found. The app may not work.[/]"
     )