deepeval 3.7.6__py3-none-any.whl → 3.7.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/cli/main.py +2022 -759
- deepeval/cli/utils.py +208 -36
- deepeval/config/dotenv_handler.py +19 -0
- deepeval/config/settings.py +658 -262
- deepeval/config/utils.py +9 -1
- deepeval/evaluate/execute.py +153 -94
- deepeval/key_handler.py +121 -51
- deepeval/metrics/base_metric.py +9 -3
- deepeval/metrics/g_eval/g_eval.py +6 -1
- deepeval/metrics/indicator.py +8 -4
- deepeval/metrics/mcp/mcp_task_completion.py +15 -16
- deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +15 -15
- deepeval/metrics/mcp/schema.py +4 -0
- deepeval/metrics/mcp/template.py +8 -1
- deepeval/metrics/prompt_alignment/prompt_alignment.py +6 -3
- deepeval/metrics/tool_use/schema.py +4 -0
- deepeval/metrics/tool_use/template.py +16 -2
- deepeval/metrics/tool_use/tool_use.py +30 -28
- deepeval/metrics/topic_adherence/schema.py +4 -0
- deepeval/metrics/topic_adherence/template.py +8 -1
- deepeval/metrics/topic_adherence/topic_adherence.py +15 -14
- deepeval/metrics/turn_contextual_precision/template.py +8 -1
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +44 -86
- deepeval/metrics/turn_contextual_recall/template.py +8 -1
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +44 -82
- deepeval/metrics/turn_contextual_relevancy/template.py +8 -1
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +48 -92
- deepeval/metrics/turn_faithfulness/template.py +8 -1
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +76 -130
- deepeval/metrics/utils.py +16 -1
- deepeval/models/__init__.py +2 -0
- deepeval/models/llms/__init__.py +2 -0
- deepeval/models/llms/amazon_bedrock_model.py +5 -4
- deepeval/models/llms/anthropic_model.py +4 -3
- deepeval/models/llms/azure_model.py +4 -3
- deepeval/models/llms/deepseek_model.py +5 -8
- deepeval/models/llms/grok_model.py +5 -8
- deepeval/models/llms/kimi_model.py +5 -8
- deepeval/models/llms/litellm_model.py +2 -0
- deepeval/models/llms/local_model.py +1 -1
- deepeval/models/llms/openai_model.py +4 -3
- deepeval/models/retry_policy.py +10 -5
- deepeval/models/utils.py +1 -5
- deepeval/simulator/conversation_simulator.py +6 -2
- deepeval/simulator/template.py +3 -1
- deepeval/synthesizer/synthesizer.py +19 -17
- deepeval/test_run/test_run.py +6 -1
- deepeval/utils.py +26 -0
- {deepeval-3.7.6.dist-info → deepeval-3.7.7.dist-info}/METADATA +3 -3
- {deepeval-3.7.6.dist-info → deepeval-3.7.7.dist-info}/RECORD +54 -53
- {deepeval-3.7.6.dist-info → deepeval-3.7.7.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.6.dist-info → deepeval-3.7.7.dist-info}/WHEEL +0 -0
- {deepeval-3.7.6.dist-info → deepeval-3.7.7.dist-info}/entry_points.txt +0 -0
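The bulk of the change set is in deepeval/cli/main.py, which adds a top-level Typer callback with an eager `--version`/`-V` option, a power-user `settings` command, and `--save`/`--quiet`/`--prompt-api-key`-style options across the provider commands. Below is a minimal sketch (not part of the diff) of exercising the new `--version` flag through Typer's test runner; it assumes deepeval 3.7.7 and typer are installed and only mirrors names visible in the diff rather than documenting an official API.

# Minimal sketch: invoke the eager `--version` option added to the Typer app
# in deepeval/cli/main.py for 3.7.7 (assumes deepeval and typer are installed).
from typer.testing import CliRunner

from deepeval.cli.main import app

runner = CliRunner()
result = runner.invoke(app, ["--version"])
print(result.output.strip())  # expected: the installed deepeval version, e.g. "3.7.7"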
deepeval/cli/main.py
CHANGED
@@ -16,32 +16,42 @@ General behavior for all `set-*` / `unset-*` commands:
 """
 
 import os
-from typing import Optional
-from rich import print
-from rich.markup import escape
 import webbrowser
 import threading
 import random
 import string
 import socket
 import typer
+import importlib.metadata
+from typing import List, Optional
+from rich import print
+from rich.markup import escape
+from rich.console import Console
+from rich.table import Table
 from enum import Enum
+from pathlib import Path
 from pydantic import SecretStr
+from pydantic_core import PydanticUndefined
 from deepeval.key_handler import (
     EmbeddingKeyValues,
     ModelKeyValues,
 )
 from deepeval.telemetry import capture_login_event, capture_view_event
-from deepeval.cli.test import app as test_app
-from deepeval.cli.server import start_server
 from deepeval.config.settings import get_settings
 from deepeval.utils import delete_file_if_exists, open_browser
 from deepeval.test_run.test_run import (
     LATEST_TEST_RUN_FILE_PATH,
     global_test_run_manager,
 )
+from deepeval.cli.test import app as test_app
+from deepeval.cli.server import start_server
 from deepeval.cli.utils import (
+    coerce_blank_to_none,
+    is_optional,
+    load_service_account_key_file,
+    parse_and_validate,
     render_login_message,
+    resolve_field_names,
     upload_and_open_link,
     PROD,
 )
@@ -49,7 +59,7 @@ from deepeval.confident.api import (
     is_confident,
 )
 
-app = typer.Typer(name="deepeval")
+app = typer.Typer(name="deepeval", no_args_is_help=True)
 app.add_typer(test_app, name="test")
 
 
@@ -58,6 +68,17 @@ class Regions(Enum):
     EU = "EU"
 
 
+def version_callback(value: Optional[bool] = None) -> None:
+    if not value:
+        return
+    try:
+        version = importlib.metadata.version("deepeval")
+    except importlib.metadata.PackageNotFoundError:
+        from deepeval import __version__ as version  # type: ignore
+    typer.echo(version)  # or: typer.echo(f"deepeval {v}")
+    raise typer.Exit()
+
+
 def generate_pairing_code():
     """Generate a random pairing code."""
     return "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
@@ -84,6 +105,54 @@ def is_openai_configured() -> bool:
     return bool(env and env.strip())
 
 
+def _handle_save_result(
+    *,
+    handled: bool,
+    path: Optional[str],
+    updates: dict,
+    save: Optional[str],
+    quiet: bool,
+    success_msg: Optional[str] = None,
+    updated_msg: str = "Saved environment variables to {path} (ensure it's git-ignored).",
+    no_changes_msg: str = "No changes to save in {path}.",
+    tip_msg: Optional[str] = None,
+) -> bool:
+    if not handled and save is not None:
+        raise typer.BadParameter(
+            "Unsupported --save option. Use --save=dotenv[:path].",
+            param_hint="--save",
+        )
+
+    if quiet:
+        return False
+
+    if path and updates:
+        print(updated_msg.format(path=path))
+    elif path:
+        print(no_changes_msg.format(path=path))
+    elif tip_msg:
+        print(tip_msg)
+
+    if success_msg:
+        print(success_msg)
+
+    return True
+
+
+@app.callback()
+def main(
+    version: Optional[bool] = typer.Option(
+        None,
+        "--version",
+        "-V",
+        help="Show the DeepEval version and exit.",
+        callback=version_callback,
+        is_eager=True,
+    ),
+) -> None:
+    pass
+
+
 @app.command(name="set-confident-region")
 def set_confident_region_command(
     region: Regions = typer.Argument(
@@ -91,10 +160,17 @@ def set_confident_region_command(
     ),
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Persist CLI parameters as environment variables in a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
     ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
     """Set the Confident AI data region."""
     # Add flag emojis based on region
@@ -104,52 +180,46 @@ def set_confident_region_command(
     with settings.edit(save=save) as edit_ctx:
         settings.CONFIDENT_REGION = region.value
 
-    handled, path,
-
-    if not handled and save is not None:
-        # invalid --save format (unsupported)
-        print("Unsupported --save option. Use --save=dotenv[:path].")
-    elif path:
-        # persisted to a file
-        print(
-            f"Saved environment variables to {path} (ensure it's git-ignored)."
-        )
-    else:
-        # updated in-memory & process env only
-        print(
-            "Settings updated for this session. To persist, use --save=dotenv[:path] "
-            "(default .env.local) or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
-        )
+    handled, path, updates = edit_ctx.result
 
-
-
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=(
+            f":raising_hands: Congratulations! You're now using the {flag} {region.value} data region for Confident AI."
+        ),
     )
 
 
-@app.command(
+@app.command(
+    help=(
+        "Login will prompt you for your Confident AI API key (input hidden). "
+        "Get it from https://app.confident-ai.com. "
+        "Required to log events to the server. "
+        "The API key will be saved in your environment variables, typically in .env.local, unless a different path is provided with --save."
+    )
+)
 def login(
-    api_key: str = typer.Option(
-        "",
-        help="API key to get from https://app.confident-ai.com. Required if you want to log events to the server.",
-    ),
-    confident_api_key: Optional[str] = typer.Option(
-        None,
-        "--confident-api-key",
-        "-c",
-        help="Confident API key (non-interactive). If omitted, you'll be prompted to enter one. In all cases the key is saved to a dotenv file (default: .env.local) unless overridden with --save.",
-    ),
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
        help="Where to persist settings. Format: dotenv[:path]. Defaults to .env.local. If omitted, login still writes to .env.local.",
     ),
 ):
+    api_key = coerce_blank_to_none(
+        typer.prompt("🔐 Enter your API Key", hide_input=True)
+    )
+
     with capture_login_event() as span:
         completed = False
         try:
             # Resolve the key from CLI flag or interactive flow
-            if
-                key =
+            if api_key is not None:
+                key = api_key
             else:
                 render_login_message()
 
@@ -171,16 +241,15 @@ def login(
                 )
 
                 # Manual fallback if still empty
-
-
-
-
-
-
-
-
-
-                key = api_key.strip()
+                while True:
+                    api_key = coerce_blank_to_none(
+                        typer.prompt("🔐 Enter your API Key", hide_input=True)
+                    )
+                    if api_key:
+                        break
+                    else:
+                        print("❌ API Key cannot be empty. Please try again.\n")
+                key = api_key
 
             settings = get_settings()
             save = save or settings.DEEPEVAL_DEFAULT_SAVE or "dotenv:.env.local"
@@ -224,9 +293,16 @@ def login(
 def logout(
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Where to remove the saved key from. Use format dotenv[:path]. If omitted, uses DEEPEVAL_DEFAULT_SAVE or .env.local. The JSON keystore is always cleared.",
-    )
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
     """
     Log out of Confident AI.
@@ -244,18 +320,19 @@ def logout(
 
     handled, path, updated = edit_ctx.result
 
-    if
-
-
-
-
-
-
+    if _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updated,
+        save=save,
+        quiet=quiet,
+        updated_msg="Removed Confident AI key(s) from {path}.",
+        tip_msg=None,
+    ):
+        print("\n🎉🥳 You've successfully logged out! :raising_hands: ")
 
     delete_file_if_exists(LATEST_TEST_RUN_FILE_PATH)
 
-    print("\n🎉🥳 You've successfully logged out! :raising_hands: ")
-
 
 @app.command()
 def view():
@@ -273,7 +350,183 @@ def view():
         upload_and_open_link(_span=span)
 
 
-@app.command(
+@app.command(
+    name="settings",
+    help=(
+        "Power-user command to set/unset any DeepEval Settings field. "
+        "Uses Pydantic type validation. Supports partial, case-insensitive matching for --unset and --list."
+    ),
+)
+def update_settings(
+    set_: Optional[List[str]] = typer.Option(
+        None,
+        "-u",
+        "--set",
+        help="Set a setting (repeatable). Format: KEY=VALUE",
+    ),
+    unset: Optional[List[str]] = typer.Option(
+        None,
+        "-U",
+        "--unset",
+        help=(
+            "Unset setting(s) by name or partial match (repeatable, case-insensitive). "
+            "If a filter matches multiple keys, all are unset."
+        ),
+    ),
+    list_: bool = typer.Option(
+        False,
+        "-l",
+        "--list",
+        help="List available settings. You can optionally pass a FILTER argument, such as `-l verbose`.",
+    ),
+    filters: Optional[List[str]] = typer.Argument(
+        None,
+        help="Optional filter(s) for --list (case-insensitive substring match). You can pass multiple terms.",
+    ),
+    save: Optional[str] = typer.Option(
+        None,
+        "-s",
+        "--save",
+        help="Persist settings to dotenv. Usage: --save=dotenv[:path] (default: .env.local)",
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
+):
+    def _format_setting_value(val: object) -> str:
+        if isinstance(val, SecretStr):
+            secret = val.get_secret_value()
+            return "********" if secret and secret.strip() else ""
+        if val is None:
+            return ""
+        s = str(val)
+        return s if len(s) <= 120 else (s[:117] + "…")
+
+    def _print_settings_list(filter_terms: Optional[List[str]]) -> None:
+        needles = []
+        for term in filter_terms or []:
+            t = term.strip().lower().replace("-", "_")
+            if t:
+                needles.append(t)
+
+        table = Table(title="Settings")
+        table.add_column("Name", style="bold")
+        table.add_column("Value", overflow="fold")
+        table.add_column("Description", overflow="fold")
+
+        shown = 0
+        for name in sorted(fields.keys()):
+            hay = name.lower().replace("-", "_")
+            if needles and not any(n in hay for n in needles):
+                continue
+
+            field_info = fields[name]
+            desc = field_info.description or ""
+            current_val = getattr(settings, name, None)
+            table.add_row(name, _format_setting_value(current_val), desc)
+            shown += 1
+
+        if shown == 0:
+            raise typer.BadParameter(f"No settings matched: {filter_terms!r}")
+
+        Console().print(table)
+
+    settings = get_settings()
+    fields = type(settings).model_fields
+
+    if filters is not None and not list_:
+        raise typer.BadParameter("FILTER can only be used with --list / -l.")
+
+    if list_:
+        if set_ or unset:
+            raise typer.BadParameter(
+                "--list cannot be combined with --set/--unset."
+            )
+        _print_settings_list(filters)
+        return
+
+    # Build an assignment plan: name -> value (None means "unset")
+    plan: dict[str, object] = {}
+
+    # --unset (filters)
+    if unset:
+        matched_any = False
+        for f in unset:
+            matches = resolve_field_names(settings, f)
+            if not matches:
+                continue
+            matched_any = True
+            for name in matches:
+                field_info = fields[name]
+                ann = field_info.annotation
+
+                # "unset" semantics:
+                # - Optional -> None
+                # - else -> reset to default if it exists
+                if is_optional(ann):
+                    plan[name] = None
+                elif field_info.default is not PydanticUndefined:
+                    plan[name] = field_info.default
+                else:
+                    raise typer.BadParameter(
+                        f"Cannot unset required setting {name} (no default, not Optional)."
+                    )
+
+    if unset and not matched_any:
+        raise typer.BadParameter(f"No settings matched: {unset!r}")
+
+    # --set KEY=VALUE
+    if set_:
+        for item in set_:
+            key, sep, raw = item.partition("=")
+            if not sep:
+                raise typer.BadParameter(
+                    f"--set must be KEY=VALUE (got {item!r})"
+                )
+
+            matches = resolve_field_names(settings, key)
+            if not matches:
+                raise typer.BadParameter(f"Unknown setting: {key!r}")
+            if len(matches) > 1:
+                raise typer.BadParameter(
+                    f"Ambiguous setting {key!r}; matches: {', '.join(matches)}"
+                )
+
+            name = matches[0]
+            field_info = fields[name]
+            plan[name] = parse_and_validate(name, field_info, raw)
+
+    if not plan:
+        # nothing requested
+        return
+
+    with settings.edit(save=save) as edit_ctx:
+        for name, val in plan.items():
+            setattr(settings, name, val)
+
+    handled, path, updates = edit_ctx.result
+
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=":wrench: Settings updated." if updates else None,
+    )
+
+
+@app.command(
+    name="set-debug",
+    help=(
+        "Configure verbosity flags (global LOG_LEVEL, verbose mode), retry logger levels, "
+        "gRPC logging, and Confident trace toggles. Use the --save option to persist settings "
+        "to a dotenv file (default: .env.local)."
+    ),
+)
 def set_debug(
     # Core verbosity
     log_level: Optional[str] = typer.Option(
@@ -284,6 +537,16 @@ def set_debug(
     verbose: Optional[bool] = typer.Option(
         None, "--verbose/--no-verbose", help="Toggle DEEPEVAL_VERBOSE_MODE."
     ),
+    debug_async: Optional[bool] = typer.Option(
+        None,
+        "--debug-async/--no-debug-async",
+        help="Toggle DEEPEVAL_DEBUG_ASYNC.",
+    ),
+    log_stack_traces: Optional[bool] = typer.Option(
+        None,
+        "--log-stack-traces/--no-log-stack-traces",
+        help="Toggle DEEPEVAL_LOG_STACK_TRACES.",
+    ),
     # Retry logging dials
     retry_before_level: Optional[str] = typer.Option(
         None,
@@ -353,35 +616,27 @@ def set_debug(
         "--metric-logging-enabled/--no-metric-logging-enabled",
         help="Enable / disable CONFIDENT_METRIC_LOGGING_ENABLED.",
     ),
-    # Advanced / potentially surprising
-    error_reporting: Optional[bool] = typer.Option(
-        None,
-        "--error-reporting/--no-error-reporting",
-        help="Enable / disable ERROR_REPORTING.",
-    ),
-    ignore_errors: Optional[bool] = typer.Option(
-        None,
-        "--ignore-errors/--no-ignore-errors",
-        help="Enable / disable IGNORE_DEEPEVAL_ERRORS (not recommended in normal debugging).",
-    ),
     # Persistence
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Persist CLI parameters as environment variables in a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
     ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
     """
-    Configure
+    Configure debug and logging behaviors for DeepEval.
 
-
-
-
-
-    Examples:
-        deepeval set-debug --log-level DEBUG --verbose --grpc --retry-before-level DEBUG --retry-after-level INFO
-        deepeval set-debug --trace-verbose --trace-env staging --save dotenv:.env.local
+    Use verbosity flags to set the global log level, retry logging behavior, gRPC logging,
+    Confident AI tracing, and more. This command applies changes immediately but can also
+    persist settings to a dotenv file with --save.
     """
     settings = get_settings()
     with settings.edit(save=save) as edit_ctx:
@@ -390,6 +645,10 @@ def set_debug(
             settings.LOG_LEVEL = log_level
         if verbose is not None:
             settings.DEEPEVAL_VERBOSE_MODE = verbose
+        if debug_async is not None:
+            settings.DEEPEVAL_DEBUG_ASYNC = debug_async
+        if log_stack_traces is not None:
+            settings.DEEPEVAL_LOG_STACK_TRACES = log_stack_traces
 
         # Retry logging
         if retry_before_level is not None:
@@ -427,84 +686,80 @@ def set_debug(
         if metric_logging_enabled is not None:
             settings.CONFIDENT_METRIC_LOGGING_ENABLED = metric_logging_enabled
 
-
-        if error_reporting is not None:
-            settings.ERROR_REPORTING = error_reporting
-        if ignore_errors is not None:
-            settings.IGNORE_DEEPEVAL_ERRORS = ignore_errors
-
-    handled, path, updated = edit_ctx.result
-
-    if not updated:
-        # no changes were made, so there is nothing to do.
-        return
-
-    if not handled and save is not None:
-        print("Unsupported --save option. Use --save=dotenv[:path].")
-    elif path:
-        print(
-            f"Saved environment variables to {path} (ensure it's git-ignored)."
-        )
-    else:
-        print(
-            "Settings updated for this session. To persist, use --save=dotenv[:path] "
-            "(default .env.local) or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
-        )
+    handled, path, updates = edit_ctx.result
 
-
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=":loud_sound: Debug options updated." if updates else None,
+    )
 
 
-@app.command(
+@app.command(
+    name="unset-debug",
+    help=(
+        "Restore default behavior by removing debug-related overrides. "
+        "Use --save to also remove these keys from a dotenv file (default: .env.local)."
+    ),
+)
 def unset_debug(
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Remove only the debug-related environment variables from a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
     ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
-    """
-    Restore default behavior by unsetting debug related variables.
-
-    Behavior:
-    - Resets LOG_LEVEL back to 'info'.
-    - Unsets DEEPEVAL_VERBOSE_MODE, retry log-level overrides, gRPC and Confident trace flags.
-    - If --save is provided (or DEEPEVAL_DEFAULT_SAVE is set), removes these keys from the target dotenv file.
-    """
     settings = get_settings()
     with settings.edit(save=save) as edit_ctx:
-        #
-        settings.LOG_LEVEL =
-        settings.CONFIDENT_TRACE_ENVIRONMENT = "development"
-        settings.CONFIDENT_TRACE_VERBOSE = True
-        settings.CONFIDENT_METRIC_LOGGING_VERBOSE = True
-        settings.CONFIDENT_METRIC_LOGGING_ENABLED = True
-
-        # Clear optional toggles/overrides
+        # Core verbosity
+        settings.LOG_LEVEL = None
         settings.DEEPEVAL_VERBOSE_MODE = None
+        settings.DEEPEVAL_DEBUG_ASYNC = None
+        settings.DEEPEVAL_LOG_STACK_TRACES = None
+
+        # Retry logging dials
         settings.DEEPEVAL_RETRY_BEFORE_LOG_LEVEL = None
         settings.DEEPEVAL_RETRY_AFTER_LOG_LEVEL = None
 
+        # gRPC visibility
        settings.DEEPEVAL_GRPC_LOGGING = None
         settings.GRPC_VERBOSITY = None
         settings.GRPC_TRACE = None
 
+        # Confident tracing
+        settings.CONFIDENT_TRACE_VERBOSE = None
+        settings.CONFIDENT_TRACE_ENVIRONMENT = None
         settings.CONFIDENT_TRACE_FLUSH = None
-        settings.
-
-        settings.ERROR_REPORTING = None
-        settings.IGNORE_DEEPEVAL_ERRORS = None
-
-    handled, path, _ = edit_ctx.result
-
-    if not handled and save is not None:
-        print("Unsupported --save option. Use --save=dotenv[:path].")
-    elif path:
-        print(f"Removed debug-related environment variables from {path}.")
-    else:
-        print("Debug settings reverted to defaults for this session.")
+        settings.CONFIDENT_TRACE_SAMPLE_RATE = None
 
-
+        # Confident metrics
+        settings.CONFIDENT_METRIC_LOGGING_VERBOSE = None
+        settings.CONFIDENT_METRIC_LOGGING_FLUSH = None
+        settings.CONFIDENT_METRIC_LOGGING_SAMPLE_RATE = None
+        settings.CONFIDENT_METRIC_LOGGING_ENABLED = None
+
+    handled, path, updates = edit_ctx.result
+
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=":mute: Debug options unset." if updates else None,
+        tip_msg=None,
+    )
 
 
 #############################################
@@ -514,29 +769,52 @@ def unset_debug(
 
 @app.command(name="set-openai")
 def set_openai_env(
-    model: str = typer.Option(
+    model: Optional[str] = typer.Option(
+        None,
+        "-m",
+        "--model",
+        help="Model identifier to use for this provider (e.g., `gpt-4.1`).",
+    ),
+    prompt_api_key: bool = typer.Option(
+        False,
+        "-k",
+        "--prompt-api-key",
+        help=(
+            "Prompt for OPENAI_API_KEY (input hidden). Not suitable for CI. "
+            "If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
+        ),
+    ),
     cost_per_input_token: Optional[float] = typer.Option(
         None,
-        "
+        "-i",
+        "--cost-per-input-token",
         help=(
-            "USD per input token
-            "REQUIRED if you use a custom/
+            "USD per input token override used for cost tracking. Preconfigured for known models; "
+            "REQUIRED if you use a custom/unknown model."
         ),
     ),
     cost_per_output_token: Optional[float] = typer.Option(
         None,
-        "
+        "-o",
+        "--cost-per-output-token",
         help=(
-            "USD per output token
-            "REQUIRED if you use a custom/
+            "USD per output token override used for cost tracking. Preconfigured for known models; "
+            "REQUIRED if you use a custom/unknown model."
         ),
     ),
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Persist CLI parameters as environment variables in a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
     ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
     """
     Configure OpenAI as the active LLM provider.
@@ -549,56 +827,69 @@ def set_openai_env(
     Pricing rules:
     - If `model` is a known OpenAI model, you may omit costs (built‑in pricing is used).
     - If `model` is custom/unsupported, you must provide both
-      `--
+      `--cost-per-input-token` and `--cost-per-output-token`.
 
     Secrets & saving:
-
-    -
-
-
-
+
+    - If you run with --prompt-api-key, DeepEval will set OPENAI_API_KEY for this session.
+    - If --save=dotenv[:path] is used (or DEEPEVAL_DEFAULT_SAVE is set), the key will be written to that dotenv file (plaintext).
+
+    Secrets are never written to .deepeval/.deepeval (legacy JSON store).
 
     Args:
-        model: OpenAI model name, such as `gpt-4o-mini`.
-
-
-
+        --model: OpenAI model name, such as `gpt-4o-mini`.
+        --prompt-api-key: Prompt interactively for OPENAI_API_KEY (input hidden). Avoids putting secrets on the command line (shell history/process args). Not suitable for CI.
+        --cost-per-input-token: USD per input token (optional for known models).
+        --cost-per-output-token: USD per output token (optional for known models).
+        --save: Persist config (and supported secrets) to a dotenv file; format `dotenv[:path]`.
+        --quiet: Suppress printing to the terminal.
 
     Example:
         deepeval set-openai \\
             --model gpt-4o-mini \\
-            --
-            --
+            --cost-per-input-token 0.0005 \\
+            --cost-per-output-token 0.0015 \\
             --save dotenv:.env.local
     """
+    api_key = None
+    if prompt_api_key:
+        api_key = coerce_blank_to_none(
+            typer.prompt("OpenAI API key", hide_input=True)
+        )
+
+    model = coerce_blank_to_none(model)
+
     settings = get_settings()
     with settings.edit(save=save) as edit_ctx:
         edit_ctx.switch_model_provider(ModelKeyValues.USE_OPENAI_MODEL)
-
+        if model is not None:
+            settings.OPENAI_MODEL_NAME = model
+        if api_key is not None:
+            settings.OPENAI_API_KEY = api_key
         if cost_per_input_token is not None:
             settings.OPENAI_COST_PER_INPUT_TOKEN = cost_per_input_token
         if cost_per_output_token is not None:
             settings.OPENAI_COST_PER_OUTPUT_TOKEN = cost_per_output_token
 
-    handled, path,
+    handled, path, updates = edit_ctx.result
 
-
-
-
-
-
-            print(
-                f"Saved environment variables to {path} (ensure it's git-ignored)."
-            )
-        else:
-            # updated in-memory & process env only
-            print(
-                "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
-                "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
+    effective_model = settings.OPENAI_MODEL_NAME
+    if not effective_model:
+        raise typer.BadParameter(
+            "OpenAI model name is not set. Pass --model (or set OPENAI_MODEL_NAME).",
+            param_hint="--model",
         )
 
-
-
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=(
+            f":raising_hands: Congratulations! You're now using OpenAI's `{escape(effective_model)}` "
+            "for all evals that require an LLM."
+        ),
     )
 
 
@@ -606,10 +897,23 @@ def set_openai_env(
 def unset_openai_env(
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Remove only the OpenAI related environment variables from a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
     ),
+    clear_secrets: bool = typer.Option(
+        False,
+        "-x",
+        "--clear-secrets",
+        help="Also remove OPENAI_API_KEY from the dotenv store.",
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
     """
     Unset OpenAI as the active provider.
@@ -622,6 +926,8 @@ def unset_openai_env(
 
     Args:
         --save: Remove OpenAI keys from the given dotenv file as well.
+        --clear-secrets: Removes OPENAI_API_KEY from the dotenv store
+        --quiet: Suppress printing to the terminal
 
     Example:
         deepeval unset-openai --save dotenv:.env.local
@@ -633,24 +939,29 @@ def unset_openai_env(
         settings.OPENAI_COST_PER_INPUT_TOKEN = None
         settings.OPENAI_COST_PER_OUTPUT_TOKEN = None
         settings.USE_OPENAI_MODEL = None
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if clear_secrets:
+            settings.OPENAI_API_KEY = None
+
+    handled, path, updates = edit_ctx.result
+
+    if _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        updated_msg="Removed OpenAI environment variables from {path}.",
+        tip_msg=None,
+    ):
+        if is_openai_configured():
+            print(
+                ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
+            )
+        else:
+            print(
+                "OpenAI has been unset. No active provider is configured. "
+                "Set one with the CLI, or add credentials to .env[.local]."
+            )
 
 
 #############################################
@@ -660,108 +971,99 @@ def unset_openai_env(
 
 @app.command(name="set-azure-openai")
 def set_azure_openai_env(
-
-
-        "
-
-
-
-
+    model: Optional[str] = typer.Option(
+        None,
+        "-m",
+        "--model",
+        help="Model identifier to use for this provider (e.g., `gpt-4.1`).",
+    ),
+    prompt_api_key: bool = typer.Option(
+        False,
+        "-k",
+        "--prompt-api-key",
+        help=(
+            "Prompt for AZURE_OPENAI_API_KEY (input hidden). Not suitable for CI. "
+            "If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
+        ),
     ),
-
-
+    base_url: Optional[str] = typer.Option(
+        None,
+        "-u",
+        "--base-url",
+        help="Override the API endpoint/base URL used by this provider.",
     ),
-
-
+    api_version: Optional[str] = typer.Option(
+        None,
+        "-v",
+        "--api-version",
+        help="Azure OpenAI API version (passed to the Azure OpenAI client).",
     ),
-
-
+    model_version: Optional[str] = typer.Option(
+        None, "-V", "--model-version", help="Azure model version"
     ),
-
-        None, "--
+    deployment_name: Optional[str] = typer.Option(
+        None, "-d", "--deployment-name", help="Azure OpenAI deployment name"
     ),
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Persist CLI parameters as environment variables in a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
     ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
-
-
-
-
-        settings.AZURE_OPENAI_ENDPOINT = azure_openai_endpoint
-        settings.OPENAI_API_VERSION = openai_api_version
-        settings.AZURE_DEPLOYMENT_NAME = azure_deployment_name
-        settings.AZURE_MODEL_NAME = openai_model_name
-        if azure_model_version is not None:
-            settings.AZURE_MODEL_VERSION = azure_model_version
-
-    handled, path, _ = edit_ctx.result
-
-    if not handled and save is not None:
-        # invalid --save format (unsupported)
-        print("Unsupported --save option. Use --save=dotenv[:path].")
-    elif path:
-        # persisted to a file
-        print(
-            f"Saved environment variables to {path} (ensure it's git-ignored)."
-        )
-    else:
-        # updated in-memory & process env only
-        print(
-            "Settings updated for this session. To persist, use --save=dotenv[:path] "
-            "(default .env.local) or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
+    api_key = None
+    if prompt_api_key:
+        api_key = coerce_blank_to_none(
+            typer.prompt("Azure OpenAI API key", hide_input=True)
         )
 
-
-
-        )
-
+    model = coerce_blank_to_none(model)
+    base_url = coerce_blank_to_none(base_url)
+    api_version = coerce_blank_to_none(api_version)
+    deployment_name = coerce_blank_to_none(deployment_name)
+    model_version = coerce_blank_to_none(model_version)
 
-@app.command(name="set-azure-openai-embedding")
-def set_azure_openai_embedding_env(
-    azure_embedding_deployment_name: str = typer.Option(
-        ...,
-        "--embedding-deployment-name",
-        help="Azure embedding deployment name",
-    ),
-    save: Optional[str] = typer.Option(
-        None,
-        "--save",
-        help="Persist CLI parameters as environment variables in a dotenv file. "
-        "Usage: --save=dotenv[:path] (default: .env.local)",
-    ),
-):
     settings = get_settings()
     with settings.edit(save=save) as edit_ctx:
-        edit_ctx.switch_model_provider(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            "
-            "(default .env.local) or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
+        edit_ctx.switch_model_provider(ModelKeyValues.USE_AZURE_OPENAI)
+        if model is not None:
+            settings.AZURE_MODEL_NAME = model
+        if api_key is not None:
+            settings.AZURE_OPENAI_API_KEY = api_key
+        if base_url is not None:
+            settings.AZURE_OPENAI_ENDPOINT = base_url
+        if api_version is not None:
+            settings.OPENAI_API_VERSION = api_version
+        if deployment_name is not None:
+            settings.AZURE_DEPLOYMENT_NAME = deployment_name
+        if model_version is not None:
+            settings.AZURE_MODEL_VERSION = model_version
+
+    handled, path, updates = edit_ctx.result
+
+    effective_model = settings.AZURE_MODEL_NAME
+    if not effective_model:
+        raise typer.BadParameter(
+            "Azure OpenAI model name is not set. Pass --model (or set AZURE_MODEL_NAME).",
+            param_hint="--model",
         )
 
-
-
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=(
+            f":raising_hands: Congratulations! You're now using Azure OpenAI's `{escape(effective_model)}` for all evals that require an LLM."
+        ),
     )
 
 
@@ -769,122 +1071,532 @@ def set_azure_openai_embedding_env(
 def unset_azure_openai_env(
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Remove only the Azure OpenAI–related environment variables from a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
-    )
+    ),
+    clear_secrets: bool = typer.Option(
+        False,
+        "-x",
+        "--clear-secrets",
+        help="Also remove AZURE_OPENAI_API_KEY from the dotenv store.",
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
     settings = get_settings()
     with settings.edit(save=save) as edit_ctx:
-        settings.AZURE_OPENAI_API_KEY = None
         settings.AZURE_OPENAI_ENDPOINT = None
         settings.OPENAI_API_VERSION = None
         settings.AZURE_DEPLOYMENT_NAME = None
         settings.AZURE_MODEL_NAME = None
         settings.AZURE_MODEL_VERSION = None
         settings.USE_AZURE_OPENAI = None
+        if clear_secrets:
+            settings.AZURE_OPENAI_API_KEY = None
+
+    handled, path, updates = edit_ctx.result
+
+    if _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        updated_msg="Removed Azure OpenAI environment variables from {path}.",
+        tip_msg=None,
+    ):
+        if is_openai_configured():
+            print(
+                ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
+            )
+        else:
+            print(
+                "Azure OpenAI has been unset. No active provider is configured. Set one with the CLI, or add credentials to .env[.local]."
+            )
 
-    handled, path, _ = edit_ctx.result
 
-
-
-
-
-
-
+@app.command(name="set-azure-openai-embedding")
+def set_azure_openai_embedding_env(
+    model: Optional[str] = typer.Option(
+        None,
+        "-m",
+        "--model",
+        help="Model identifier to use for this provider (e.g., `gpt-4.1`).",
+    ),
+    deployment_name: Optional[str] = typer.Option(
+        None,
+        "-d",
+        "--deployment-name",
+        help="Azure embedding deployment name",
+    ),
+    save: Optional[str] = typer.Option(
+        None,
+        "-s",
+        "--save",
+        help="Persist CLI parameters as environment variables in a dotenv file. "
+        "Usage: --save=dotenv[:path] (default: .env.local)",
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
+):
+    model = coerce_blank_to_none(model)
+    deployment_name = coerce_blank_to_none(deployment_name)
 
-
-
-
+    settings = get_settings()
+    with settings.edit(save=save) as edit_ctx:
+        edit_ctx.switch_model_provider(
+            EmbeddingKeyValues.USE_AZURE_OPENAI_EMBEDDING
         )
-
-
-
+        if model is not None:
+            settings.AZURE_EMBEDDING_MODEL_NAME = model
+        if deployment_name is not None:
+            settings.AZURE_EMBEDDING_DEPLOYMENT_NAME = deployment_name
+
+    handled, path, updates = edit_ctx.result
+
+    effective_model = settings.AZURE_EMBEDDING_MODEL_NAME
+    if not effective_model:
+        raise typer.BadParameter(
+            "Azure OpenAI embedding model name is not set. Pass --model (or set AZURE_EMBEDDING_MODEL_NAME).",
+            param_hint="--model",
         )
 
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=(
+            f":raising_hands: Congratulations! You're now using Azure OpenAI embedding model `{escape(effective_model)}` for all evals that require text embeddings."
+        ),
+    )
+
 
 @app.command(name="unset-azure-openai-embedding")
 def unset_azure_openai_embedding_env(
     save: Optional[str] = typer.Option(
         None,
+        "-s",
         "--save",
         help="Remove only the Azure OpenAI embedding related environment variables from a dotenv file. "
         "Usage: --save=dotenv[:path] (default: .env.local)",
     ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
 ):
     settings = get_settings()
     with settings.edit(save=save) as edit_ctx:
+        settings.AZURE_EMBEDDING_MODEL_NAME = None
         settings.AZURE_EMBEDDING_DEPLOYMENT_NAME = None
         settings.USE_AZURE_OPENAI_EMBEDDING = None
 
-        handled, path,
-
-        if not handled and save is not None:
-            # invalid --save format (unsupported)
-            print("Unsupported --save option. Use --save=dotenv[:path].")
-        elif path:
-            # persisted to a file
-            print(
-                f"Removed Azure OpenAI embedding environment variables from {path}."
-            )
+    handled, path, updates = edit_ctx.result
 
-        if
-
-
-
-
-
-
-
+    if _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        updated_msg="Removed Azure OpenAI embedding environment variables from {path}.",
+        tip_msg=None,
+    ):
+        if is_openai_configured():
+            print(
+                ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
+            )
+        else:
+            print(
+                "Azure OpenAI embedding has been unset. No active provider is configured. Set one with the CLI, or add credentials to .env[.local]."
+            )
 
 
 #############################################
-#
+# Anthropic Model Integration ###############
 #############################################
 
 
-@app.command(name="set-
-def
-
-    base_url: str = typer.Option(
-        "http://localhost:11434",
-        "-b",
-        "--base-url",
-        help="Base URL for the local model API",
-    ),
-    save: Optional[str] = typer.Option(
+@app.command(name="set-anthropic")
+def set_anthropic_model_env(
+    model: Optional[str] = typer.Option(
         None,
-        "
-
-        "
+        "-m",
+        "--model",
+        help="Model identifier to use for this provider",
+    ),
+    prompt_api_key: bool = typer.Option(
+        False,
+        "-k",
+        "--prompt-api-key",
+        help=(
+            "Prompt for ANTHROPIC_API_KEY (input hidden). Not suitable for CI. "
+            "If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
+        ),
     ),
-
+    cost_per_input_token: Optional[float] = typer.Option(
+        None,
+        "-i",
+        "--cost-per-input-token",
+        help=(
+            "USD per input token override used for cost tracking. Preconfigured for known models; "
+            "REQUIRED if you use a custom/unknown model."
+        ),
+    ),
+    cost_per_output_token: Optional[float] = typer.Option(
+        None,
+        "-o",
+        "--cost-per-output-token",
+        help=(
+            "USD per output token override used for cost tracking. Preconfigured for known models; "
+            "REQUIRED if you use a custom/unknown model."
+        ),
+    ),
+    save: Optional[str] = typer.Option(
+        None,
+        "-s",
+        "--save",
+        help="Persist CLI parameters as environment variables in a dotenv file. "
+        "Usage: --save=dotenv[:path] (default: .env.local)",
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
+):
+    api_key = None
+    if prompt_api_key:
+        api_key = coerce_blank_to_none(
+            typer.prompt("Anthropic API key", hide_input=True)
+        )
+
+    model = coerce_blank_to_none(model)
+
     settings = get_settings()
     with settings.edit(save=save) as edit_ctx:
-        edit_ctx.switch_model_provider(ModelKeyValues.
-
-
-
+        edit_ctx.switch_model_provider(ModelKeyValues.USE_ANTHROPIC_MODEL)
+        if api_key is not None:
+            settings.ANTHROPIC_API_KEY = api_key
+        if model is not None:
+            settings.ANTHROPIC_MODEL_NAME = model
+        if cost_per_input_token is not None:
+            settings.ANTHROPIC_COST_PER_INPUT_TOKEN = cost_per_input_token
+        if cost_per_output_token is not None:
+            settings.ANTHROPIC_COST_PER_OUTPUT_TOKEN = cost_per_output_token
 
-    handled, path,
+    handled, path, updates = edit_ctx.result
 
-
-
-
-
-
-            print(
-                f"Saved environment variables to {path} (ensure it's git-ignored)."
+    effective_model = settings.ANTHROPIC_MODEL_NAME
+    if not effective_model:
+        raise typer.BadParameter(
+            "Anthropic model name is not set. Pass --model (or set ANTHROPIC_MODEL_NAME).",
+            param_hint="--model",
             )
-
-
-
-
-
+
+    _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        success_msg=(
+            f":raising_hands: Congratulations! You're now using Anthropic `{escape(effective_model)}` for all evals that require an LLM."
+        ),
+    )
+
+
+@app.command(name="unset-anthropic")
+def unset_anthropic_model_env(
+    save: Optional[str] = typer.Option(
+        None,
+        "-s",
+        "--save",
+        help="Remove only the Anthropic model related environment variables from a dotenv file. "
+        "Usage: --save=dotenv[:path] (default: .env.local)",
+    ),
+    clear_secrets: bool = typer.Option(
+        False,
+        "-x",
+        "--clear-secrets",
+        help="Also remove ANTHROPIC_API_KEY from the dotenv store.",
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
+):
+    settings = get_settings()
+    with settings.edit(save=save) as edit_ctx:
+        settings.USE_ANTHROPIC_MODEL = None
+        settings.ANTHROPIC_MODEL_NAME = None
+        settings.ANTHROPIC_COST_PER_INPUT_TOKEN = None
+        settings.ANTHROPIC_COST_PER_OUTPUT_TOKEN = None
+        if clear_secrets:
+            settings.ANTHROPIC_API_KEY = None
+
+    handled, path, updates = edit_ctx.result
+
+    if _handle_save_result(
+        handled=handled,
+        path=path,
+        updates=updates,
+        save=save,
+        quiet=quiet,
+        updated_msg="Removed Anthropic model environment variables from {path}.",
+        tip_msg=None,
+    ):
+        if is_openai_configured():
+            print(
+                ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
+            )
+        else:
+            print(
+                "The Anthropic model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
+            )
+
+
+#############################################
+# AWS Bedrock Model Integration #############
+#############################################
+
+
+@app.command(name="set-bedrock")
+def set_bedrock_model_env(
+    model: Optional[str] = typer.Option(
+        None,
+        "-m",
+        "--model",
+        help="Model identifier to use for this provider",
+    ),
+    prompt_credentials: bool = typer.Option(
+        False,
+        "-a",
+        "--prompt-credentials",
+        help=(
+            "Prompt for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY (secret access key input is hidden). Not suitable for CI. "
+            "If --save (or DEEPEVAL_DEFAULT_SAVE) is used, credentials are written to dotenv in plaintext."
+        ),
+    ),
+    region: Optional[str] = typer.Option(
+        None,
+        "-r",
+        "--region",
+        help="AWS region for bedrock (e.g., `us-east-1`).",
+    ),
+    cost_per_input_token: Optional[float] = typer.Option(
+        None,
+        "-i",
+        "--cost-per-input-token",
+        help=(
+            "USD per input token override used for cost tracking. Preconfigured for known models; "
+            "REQUIRED if you use a custom/unknown model."
+        ),
+    ),
+    cost_per_output_token: Optional[float] = typer.Option(
+        None,
+        "-o",
+        "--cost-per-output-token",
+        help=(
+            "USD per output token override used for cost tracking. Preconfigured for known models; "
+            "REQUIRED if you use a custom/unknown model."
+        ),
+    ),
+    save: Optional[str] = typer.Option(
+        None,
+        "-s",
+        "--save",
+        help="Persist CLI parameters as environment variables in a dotenv file. "
+        "Usage: --save=dotenv[:path] (default: .env.local)",
+    ),
+    quiet: bool = typer.Option(
+        False,
+        "-q",
+        "--quiet",
+        help="Suppress printing to the terminal (useful for CI).",
+    ),
+):
+    access_key_id = None
+    secret_access_key = None
+    if prompt_credentials:
+        access_key_id = coerce_blank_to_none(typer.prompt("AWS Access key Id"))
+        secret_access_key = coerce_blank_to_none(
+            typer.prompt("AWS Secret Access key", hide_input=True)
         )
 
-
-
+    model = coerce_blank_to_none(model)
+    region = coerce_blank_to_none(region)
+
+    settings = get_settings()
+    with settings.edit(save=save) as edit_ctx:
+        edit_ctx.switch_model_provider(ModelKeyValues.USE_AWS_BEDROCK_MODEL)
+        if access_key_id is not None:
+            settings.AWS_ACCESS_KEY_ID = access_key_id
+        if secret_access_key is not None:
+            settings.AWS_SECRET_ACCESS_KEY = secret_access_key
+        if model is not None:
+            settings.AWS_BEDROCK_MODEL_NAME = model
+        if region is not None:
+            settings.AWS_BEDROCK_REGION = region
+        if cost_per_input_token is not None:
+            settings.AWS_BEDROCK_COST_PER_INPUT_TOKEN = cost_per_input_token
+        if cost_per_output_token is not None:
+            settings.AWS_BEDROCK_COST_PER_OUTPUT_TOKEN = cost_per_output_token
+
+    handled, path, updates = edit_ctx.result
1464
|
+
|
|
1465
|
+
effective_model = settings.AWS_BEDROCK_MODEL_NAME
|
|
1466
|
+
if not effective_model:
|
|
1467
|
+
raise typer.BadParameter(
|
|
1468
|
+
"AWS Bedrock model name is not set. Pass --model (or set AWS_BEDROCK_MODEL_NAME).",
|
|
1469
|
+
param_hint="--model",
|
|
1470
|
+
)
|
|
1471
|
+
|
|
1472
|
+
_handle_save_result(
|
|
1473
|
+
handled=handled,
|
|
1474
|
+
path=path,
|
|
1475
|
+
updates=updates,
|
|
1476
|
+
save=save,
|
|
1477
|
+
quiet=quiet,
|
|
1478
|
+
success_msg=(
|
|
1479
|
+
f":raising_hands: Congratulations! You're now using AWS Bedrock `{escape(effective_model)}` for all evals that require an LLM."
|
|
1480
|
+
),
|
|
1481
|
+
)
|
|
1482
|
+
|
|
1483
|
+
|
|
1484
|
+
@app.command(name="unset-bedrock")
|
|
1485
|
+
def unset_bedrock_model_env(
|
|
1486
|
+
save: Optional[str] = typer.Option(
|
|
1487
|
+
None,
|
|
1488
|
+
"-s",
|
|
1489
|
+
"--save",
|
|
1490
|
+
help="Remove only the AWS Bedrock model related environment variables from a dotenv file. "
|
|
1491
|
+
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1492
|
+
),
|
|
1493
|
+
clear_secrets: bool = typer.Option(
|
|
1494
|
+
False,
|
|
1495
|
+
"-x",
|
|
1496
|
+
"--clear-secrets",
|
|
1497
|
+
help="Also remove AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from the dotenv store.",
|
|
1498
|
+
),
|
|
1499
|
+
quiet: bool = typer.Option(
|
|
1500
|
+
False,
|
|
1501
|
+
"-q",
|
|
1502
|
+
"--quiet",
|
|
1503
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1504
|
+
),
|
|
1505
|
+
):
|
|
1506
|
+
settings = get_settings()
|
|
1507
|
+
with settings.edit(save=save) as edit_ctx:
|
|
1508
|
+
settings.USE_AWS_BEDROCK_MODEL = None
|
|
1509
|
+
settings.AWS_BEDROCK_MODEL_NAME = None
|
|
1510
|
+
settings.AWS_BEDROCK_REGION = None
|
|
1511
|
+
settings.AWS_BEDROCK_COST_PER_INPUT_TOKEN = None
|
|
1512
|
+
settings.AWS_BEDROCK_COST_PER_OUTPUT_TOKEN = None
|
|
1513
|
+
if clear_secrets:
|
|
1514
|
+
settings.AWS_ACCESS_KEY_ID = None
|
|
1515
|
+
settings.AWS_SECRET_ACCESS_KEY = None
|
|
1516
|
+
|
|
1517
|
+
handled, path, updates = edit_ctx.result
|
|
1518
|
+
|
|
1519
|
+
if _handle_save_result(
|
|
1520
|
+
handled=handled,
|
|
1521
|
+
path=path,
|
|
1522
|
+
updates=updates,
|
|
1523
|
+
save=save,
|
|
1524
|
+
quiet=quiet,
|
|
1525
|
+
updated_msg="Removed AWS Bedrock model environment variables from {path}.",
|
|
1526
|
+
tip_msg=None,
|
|
1527
|
+
):
|
|
1528
|
+
if is_openai_configured():
|
|
1529
|
+
print(
|
|
1530
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
1531
|
+
)
|
|
1532
|
+
else:
|
|
1533
|
+
print(
|
|
1534
|
+
"The AWS Bedrock model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
1535
|
+
)
|
|
1536
|
+
|
|
1537
|
+
|
|
1538
|
+
#############################################
|
|
1539
|
+
# Ollama Integration ########################
|
|
1540
|
+
#############################################
|
|
1541
|
+
|
|
1542
|
+
|
|
1543
|
+
@app.command(name="set-ollama")
|
|
1544
|
+
def set_ollama_model_env(
|
|
1545
|
+
model: Optional[str] = typer.Option(
|
|
1546
|
+
None,
|
|
1547
|
+
"-m",
|
|
1548
|
+
"--model",
|
|
1549
|
+
help="Model identifier to use for this provider",
|
|
1550
|
+
),
|
|
1551
|
+
base_url: str = typer.Option(
|
|
1552
|
+
"http://localhost:11434",
|
|
1553
|
+
"-u",
|
|
1554
|
+
"--base-url",
|
|
1555
|
+
help="Override the API endpoint/base URL used by this provider.",
|
|
1556
|
+
),
|
|
1557
|
+
save: Optional[str] = typer.Option(
|
|
1558
|
+
None,
|
|
1559
|
+
"-s",
|
|
1560
|
+
"--save",
|
|
1561
|
+
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
1562
|
+
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1563
|
+
),
|
|
1564
|
+
quiet: bool = typer.Option(
|
|
1565
|
+
False,
|
|
1566
|
+
"-q",
|
|
1567
|
+
"--quiet",
|
|
1568
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1569
|
+
),
|
|
1570
|
+
):
|
|
1571
|
+
model = coerce_blank_to_none(model)
|
|
1572
|
+
base_url = coerce_blank_to_none(base_url)
|
|
1573
|
+
|
|
1574
|
+
settings = get_settings()
|
|
1575
|
+
with settings.edit(save=save) as edit_ctx:
|
|
1576
|
+
edit_ctx.switch_model_provider(ModelKeyValues.USE_LOCAL_MODEL)
|
|
1577
|
+
settings.LOCAL_MODEL_API_KEY = "ollama"
|
|
1578
|
+
if model is not None:
|
|
1579
|
+
settings.OLLAMA_MODEL_NAME = model
|
|
1580
|
+
if base_url is not None:
|
|
1581
|
+
settings.LOCAL_MODEL_BASE_URL = base_url
|
|
1582
|
+
|
|
1583
|
+
handled, path, updates = edit_ctx.result
|
|
1584
|
+
|
|
1585
|
+
effective_model = settings.OLLAMA_MODEL_NAME
|
|
1586
|
+
if not effective_model:
|
|
1587
|
+
raise typer.BadParameter(
|
|
1588
|
+
"Ollama model name is not set. Pass --model (or set OLLAMA_MODEL_NAME).",
|
|
1589
|
+
param_hint="--model",
|
|
1590
|
+
)
|
|
1591
|
+
_handle_save_result(
|
|
1592
|
+
handled=handled,
|
|
1593
|
+
path=path,
|
|
1594
|
+
updates=updates,
|
|
1595
|
+
save=save,
|
|
1596
|
+
quiet=quiet,
|
|
1597
|
+
success_msg=(
|
|
1598
|
+
f":raising_hands: Congratulations! You're now using a local Ollama model `{escape(effective_model)}` for all evals that require an LLM."
|
|
1599
|
+
),
|
|
888
1600
|
)
|
|
889
1601
|
|
|
890
1602
|
|
|
@@ -892,81 +1604,110 @@ def set_ollama_model_env(
|
|
|
892
1604
|
def unset_ollama_model_env(
|
|
893
1605
|
save: Optional[str] = typer.Option(
|
|
894
1606
|
None,
|
|
1607
|
+
"-s",
|
|
895
1608
|
"--save",
|
|
896
1609
|
help="Remove only the Ollama related environment variables from a dotenv file. "
|
|
897
1610
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
898
1611
|
),
|
|
1612
|
+
clear_secrets: bool = typer.Option(
|
|
1613
|
+
False,
|
|
1614
|
+
"-x",
|
|
1615
|
+
"--clear-secrets",
|
|
1616
|
+
help="Also remove LOCAL_MODEL_API_KEY from the dotenv store.",
|
|
1617
|
+
),
|
|
1618
|
+
quiet: bool = typer.Option(
|
|
1619
|
+
False,
|
|
1620
|
+
"-q",
|
|
1621
|
+
"--quiet",
|
|
1622
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1623
|
+
),
|
|
899
1624
|
):
|
|
900
1625
|
settings = get_settings()
|
|
901
1626
|
with settings.edit(save=save) as edit_ctx:
|
|
902
|
-
|
|
903
|
-
|
|
1627
|
+
if clear_secrets:
|
|
1628
|
+
settings.LOCAL_MODEL_API_KEY = None
|
|
1629
|
+
settings.OLLAMA_MODEL_NAME = None
|
|
904
1630
|
settings.LOCAL_MODEL_BASE_URL = None
|
|
905
1631
|
settings.USE_LOCAL_MODEL = None
|
|
906
1632
|
|
|
907
|
-
handled, path,
|
|
1633
|
+
handled, path, updates = edit_ctx.result
|
|
908
1634
|
|
|
909
|
-
if
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
1635
|
+
if _handle_save_result(
|
|
1636
|
+
handled=handled,
|
|
1637
|
+
path=path,
|
|
1638
|
+
updates=updates,
|
|
1639
|
+
save=save,
|
|
1640
|
+
quiet=quiet,
|
|
1641
|
+
updated_msg="Removed local Ollama environment variables from {path}.",
|
|
1642
|
+
tip_msg=None,
|
|
1643
|
+
):
|
|
1644
|
+
if is_openai_configured():
|
|
1645
|
+
print(
|
|
1646
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
1647
|
+
)
|
|
1648
|
+
else:
|
|
1649
|
+
print(
|
|
1650
|
+
"The local Ollama model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
1651
|
+
)
|
|
924
1652
|
|
|
925
1653
|
|
|
926
1654
|
@app.command(name="set-ollama-embeddings")
|
|
927
1655
|
def set_ollama_embeddings_env(
|
|
928
|
-
|
|
929
|
-
|
|
1656
|
+
model: Optional[str] = typer.Option(
|
|
1657
|
+
None,
|
|
1658
|
+
"-m",
|
|
1659
|
+
"--model",
|
|
1660
|
+
help="Model identifier to use for this provider.",
|
|
930
1661
|
),
|
|
931
1662
|
base_url: str = typer.Option(
|
|
932
1663
|
"http://localhost:11434",
|
|
933
|
-
"-
|
|
1664
|
+
"-u",
|
|
934
1665
|
"--base-url",
|
|
935
|
-
help="
|
|
1666
|
+
help="Override the API endpoint/base URL used by this provider.",
|
|
936
1667
|
),
|
|
937
1668
|
save: Optional[str] = typer.Option(
|
|
938
1669
|
None,
|
|
1670
|
+
"-s",
|
|
939
1671
|
"--save",
|
|
940
1672
|
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
941
1673
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
942
1674
|
),
|
|
1675
|
+
quiet: bool = typer.Option(
|
|
1676
|
+
False,
|
|
1677
|
+
"-q",
|
|
1678
|
+
"--quiet",
|
|
1679
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1680
|
+
),
|
|
943
1681
|
):
|
|
1682
|
+
model = coerce_blank_to_none(model)
|
|
1683
|
+
base_url = coerce_blank_to_none(base_url)
|
|
1684
|
+
|
|
944
1685
|
settings = get_settings()
|
|
945
1686
|
with settings.edit(save=save) as edit_ctx:
|
|
946
1687
|
edit_ctx.switch_model_provider(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS)
|
|
947
1688
|
settings.LOCAL_EMBEDDING_API_KEY = "ollama"
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
1689
|
+
if model is not None:
|
|
1690
|
+
settings.LOCAL_EMBEDDING_MODEL_NAME = model
|
|
1691
|
+
if base_url is not None:
|
|
1692
|
+
settings.LOCAL_EMBEDDING_BASE_URL = base_url
|
|
1693
|
+
|
|
1694
|
+
handled, path, updates = edit_ctx.result
|
|
1695
|
+
|
|
1696
|
+
effective_model = settings.LOCAL_EMBEDDING_MODEL_NAME
|
|
1697
|
+
if not effective_model:
|
|
1698
|
+
raise typer.BadParameter(
|
|
1699
|
+
"Ollama embedding model name is not set. Pass --model (or set LOCAL_EMBEDDING_MODEL_NAME).",
|
|
1700
|
+
param_hint="--model",
|
|
960
1701
|
)
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
1702
|
+
_handle_save_result(
|
|
1703
|
+
handled=handled,
|
|
1704
|
+
path=path,
|
|
1705
|
+
updates=updates,
|
|
1706
|
+
save=save,
|
|
1707
|
+
quiet=quiet,
|
|
1708
|
+
success_msg=(
|
|
1709
|
+
f":raising_hands: Congratulations! You're now using the Ollama embedding model `{escape(effective_model)}` for all evals that require text embeddings."
|
|
1710
|
+
),
|
|
970
1711
|
)
|
|
971
1712
|
|
|
972
1713
|
|
|
@@ -974,38 +1715,51 @@ def set_ollama_embeddings_env(
|
|
|
974
1715
|
def unset_ollama_embeddings_env(
|
|
975
1716
|
save: Optional[str] = typer.Option(
|
|
976
1717
|
None,
|
|
1718
|
+
"-s",
|
|
977
1719
|
"--save",
|
|
978
1720
|
help="Remove only the Ollama embedding related environment variables from a dotenv file. "
|
|
979
1721
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
980
1722
|
),
|
|
1723
|
+
clear_secrets: bool = typer.Option(
|
|
1724
|
+
False,
|
|
1725
|
+
"-x",
|
|
1726
|
+
"--clear-secrets",
|
|
1727
|
+
help="Also remove LOCAL_EMBEDDING_API_KEY from the dotenv store.",
|
|
1728
|
+
),
|
|
1729
|
+
quiet: bool = typer.Option(
|
|
1730
|
+
False,
|
|
1731
|
+
"-q",
|
|
1732
|
+
"--quiet",
|
|
1733
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1734
|
+
),
|
|
981
1735
|
):
|
|
982
|
-
|
|
983
1736
|
settings = get_settings()
|
|
984
1737
|
with settings.edit(save=save) as edit_ctx:
|
|
985
|
-
|
|
1738
|
+
if clear_secrets:
|
|
1739
|
+
settings.LOCAL_EMBEDDING_API_KEY = None
|
|
986
1740
|
settings.LOCAL_EMBEDDING_MODEL_NAME = None
|
|
987
1741
|
settings.LOCAL_EMBEDDING_BASE_URL = None
|
|
988
1742
|
settings.USE_LOCAL_EMBEDDINGS = None
|
|
989
1743
|
|
|
990
|
-
handled, path,
|
|
1744
|
+
handled, path, updates = edit_ctx.result
|
|
991
1745
|
|
|
992
|
-
if
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1746
|
+
if _handle_save_result(
|
|
1747
|
+
handled=handled,
|
|
1748
|
+
path=path,
|
|
1749
|
+
updates=updates,
|
|
1750
|
+
save=save,
|
|
1751
|
+
quiet=quiet,
|
|
1752
|
+
updated_msg="Removed local Ollama embedding environment variables from {path}.",
|
|
1753
|
+
tip_msg=None,
|
|
1754
|
+
):
|
|
1755
|
+
if is_openai_configured():
|
|
1756
|
+
print(
|
|
1757
|
+
":raised_hands: Regular OpenAI embeddings will still be used by default because OPENAI_API_KEY is set."
|
|
1758
|
+
)
|
|
1759
|
+
else:
|
|
1760
|
+
print(
|
|
1761
|
+
"The local Ollama embedding model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
1762
|
+
)
|
|
1009
1763
|
|
|
1010
1764
|
|
|
1011
1765
|
#############################################
|
|
@@ -1015,58 +1769,86 @@ def unset_ollama_embeddings_env(
|
|
|
1015
1769
|
|
|
1016
1770
|
@app.command(name="set-local-model")
|
|
1017
1771
|
def set_local_model_env(
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1772
|
+
model: Optional[str] = typer.Option(
|
|
1773
|
+
None,
|
|
1774
|
+
"-m",
|
|
1775
|
+
"--model",
|
|
1776
|
+
help="Model identifier to use for this provider",
|
|
1777
|
+
),
|
|
1778
|
+
prompt_api_key: bool = typer.Option(
|
|
1779
|
+
False,
|
|
1780
|
+
"-k",
|
|
1781
|
+
"--prompt-api-key",
|
|
1782
|
+
help=(
|
|
1783
|
+
"Prompt for LOCAL_MODEL_API_KEY (input hidden). Not suitable for CI. "
|
|
1784
|
+
"If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
|
|
1785
|
+
),
|
|
1023
1786
|
),
|
|
1024
|
-
|
|
1787
|
+
base_url: Optional[str] = typer.Option(
|
|
1025
1788
|
None,
|
|
1026
|
-
"
|
|
1027
|
-
|
|
1789
|
+
"-u",
|
|
1790
|
+
"--base-url",
|
|
1791
|
+
help="Override the API endpoint/base URL used by this provider.",
|
|
1028
1792
|
),
|
|
1029
1793
|
model_format: Optional[str] = typer.Option(
|
|
1030
|
-
|
|
1794
|
+
None,
|
|
1795
|
+
"-f",
|
|
1031
1796
|
"--format",
|
|
1032
1797
|
help="Format of the response from the local model (default: json)",
|
|
1033
1798
|
),
|
|
1034
1799
|
save: Optional[str] = typer.Option(
|
|
1035
1800
|
None,
|
|
1801
|
+
"-s",
|
|
1036
1802
|
"--save",
|
|
1037
1803
|
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
1038
1804
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1039
1805
|
),
|
|
1806
|
+
quiet: bool = typer.Option(
|
|
1807
|
+
False,
|
|
1808
|
+
"-q",
|
|
1809
|
+
"--quiet",
|
|
1810
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1811
|
+
),
|
|
1040
1812
|
):
|
|
1813
|
+
api_key = None
|
|
1814
|
+
if prompt_api_key:
|
|
1815
|
+
api_key = coerce_blank_to_none(
|
|
1816
|
+
typer.prompt("Local Model API key", hide_input=True)
|
|
1817
|
+
)
|
|
1818
|
+
|
|
1819
|
+
model = coerce_blank_to_none(model)
|
|
1820
|
+
base_url = coerce_blank_to_none(base_url)
|
|
1821
|
+
model_format = coerce_blank_to_none(model_format)
|
|
1822
|
+
|
|
1041
1823
|
settings = get_settings()
|
|
1042
1824
|
with settings.edit(save=save) as edit_ctx:
|
|
1043
1825
|
edit_ctx.switch_model_provider(ModelKeyValues.USE_LOCAL_MODEL)
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
if
|
|
1047
|
-
settings.
|
|
1048
|
-
if api_key:
|
|
1826
|
+
if model is not None:
|
|
1827
|
+
settings.LOCAL_MODEL_NAME = model
|
|
1828
|
+
if base_url is not None:
|
|
1829
|
+
settings.LOCAL_MODEL_BASE_URL = base_url
|
|
1830
|
+
if api_key is not None:
|
|
1049
1831
|
settings.LOCAL_MODEL_API_KEY = api_key
|
|
1832
|
+
if model_format is not None:
|
|
1833
|
+
settings.LOCAL_MODEL_FORMAT = model_format
|
|
1050
1834
|
|
|
1051
|
-
handled, path,
|
|
1835
|
+
handled, path, updates = edit_ctx.result
|
|
1052
1836
|
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
print(
|
|
1059
|
-
f"Saved environment variables to {path} (ensure it's git-ignored)."
|
|
1837
|
+
effective_model = settings.LOCAL_MODEL_NAME
|
|
1838
|
+
if not effective_model:
|
|
1839
|
+
raise typer.BadParameter(
|
|
1840
|
+
"Local model name is not set. Pass --model (or set LOCAL_MODEL_NAME).",
|
|
1841
|
+
param_hint="--model",
|
|
1060
1842
|
)
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1843
|
+
_handle_save_result(
|
|
1844
|
+
handled=handled,
|
|
1845
|
+
path=path,
|
|
1846
|
+
updates=updates,
|
|
1847
|
+
save=save,
|
|
1848
|
+
quiet=quiet,
|
|
1849
|
+
success_msg=(
|
|
1850
|
+
f":raising_hands: Congratulations! You're now using a local model `{escape(effective_model)}` for all evals that require an LLM."
|
|
1851
|
+
),
|
|
1070
1852
|
)
|
|
1071
1853
|
|
|
1072
1854
|
|
|
@@ -1074,36 +1856,52 @@ def set_local_model_env(
|
|
|
1074
1856
|
def unset_local_model_env(
|
|
1075
1857
|
save: Optional[str] = typer.Option(
|
|
1076
1858
|
None,
|
|
1859
|
+
"-s",
|
|
1077
1860
|
"--save",
|
|
1078
1861
|
help="Remove only the local model related environment variables from a dotenv file. "
|
|
1079
1862
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1080
1863
|
),
|
|
1864
|
+
clear_secrets: bool = typer.Option(
|
|
1865
|
+
False,
|
|
1866
|
+
"-x",
|
|
1867
|
+
"--clear-secrets",
|
|
1868
|
+
help="Also remove LOCAL_MODEL_API_KEY from the dotenv store.",
|
|
1869
|
+
),
|
|
1870
|
+
quiet: bool = typer.Option(
|
|
1871
|
+
False,
|
|
1872
|
+
"-q",
|
|
1873
|
+
"--quiet",
|
|
1874
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1875
|
+
),
|
|
1081
1876
|
):
|
|
1082
1877
|
settings = get_settings()
|
|
1083
1878
|
with settings.edit(save=save) as edit_ctx:
|
|
1084
|
-
|
|
1879
|
+
if clear_secrets:
|
|
1880
|
+
settings.LOCAL_MODEL_API_KEY = None
|
|
1085
1881
|
settings.LOCAL_MODEL_NAME = None
|
|
1086
1882
|
settings.LOCAL_MODEL_BASE_URL = None
|
|
1087
1883
|
settings.LOCAL_MODEL_FORMAT = None
|
|
1088
1884
|
settings.USE_LOCAL_MODEL = None
|
|
1089
1885
|
|
|
1090
|
-
handled, path,
|
|
1091
|
-
|
|
1092
|
-
if not handled and save is not None:
|
|
1093
|
-
# invalid --save format (unsupported)
|
|
1094
|
-
print("Unsupported --save option. Use --save=dotenv[:path].")
|
|
1095
|
-
elif path:
|
|
1096
|
-
# persisted to a file
|
|
1097
|
-
print(f"Removed local model environment variables from {path}.")
|
|
1886
|
+
handled, path, updates = edit_ctx.result
|
|
1098
1887
|
|
|
1099
|
-
if
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1888
|
+
if _handle_save_result(
|
|
1889
|
+
handled=handled,
|
|
1890
|
+
path=path,
|
|
1891
|
+
updates=updates,
|
|
1892
|
+
save=save,
|
|
1893
|
+
quiet=quiet,
|
|
1894
|
+
updated_msg="Removed local model environment variables from {path}.",
|
|
1895
|
+
tip_msg=None,
|
|
1896
|
+
):
|
|
1897
|
+
if is_openai_configured():
|
|
1898
|
+
print(
|
|
1899
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
1900
|
+
)
|
|
1901
|
+
else:
|
|
1902
|
+
print(
|
|
1903
|
+
"The local model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
1904
|
+
)
|
|
1107
1905
|
|
|
1108
1906
|
|
|
1109
1907
|
#############################################
|
|
@@ -1113,50 +1911,90 @@ def unset_local_model_env(
|
|
|
1113
1911
|
|
|
1114
1912
|
@app.command(name="set-grok")
|
|
1115
1913
|
def set_grok_model_env(
|
|
1116
|
-
|
|
1117
|
-
|
|
1914
|
+
model: Optional[str] = typer.Option(
|
|
1915
|
+
None,
|
|
1916
|
+
"-m",
|
|
1917
|
+
"--model",
|
|
1918
|
+
help="Model identifier to use for this provider",
|
|
1919
|
+
),
|
|
1920
|
+
prompt_api_key: bool = typer.Option(
|
|
1921
|
+
False,
|
|
1922
|
+
"-k",
|
|
1923
|
+
"--prompt-api-key",
|
|
1924
|
+
help=(
|
|
1925
|
+
"Prompt for GROK_API_KEY (input hidden). Not suitable for CI. "
|
|
1926
|
+
"If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
|
|
1927
|
+
),
|
|
1118
1928
|
),
|
|
1119
|
-
|
|
1120
|
-
|
|
1121
|
-
"
|
|
1122
|
-
|
|
1929
|
+
cost_per_input_token: Optional[float] = typer.Option(
|
|
1930
|
+
None,
|
|
1931
|
+
"-i",
|
|
1932
|
+
"--cost-per-input-token",
|
|
1933
|
+
help=(
|
|
1934
|
+
"USD per input token override used for cost tracking. Preconfigured for known models; "
|
|
1935
|
+
"REQUIRED if you use a custom/unknown model."
|
|
1936
|
+
),
|
|
1123
1937
|
),
|
|
1124
|
-
|
|
1125
|
-
|
|
1938
|
+
cost_per_output_token: Optional[float] = typer.Option(
|
|
1939
|
+
None,
|
|
1940
|
+
"-o",
|
|
1941
|
+
"--cost-per-output-token",
|
|
1942
|
+
help=(
|
|
1943
|
+
"USD per output token override used for cost tracking. Preconfigured for known models; "
|
|
1944
|
+
"REQUIRED if you use a custom/unknown model."
|
|
1945
|
+
),
|
|
1126
1946
|
),
|
|
1127
1947
|
save: Optional[str] = typer.Option(
|
|
1128
1948
|
None,
|
|
1949
|
+
"-s",
|
|
1129
1950
|
"--save",
|
|
1130
1951
|
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
1131
1952
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1132
1953
|
),
|
|
1954
|
+
quiet: bool = typer.Option(
|
|
1955
|
+
False,
|
|
1956
|
+
"-q",
|
|
1957
|
+
"--quiet",
|
|
1958
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
1959
|
+
),
|
|
1133
1960
|
):
|
|
1961
|
+
api_key = None
|
|
1962
|
+
if prompt_api_key:
|
|
1963
|
+
api_key = coerce_blank_to_none(
|
|
1964
|
+
typer.prompt("Grok API key", hide_input=True)
|
|
1965
|
+
)
|
|
1966
|
+
|
|
1967
|
+
model = coerce_blank_to_none(model)
|
|
1968
|
+
|
|
1134
1969
|
settings = get_settings()
|
|
1135
1970
|
with settings.edit(save=save) as edit_ctx:
|
|
1136
1971
|
edit_ctx.switch_model_provider(ModelKeyValues.USE_GROK_MODEL)
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1972
|
+
if api_key is not None:
|
|
1973
|
+
settings.GROK_API_KEY = api_key
|
|
1974
|
+
if model is not None:
|
|
1975
|
+
settings.GROK_MODEL_NAME = model
|
|
1976
|
+
if cost_per_input_token is not None:
|
|
1977
|
+
settings.GROK_COST_PER_INPUT_TOKEN = cost_per_input_token
|
|
1978
|
+
if cost_per_output_token is not None:
|
|
1979
|
+
settings.GROK_COST_PER_OUTPUT_TOKEN = cost_per_output_token
|
|
1140
1980
|
|
|
1141
|
-
handled, path,
|
|
1981
|
+
handled, path, updates = edit_ctx.result
|
|
1142
1982
|
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
print(
|
|
1149
|
-
f"Saved environment variables to {path} (ensure it's git-ignored)."
|
|
1150
|
-
)
|
|
1151
|
-
else:
|
|
1152
|
-
# updated in-memory & process env only
|
|
1153
|
-
print(
|
|
1154
|
-
"Settings updated for this session. To persist, use --save=dotenv[:path] "
|
|
1155
|
-
"(default .env.local) or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
|
|
1983
|
+
effective_model = settings.GROK_MODEL_NAME
|
|
1984
|
+
if not effective_model:
|
|
1985
|
+
raise typer.BadParameter(
|
|
1986
|
+
"Grok model name is not set. Pass --model (or set GROK_MODEL_NAME).",
|
|
1987
|
+
param_hint="--model",
|
|
1156
1988
|
)
|
|
1157
|
-
|
|
1158
|
-
|
|
1159
|
-
|
|
1989
|
+
_handle_save_result(
|
|
1990
|
+
handled=handled,
|
|
1991
|
+
path=path,
|
|
1992
|
+
updates=updates,
|
|
1993
|
+
save=save,
|
|
1994
|
+
quiet=quiet,
|
|
1995
|
+
success_msg=(
|
|
1996
|
+
f":raising_hands: Congratulations! You're now using Grok `{escape(effective_model)}` for all evals that require an LLM."
|
|
1997
|
+
),
|
|
1160
1998
|
)
|
|
1161
1999
|
|
|
1162
2000
|
|
|
@@ -1164,35 +2002,52 @@ def set_grok_model_env(
|
|
|
1164
2002
|
def unset_grok_model_env(
|
|
1165
2003
|
save: Optional[str] = typer.Option(
|
|
1166
2004
|
None,
|
|
2005
|
+
"-s",
|
|
1167
2006
|
"--save",
|
|
1168
2007
|
help="Remove only the Grok model related environment variables from a dotenv file. "
|
|
1169
2008
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1170
2009
|
),
|
|
2010
|
+
clear_secrets: bool = typer.Option(
|
|
2011
|
+
False,
|
|
2012
|
+
"-x",
|
|
2013
|
+
"--clear-secrets",
|
|
2014
|
+
help="Also remove GROK_API_KEY from the dotenv store.",
|
|
2015
|
+
),
|
|
2016
|
+
quiet: bool = typer.Option(
|
|
2017
|
+
False,
|
|
2018
|
+
"-q",
|
|
2019
|
+
"--quiet",
|
|
2020
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2021
|
+
),
|
|
1171
2022
|
):
|
|
1172
2023
|
settings = get_settings()
|
|
1173
2024
|
with settings.edit(save=save) as edit_ctx:
|
|
1174
|
-
settings.GROK_API_KEY = None
|
|
1175
|
-
settings.GROK_MODEL_NAME = None
|
|
1176
|
-
settings.TEMPERATURE = None
|
|
1177
2025
|
settings.USE_GROK_MODEL = None
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
|
|
1182
|
-
|
|
1183
|
-
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
|
|
1189
|
-
|
|
1190
|
-
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
|
|
1194
|
-
|
|
1195
|
-
)
|
|
2026
|
+
settings.GROK_MODEL_NAME = None
|
|
2027
|
+
settings.GROK_COST_PER_INPUT_TOKEN = None
|
|
2028
|
+
settings.GROK_COST_PER_OUTPUT_TOKEN = None
|
|
2029
|
+
if clear_secrets:
|
|
2030
|
+
settings.GROK_API_KEY = None
|
|
2031
|
+
|
|
2032
|
+
handled, path, updates = edit_ctx.result
|
|
2033
|
+
|
|
2034
|
+
if _handle_save_result(
|
|
2035
|
+
handled=handled,
|
|
2036
|
+
path=path,
|
|
2037
|
+
updates=updates,
|
|
2038
|
+
save=save,
|
|
2039
|
+
quiet=quiet,
|
|
2040
|
+
updated_msg="Removed Grok model environment variables from {path}.",
|
|
2041
|
+
tip_msg=None,
|
|
2042
|
+
):
|
|
2043
|
+
if is_openai_configured():
|
|
2044
|
+
print(
|
|
2045
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
2046
|
+
)
|
|
2047
|
+
else:
|
|
2048
|
+
print(
|
|
2049
|
+
"The Grok model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
2050
|
+
)
|
|
1196
2051
|
|
|
1197
2052
|
|
|
1198
2053
|
#############################################
|
|
@@ -1202,50 +2057,90 @@ def unset_grok_model_env(
|
|
|
1202
2057
|
|
|
1203
2058
|
@app.command(name="set-moonshot")
|
|
1204
2059
|
def set_moonshot_model_env(
|
|
1205
|
-
|
|
1206
|
-
|
|
2060
|
+
model: Optional[str] = typer.Option(
|
|
2061
|
+
None,
|
|
2062
|
+
"-m",
|
|
2063
|
+
"--model",
|
|
2064
|
+
help="Model identifier to use for this provider",
|
|
2065
|
+
),
|
|
2066
|
+
prompt_api_key: bool = typer.Option(
|
|
2067
|
+
False,
|
|
2068
|
+
"-k",
|
|
2069
|
+
"--prompt-api-key",
|
|
2070
|
+
help=(
|
|
2071
|
+
"Prompt for MOONSHOT_API_KEY (input hidden). Not suitable for CI. "
|
|
2072
|
+
"If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
|
|
2073
|
+
),
|
|
1207
2074
|
),
|
|
1208
|
-
|
|
1209
|
-
|
|
1210
|
-
"
|
|
1211
|
-
|
|
2075
|
+
cost_per_input_token: Optional[float] = typer.Option(
|
|
2076
|
+
None,
|
|
2077
|
+
"-i",
|
|
2078
|
+
"--cost-per-input-token",
|
|
2079
|
+
help=(
|
|
2080
|
+
"USD per input token override used for cost tracking. Preconfigured for known models; "
|
|
2081
|
+
"REQUIRED if you use a custom/unknown model."
|
|
2082
|
+
),
|
|
1212
2083
|
),
|
|
1213
|
-
|
|
1214
|
-
|
|
2084
|
+
cost_per_output_token: Optional[float] = typer.Option(
|
|
2085
|
+
None,
|
|
2086
|
+
"-o",
|
|
2087
|
+
"--cost-per-output-token",
|
|
2088
|
+
help=(
|
|
2089
|
+
"USD per output token override used for cost tracking. Preconfigured for known models; "
|
|
2090
|
+
"REQUIRED if you use a custom/unknown model."
|
|
2091
|
+
),
|
|
1215
2092
|
),
|
|
1216
2093
|
save: Optional[str] = typer.Option(
|
|
1217
2094
|
None,
|
|
2095
|
+
"-s",
|
|
1218
2096
|
"--save",
|
|
1219
2097
|
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
1220
2098
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1221
2099
|
),
|
|
2100
|
+
quiet: bool = typer.Option(
|
|
2101
|
+
False,
|
|
2102
|
+
"-q",
|
|
2103
|
+
"--quiet",
|
|
2104
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2105
|
+
),
|
|
1222
2106
|
):
|
|
2107
|
+
api_key = None
|
|
2108
|
+
if prompt_api_key:
|
|
2109
|
+
api_key = coerce_blank_to_none(
|
|
2110
|
+
typer.prompt("Moonshot API key", hide_input=True)
|
|
2111
|
+
)
|
|
2112
|
+
|
|
2113
|
+
model = coerce_blank_to_none(model)
|
|
2114
|
+
|
|
1223
2115
|
settings = get_settings()
|
|
1224
2116
|
with settings.edit(save=save) as edit_ctx:
|
|
1225
2117
|
edit_ctx.switch_model_provider(ModelKeyValues.USE_MOONSHOT_MODEL)
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
|
|
2118
|
+
if model is not None:
|
|
2119
|
+
settings.MOONSHOT_MODEL_NAME = model
|
|
2120
|
+
if api_key is not None:
|
|
2121
|
+
settings.MOONSHOT_API_KEY = api_key
|
|
2122
|
+
if cost_per_input_token is not None:
|
|
2123
|
+
settings.MOONSHOT_COST_PER_INPUT_TOKEN = cost_per_input_token
|
|
2124
|
+
if cost_per_output_token is not None:
|
|
2125
|
+
settings.MOONSHOT_COST_PER_OUTPUT_TOKEN = cost_per_output_token
|
|
1229
2126
|
|
|
1230
|
-
handled, path,
|
|
2127
|
+
handled, path, updates = edit_ctx.result
|
|
1231
2128
|
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
|
|
1237
|
-
print(
|
|
1238
|
-
f"Saved environment variables to {path} (ensure it's git-ignored)."
|
|
2129
|
+
effective_model = settings.MOONSHOT_MODEL_NAME
|
|
2130
|
+
if not effective_model:
|
|
2131
|
+
raise typer.BadParameter(
|
|
2132
|
+
"Moonshot model name is not set. Pass --model (or set MOONSHOT_MODEL_NAME).",
|
|
2133
|
+
param_hint="--model",
|
|
1239
2134
|
)
|
|
1240
|
-
|
|
1241
|
-
|
|
1242
|
-
|
|
1243
|
-
|
|
1244
|
-
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
|
|
1248
|
-
|
|
2135
|
+
_handle_save_result(
|
|
2136
|
+
handled=handled,
|
|
2137
|
+
path=path,
|
|
2138
|
+
updates=updates,
|
|
2139
|
+
save=save,
|
|
2140
|
+
quiet=quiet,
|
|
2141
|
+
success_msg=(
|
|
2142
|
+
f":raising_hands: Congratulations! You're now using Moonshot `{escape(effective_model)}` for all evals that require an LLM."
|
|
2143
|
+
),
|
|
1249
2144
|
)
|
|
1250
2145
|
|
|
1251
2146
|
|
|
@@ -1253,35 +2148,52 @@ def set_moonshot_model_env(
|
|
|
1253
2148
|
def unset_moonshot_model_env(
|
|
1254
2149
|
save: Optional[str] = typer.Option(
|
|
1255
2150
|
None,
|
|
2151
|
+
"-s",
|
|
1256
2152
|
"--save",
|
|
1257
2153
|
help="Remove only the Moonshot model related environment variables from a dotenv file. "
|
|
1258
2154
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1259
2155
|
),
|
|
2156
|
+
clear_secrets: bool = typer.Option(
|
|
2157
|
+
False,
|
|
2158
|
+
"-x",
|
|
2159
|
+
"--clear-secrets",
|
|
2160
|
+
help="Also remove MOONSHOT_API_KEY from the dotenv store.",
|
|
2161
|
+
),
|
|
2162
|
+
quiet: bool = typer.Option(
|
|
2163
|
+
False,
|
|
2164
|
+
"-q",
|
|
2165
|
+
"--quiet",
|
|
2166
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2167
|
+
),
|
|
1260
2168
|
):
|
|
1261
2169
|
settings = get_settings()
|
|
1262
2170
|
with settings.edit(save=save) as edit_ctx:
|
|
1263
|
-
settings.MOONSHOT_API_KEY = None
|
|
1264
|
-
settings.MOONSHOT_MODEL_NAME = None
|
|
1265
|
-
settings.TEMPERATURE = None
|
|
1266
2171
|
settings.USE_MOONSHOT_MODEL = None
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
|
|
1270
|
-
|
|
1271
|
-
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
|
|
1275
|
-
|
|
1276
|
-
|
|
1277
|
-
|
|
1278
|
-
|
|
1279
|
-
|
|
1280
|
-
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
)
|
|
2172
|
+
settings.MOONSHOT_MODEL_NAME = None
|
|
2173
|
+
settings.MOONSHOT_COST_PER_INPUT_TOKEN = None
|
|
2174
|
+
settings.MOONSHOT_COST_PER_OUTPUT_TOKEN = None
|
|
2175
|
+
if clear_secrets:
|
|
2176
|
+
settings.MOONSHOT_API_KEY = None
|
|
2177
|
+
|
|
2178
|
+
handled, path, updates = edit_ctx.result
|
|
2179
|
+
|
|
2180
|
+
if _handle_save_result(
|
|
2181
|
+
handled=handled,
|
|
2182
|
+
path=path,
|
|
2183
|
+
updates=updates,
|
|
2184
|
+
save=save,
|
|
2185
|
+
quiet=quiet,
|
|
2186
|
+
updated_msg="Removed Moonshot model environment variables from {path}.",
|
|
2187
|
+
tip_msg=None,
|
|
2188
|
+
):
|
|
2189
|
+
if is_openai_configured():
|
|
2190
|
+
print(
|
|
2191
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
2192
|
+
)
|
|
2193
|
+
else:
|
|
2194
|
+
print(
|
|
2195
|
+
"The Moonshot model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
2196
|
+
)
|
|
1285
2197
|
|
|
1286
2198
|
|
|
1287
2199
|
#############################################
|
|
@@ -1291,50 +2203,91 @@ def unset_moonshot_model_env(
|
|
|
1291
2203
|
|
|
1292
2204
|
@app.command(name="set-deepseek")
|
|
1293
2205
|
def set_deepseek_model_env(
|
|
1294
|
-
|
|
1295
|
-
|
|
2206
|
+
model: Optional[str] = typer.Option(
|
|
2207
|
+
None,
|
|
2208
|
+
"-m",
|
|
2209
|
+
"--model",
|
|
2210
|
+
help="Model identifier to use for this provider",
|
|
2211
|
+
),
|
|
2212
|
+
prompt_api_key: bool = typer.Option(
|
|
2213
|
+
False,
|
|
2214
|
+
"-k",
|
|
2215
|
+
"--prompt-api-key",
|
|
2216
|
+
help=(
|
|
2217
|
+
"Prompt for DEEPSEEK_API_KEY (input hidden). Not suitable for CI. "
|
|
2218
|
+
"If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
|
|
2219
|
+
),
|
|
1296
2220
|
),
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
"
|
|
1300
|
-
|
|
2221
|
+
cost_per_input_token: Optional[float] = typer.Option(
|
|
2222
|
+
None,
|
|
2223
|
+
"-i",
|
|
2224
|
+
"--cost-per-input-token",
|
|
2225
|
+
help=(
|
|
2226
|
+
"USD per input token override used for cost tracking. Preconfigured for known models; "
|
|
2227
|
+
"REQUIRED if you use a custom/unknown model."
|
|
2228
|
+
),
|
|
1301
2229
|
),
|
|
1302
|
-
|
|
1303
|
-
|
|
2230
|
+
cost_per_output_token: Optional[float] = typer.Option(
|
|
2231
|
+
None,
|
|
2232
|
+
"-o",
|
|
2233
|
+
"--cost-per-output-token",
|
|
2234
|
+
help=(
|
|
2235
|
+
"USD per output token override used for cost tracking. Preconfigured for known models; "
|
|
2236
|
+
"REQUIRED if you use a custom/unknown model."
|
|
2237
|
+
),
|
|
1304
2238
|
),
|
|
1305
2239
|
save: Optional[str] = typer.Option(
|
|
1306
2240
|
None,
|
|
2241
|
+
"-s",
|
|
1307
2242
|
"--save",
|
|
1308
2243
|
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
1309
2244
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1310
2245
|
),
|
|
2246
|
+
quiet: bool = typer.Option(
|
|
2247
|
+
False,
|
|
2248
|
+
"-q",
|
|
2249
|
+
"--quiet",
|
|
2250
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2251
|
+
),
|
|
1311
2252
|
):
|
|
2253
|
+
api_key = None
|
|
2254
|
+
if prompt_api_key:
|
|
2255
|
+
api_key = coerce_blank_to_none(
|
|
2256
|
+
typer.prompt("DeepSeek API key", hide_input=True)
|
|
2257
|
+
)
|
|
2258
|
+
|
|
2259
|
+
model = coerce_blank_to_none(model)
|
|
2260
|
+
|
|
1312
2261
|
settings = get_settings()
|
|
1313
2262
|
with settings.edit(save=save) as edit_ctx:
|
|
1314
2263
|
edit_ctx.switch_model_provider(ModelKeyValues.USE_DEEPSEEK_MODEL)
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
2264
|
+
if model is not None:
|
|
2265
|
+
settings.DEEPSEEK_MODEL_NAME = model
|
|
2266
|
+
if api_key is not None:
|
|
2267
|
+
settings.DEEPSEEK_API_KEY = api_key
|
|
2268
|
+
if cost_per_input_token is not None:
|
|
2269
|
+
settings.DEEPSEEK_COST_PER_INPUT_TOKEN = cost_per_input_token
|
|
2270
|
+
if cost_per_output_token is not None:
|
|
2271
|
+
settings.DEEPSEEK_COST_PER_OUTPUT_TOKEN = cost_per_output_token
|
|
1318
2272
|
|
|
1319
|
-
handled, path,
|
|
2273
|
+
handled, path, updates = edit_ctx.result
|
|
1320
2274
|
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
print(
|
|
1327
|
-
f"Saved environment variables to {path} (ensure it's git-ignored)."
|
|
1328
|
-
)
|
|
1329
|
-
else:
|
|
1330
|
-
# updated in-memory & process env only
|
|
1331
|
-
print(
|
|
1332
|
-
"Settings updated for this session. To persist, use --save=dotenv[:path] "
|
|
1333
|
-
"(default .env.local) or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
|
|
2275
|
+
effective_model = settings.DEEPSEEK_MODEL_NAME
|
|
2276
|
+
if not effective_model:
|
|
2277
|
+
raise typer.BadParameter(
|
|
2278
|
+
"DeepSeek model name is not set. Pass --model (or set DEEPSEEK_MODEL_NAME).",
|
|
2279
|
+
param_hint="--model",
|
|
1334
2280
|
)
|
|
1335
2281
|
|
|
1336
|
-
|
|
1337
|
-
|
|
2282
|
+
_handle_save_result(
|
|
2283
|
+
handled=handled,
|
|
2284
|
+
path=path,
|
|
2285
|
+
updates=updates,
|
|
2286
|
+
save=save,
|
|
2287
|
+
quiet=quiet,
|
|
2288
|
+
success_msg=(
|
|
2289
|
+
f":raising_hands: Congratulations! You're now using DeepSeek `{escape(effective_model)}` for all evals that require an LLM."
|
|
2290
|
+
),
|
|
1338
2291
|
)
|
|
1339
2292
|
|
|
1340
2293
|
|
|
@@ -1342,35 +2295,52 @@ def set_deepseek_model_env(
|
|
|
1342
2295
|
def unset_deepseek_model_env(
|
|
1343
2296
|
save: Optional[str] = typer.Option(
|
|
1344
2297
|
None,
|
|
2298
|
+
"-s",
|
|
1345
2299
|
"--save",
|
|
1346
2300
|
help="Remove only the DeepSeek model related environment variables from a dotenv file. "
|
|
1347
2301
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1348
2302
|
),
|
|
2303
|
+
clear_secrets: bool = typer.Option(
|
|
2304
|
+
False,
|
|
2305
|
+
"-x",
|
|
2306
|
+
"--clear-secrets",
|
|
2307
|
+
help="Also remove DEEPSEEK_API_KEY from the dotenv store.",
|
|
2308
|
+
),
|
|
2309
|
+
quiet: bool = typer.Option(
|
|
2310
|
+
False,
|
|
2311
|
+
"-q",
|
|
2312
|
+
"--quiet",
|
|
2313
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2314
|
+
),
|
|
1349
2315
|
):
|
|
1350
2316
|
settings = get_settings()
|
|
1351
2317
|
with settings.edit(save=save) as edit_ctx:
|
|
1352
|
-
settings.DEEPSEEK_API_KEY = None
|
|
1353
|
-
settings.DEEPSEEK_MODEL_NAME = None
|
|
1354
|
-
settings.TEMPERATURE = None
|
|
1355
2318
|
settings.USE_DEEPSEEK_MODEL = None
|
|
1356
|
-
|
|
1357
|
-
|
|
1358
|
-
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
)
|
|
2319
|
+
settings.DEEPSEEK_MODEL_NAME = None
|
|
2320
|
+
settings.DEEPSEEK_COST_PER_INPUT_TOKEN = None
|
|
2321
|
+
settings.DEEPSEEK_COST_PER_OUTPUT_TOKEN = None
|
|
2322
|
+
if clear_secrets:
|
|
2323
|
+
settings.DEEPSEEK_API_KEY = None
|
|
2324
|
+
|
|
2325
|
+
handled, path, updates = edit_ctx.result
|
|
2326
|
+
|
|
2327
|
+
if _handle_save_result(
|
|
2328
|
+
handled=handled,
|
|
2329
|
+
path=path,
|
|
2330
|
+
updates=updates,
|
|
2331
|
+
save=save,
|
|
2332
|
+
quiet=quiet,
|
|
2333
|
+
updated_msg="Removed DeepSeek model environment variables from {path}.",
|
|
2334
|
+
tip_msg=None,
|
|
2335
|
+
):
|
|
2336
|
+
if is_openai_configured():
|
|
2337
|
+
print(
|
|
2338
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
2339
|
+
)
|
|
2340
|
+
else:
|
|
2341
|
+
print(
|
|
2342
|
+
"The DeepSeek model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
2343
|
+
)
|
|
1374
2344
|
|
|
1375
2345
|
|
|
1376
2346
|
#############################################
|
|
@@ -1380,51 +2350,77 @@ def unset_deepseek_model_env(
|
|
|
1380
2350
|
|
|
1381
2351
|
@app.command(name="set-local-embeddings")
|
|
1382
2352
|
def set_local_embeddings_env(
|
|
1383
|
-
|
|
1384
|
-
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
2353
|
+
model: Optional[str] = typer.Option(
|
|
2354
|
+
None,
|
|
2355
|
+
"-m",
|
|
2356
|
+
"--model",
|
|
2357
|
+
help="Model identifier to use for this provider",
|
|
2358
|
+
),
|
|
2359
|
+
prompt_api_key: bool = typer.Option(
|
|
2360
|
+
False,
|
|
2361
|
+
"-k",
|
|
2362
|
+
"--prompt-api-key",
|
|
2363
|
+
help=(
|
|
2364
|
+
"Prompt for LOCAL_EMBEDDING_API_KEY (input hidden). Not suitable for CI. "
|
|
2365
|
+
"If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
|
|
2366
|
+
),
|
|
1388
2367
|
),
|
|
1389
|
-
|
|
2368
|
+
base_url: Optional[str] = typer.Option(
|
|
1390
2369
|
None,
|
|
1391
|
-
"
|
|
1392
|
-
|
|
2370
|
+
"-u",
|
|
2371
|
+
"--base-url",
|
|
2372
|
+
help="Override the API endpoint/base URL used by this provider.",
|
|
1393
2373
|
),
|
|
1394
2374
|
save: Optional[str] = typer.Option(
|
|
1395
2375
|
None,
|
|
2376
|
+
"-s",
|
|
1396
2377
|
"--save",
|
|
1397
2378
|
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
1398
2379
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1399
2380
|
),
|
|
2381
|
+
quiet: bool = typer.Option(
|
|
2382
|
+
False,
|
|
2383
|
+
"-q",
|
|
2384
|
+
"--quiet",
|
|
2385
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2386
|
+
),
|
|
1400
2387
|
):
|
|
2388
|
+
api_key = None
|
|
2389
|
+
if prompt_api_key:
|
|
2390
|
+
api_key = coerce_blank_to_none(
|
|
2391
|
+
typer.prompt("Local Embedding Model API key", hide_input=True)
|
|
2392
|
+
)
|
|
2393
|
+
|
|
2394
|
+
model = coerce_blank_to_none(model)
|
|
2395
|
+
base_url = coerce_blank_to_none(base_url)
|
|
2396
|
+
|
|
1401
2397
|
settings = get_settings()
|
|
1402
2398
|
with settings.edit(save=save) as edit_ctx:
|
|
1403
2399
|
edit_ctx.switch_model_provider(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS)
|
|
1404
|
-
|
|
1405
|
-
|
|
1406
|
-
if
|
|
2400
|
+
if model is not None:
|
|
2401
|
+
settings.LOCAL_EMBEDDING_MODEL_NAME = model
|
|
2402
|
+
if base_url is not None:
|
|
2403
|
+
settings.LOCAL_EMBEDDING_BASE_URL = base_url
|
|
2404
|
+
if api_key is not None:
|
|
1407
2405
|
settings.LOCAL_EMBEDDING_API_KEY = api_key
|
|
1408
2406
|
|
|
1409
|
-
handled, path,
|
|
2407
|
+
handled, path, updates = edit_ctx.result
|
|
1410
2408
|
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
print(
|
|
1417
|
-
f"Saved environment variables to {path} (ensure it's git-ignored)."
|
|
2409
|
+
effective_model = settings.LOCAL_EMBEDDING_MODEL_NAME
|
|
2410
|
+
if not effective_model:
|
|
2411
|
+
raise typer.BadParameter(
|
|
2412
|
+
"Local embedding model name is not set. Pass --model (or set LOCAL_EMBEDDING_MODEL_NAME).",
|
|
2413
|
+
param_hint="--model",
|
|
1418
2414
|
)
|
|
1419
|
-
|
|
1420
|
-
|
|
1421
|
-
|
|
1422
|
-
|
|
1423
|
-
|
|
1424
|
-
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
2415
|
+
_handle_save_result(
|
|
2416
|
+
handled=handled,
|
|
2417
|
+
path=path,
|
|
2418
|
+
updates=updates,
|
|
2419
|
+
save=save,
|
|
2420
|
+
quiet=quiet,
|
|
2421
|
+
success_msg=(
|
|
2422
|
+
f":raising_hands: Congratulations! You're now using the local embedding model `{escape(effective_model)}` for all evals that require text embeddings."
|
|
2423
|
+
),
|
|
1428
2424
|
)
|
|
1429
2425
|
|
|
1430
2426
|
|
|
@@ -1432,35 +2428,51 @@ def set_local_embeddings_env(
|
|
|
1432
2428
|
def unset_local_embeddings_env(
|
|
1433
2429
|
save: Optional[str] = typer.Option(
|
|
1434
2430
|
None,
|
|
2431
|
+
"-s",
|
|
1435
2432
|
"--save",
|
|
1436
2433
|
help="Remove only the local embedding related environment variables from a dotenv file. "
|
|
1437
2434
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1438
2435
|
),
|
|
2436
|
+
clear_secrets: bool = typer.Option(
|
|
2437
|
+
False,
|
|
2438
|
+
"-x",
|
|
2439
|
+
"--clear-secrets",
|
|
2440
|
+
help="Also remove LOCAL_MODEL_API_KEY from the dotenv store.",
|
|
2441
|
+
),
|
|
2442
|
+
quiet: bool = typer.Option(
|
|
2443
|
+
False,
|
|
2444
|
+
"-q",
|
|
2445
|
+
"--quiet",
|
|
2446
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2447
|
+
),
|
|
1439
2448
|
):
|
|
1440
2449
|
settings = get_settings()
|
|
1441
2450
|
with settings.edit(save=save) as edit_ctx:
|
|
1442
|
-
settings.LOCAL_EMBEDDING_API_KEY = None
|
|
1443
2451
|
settings.LOCAL_EMBEDDING_MODEL_NAME = None
|
|
1444
2452
|
settings.LOCAL_EMBEDDING_BASE_URL = None
|
|
1445
2453
|
settings.USE_LOCAL_EMBEDDINGS = None
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
|
|
1462
|
-
|
|
1463
|
-
|
|
2454
|
+
if clear_secrets:
|
|
2455
|
+
settings.LOCAL_EMBEDDING_API_KEY = None
|
|
2456
|
+
|
|
2457
|
+
handled, path, updates = edit_ctx.result
|
|
2458
|
+
|
|
2459
|
+
if _handle_save_result(
|
|
2460
|
+
handled=handled,
|
|
2461
|
+
path=path,
|
|
2462
|
+
updates=updates,
|
|
2463
|
+
save=save,
|
|
2464
|
+
quiet=quiet,
|
|
2465
|
+
updated_msg="Removed local embedding environment variables from {path}.",
|
|
2466
|
+
tip_msg=None,
|
|
2467
|
+
):
|
|
2468
|
+
if is_openai_configured():
|
|
2469
|
+
print(
|
|
2470
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
2471
|
+
)
|
|
2472
|
+
else:
|
|
2473
|
+
print(
|
|
2474
|
+
"The local embeddings model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
2475
|
+
)
|
|
1464
2476
|
|
|
1465
2477
|
|
|
1466
2478
|
#############################################
|
|
@@ -1470,172 +2482,263 @@ def unset_local_embeddings_env(
|
|
|
1470
2482
|
|
|
1471
2483
|
@app.command(name="set-gemini")
|
|
1472
2484
|
def set_gemini_model_env(
|
|
1473
|
-
|
|
1474
|
-
None, "--model-name", help="Gemini Model name"
|
|
1475
|
-
),
|
|
1476
|
-
google_api_key: Optional[str] = typer.Option(
|
|
2485
|
+
model: Optional[str] = typer.Option(
|
|
1477
2486
|
None,
|
|
1478
|
-
"
|
|
1479
|
-
|
|
2487
|
+
"-m",
|
|
2488
|
+
"--model",
|
|
2489
|
+
help="Model identifier to use for this provider",
|
|
2490
|
+
),
|
|
2491
|
+
prompt_api_key: bool = typer.Option(
|
|
2492
|
+
False,
|
|
2493
|
+
"-k",
|
|
2494
|
+
"--prompt-api-key",
|
|
2495
|
+
help=(
|
|
2496
|
+
"Prompt for GOOGLE_API_KEY (input hidden). Not suitable for CI. "
|
|
2497
|
+
"If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
|
|
2498
|
+
),
|
|
1480
2499
|
),
|
|
1481
|
-
|
|
1482
|
-
None,
|
|
2500
|
+
project: Optional[str] = typer.Option(
|
|
2501
|
+
None,
|
|
2502
|
+
"-p",
|
|
2503
|
+
"--project",
|
|
2504
|
+
help="GCP project ID (used by Vertex AI / Gemini when applicable).",
|
|
1483
2505
|
),
|
|
1484
|
-
|
|
1485
|
-
None,
|
|
2506
|
+
location: Optional[str] = typer.Option(
|
|
2507
|
+
None,
|
|
2508
|
+
"-l",
|
|
2509
|
+
"--location",
|
|
2510
|
+
help="GCP location/region for Vertex AI (e.g., `us-central1`).",
|
|
1486
2511
|
),
|
|
1487
|
-
|
|
2512
|
+
service_account_file: Optional[Path] = typer.Option(
|
|
1488
2513
|
None,
|
|
1489
|
-
"
|
|
1490
|
-
|
|
2514
|
+
"-S",
|
|
2515
|
+
"--service-account-file",
|
|
2516
|
+
help=("Path to a Google service account JSON key file."),
|
|
2517
|
+
exists=True,
|
|
2518
|
+
dir_okay=False,
|
|
2519
|
+
readable=True,
|
|
1491
2520
|
),
|
|
1492
2521
|
save: Optional[str] = typer.Option(
|
|
1493
2522
|
None,
|
|
2523
|
+
"-s",
|
|
1494
2524
|
"--save",
|
|
1495
2525
|
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
1496
2526
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1497
2527
|
),
|
|
2528
|
+
quiet: bool = typer.Option(
|
|
2529
|
+
False,
|
|
2530
|
+
"-q",
|
|
2531
|
+
"--quiet",
|
|
2532
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2533
|
+
),
|
|
1498
2534
|
):
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
"You must provide either --google-api-key or both --project-id and --location.",
|
|
1504
|
-
err=True,
|
|
2535
|
+
api_key = None
|
|
2536
|
+
if prompt_api_key:
|
|
2537
|
+
api_key = coerce_blank_to_none(
|
|
2538
|
+
typer.prompt("Google API key", hide_input=True)
|
|
1505
2539
|
)
|
|
1506
|
-
|
|
2540
|
+
|
|
2541
|
+
model = coerce_blank_to_none(model)
|
|
2542
|
+
project = coerce_blank_to_none(project)
|
|
2543
|
+
location = coerce_blank_to_none(location)
|
|
1507
2544
|
|
|
1508
2545
|
settings = get_settings()
|
|
1509
2546
|
with settings.edit(save=save) as edit_ctx:
|
|
1510
2547
|
edit_ctx.switch_model_provider(ModelKeyValues.USE_GEMINI_MODEL)
|
|
1511
2548
|
|
|
1512
|
-
if
|
|
1513
|
-
settings.
|
|
2549
|
+
if model is not None:
|
|
2550
|
+
settings.GEMINI_MODEL_NAME = model
|
|
2551
|
+
if project is not None:
|
|
2552
|
+
settings.GOOGLE_CLOUD_PROJECT = project
|
|
2553
|
+
if location is not None:
|
|
2554
|
+
settings.GOOGLE_CLOUD_LOCATION = location
|
|
2555
|
+
if service_account_file is not None:
|
|
2556
|
+
settings.GOOGLE_SERVICE_ACCOUNT_KEY = load_service_account_key_file(
|
|
2557
|
+
service_account_file
|
|
2558
|
+
)
|
|
2559
|
+
if api_key is not None:
|
|
2560
|
+
settings.GOOGLE_API_KEY = api_key
|
|
1514
2561
|
settings.GOOGLE_GENAI_USE_VERTEXAI = False
|
|
1515
|
-
|
|
2562
|
+
elif (
|
|
2563
|
+
project is not None
|
|
2564
|
+
or location is not None
|
|
2565
|
+
or service_account_file is not None
|
|
2566
|
+
):
|
|
1516
2567
|
settings.GOOGLE_GENAI_USE_VERTEXAI = True
|
|
1517
|
-
if google_cloud_project:
|
|
1518
|
-
settings.GOOGLE_CLOUD_PROJECT = google_cloud_project
|
|
1519
|
-
if google_cloud_location:
|
|
1520
|
-
settings.GOOGLE_CLOUD_LOCATION = google_cloud_location
|
|
1521
|
-
if google_service_account_key:
|
|
1522
|
-
settings.GOOGLE_SERVICE_ACCOUNT_KEY = google_service_account_key
|
|
1523
|
-
if model_name:
|
|
1524
|
-
settings.GEMINI_MODEL_NAME = model_name
|
|
1525
2568
|
|
|
1526
|
-
handled, path,
|
|
2569
|
+
handled, path, updates = edit_ctx.result
|
|
1527
2570
|
|
|
1528
|
-
|
|
1529
|
-
|
|
1530
|
-
|
|
1531
|
-
|
|
1532
|
-
|
|
1533
|
-
print(
|
|
1534
|
-
f"Saved environment variables to {path} (ensure it's git-ignored)."
|
|
2571
|
+
effective_model = settings.GEMINI_MODEL_NAME
|
|
2572
|
+
if not effective_model:
|
|
2573
|
+
raise typer.BadParameter(
|
|
2574
|
+
"Gemini model name is not set. Pass --model (or set GEMINI_MODEL_NAME).",
|
|
2575
|
+
param_hint="--model",
|
|
1535
2576
|
)
|
|
1536
|
-
|
|
1537
|
-
|
|
1538
|
-
|
|
1539
|
-
|
|
1540
|
-
|
|
1541
|
-
|
|
1542
|
-
|
|
1543
|
-
|
|
1544
|
-
|
|
2577
|
+
_handle_save_result(
|
|
2578
|
+
handled=handled,
|
|
2579
|
+
path=path,
|
|
2580
|
+
updates=updates,
|
|
2581
|
+
save=save,
|
|
2582
|
+
quiet=quiet,
|
|
2583
|
+
success_msg=(
|
|
2584
|
+
f":raising_hands: Congratulations! You're now using Gemini `{escape(effective_model)}` for all evals that require an LLM."
|
|
2585
|
+
),
|
|
1545
2586
|
)
|
|
1546
|
-
if _model_name is not None:
|
|
1547
|
-
print(
|
|
1548
|
-
f":raising_hands: Congratulations! You're now using Gemini's `{escape(_model_name)}` for all evals that require an LLM."
|
|
1549
|
-
)
|
|
1550
|
-
else:
|
|
1551
|
-
print(
|
|
1552
|
-
":raising_hands: Congratulations! You're now using Gemini's model for all evals that require an LLM."
|
|
1553
|
-
)
|
|
1554
2587
|
|
|
1555
2588
|
|
|
1556
2589
|
@app.command(name="unset-gemini")
|
|
1557
2590
|
def unset_gemini_model_env(
|
|
1558
2591
|
save: Optional[str] = typer.Option(
|
|
1559
2592
|
None,
|
|
2593
|
+
"-s",
|
|
1560
2594
|
"--save",
|
|
1561
2595
|
help="Remove only the Gemini related environment variables from a dotenv file. "
|
|
1562
2596
|
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
1563
2597
|
),
|
|
2598
|
+
clear_secrets: bool = typer.Option(
|
|
2599
|
+
False,
|
|
2600
|
+
"-x",
|
|
2601
|
+
"--clear-secrets",
|
|
2602
|
+
help="Also remove GOOGLE_API_KEY and GOOGLE_SERVICE_ACCOUNT_KEY from the dotenv store.",
|
|
2603
|
+
),
|
|
2604
|
+
quiet: bool = typer.Option(
|
|
2605
|
+
False,
|
|
2606
|
+
"-q",
|
|
2607
|
+
"--quiet",
|
|
2608
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2609
|
+
),
|
|
1564
2610
|
):
|
|
1565
2611
|
settings = get_settings()
|
|
1566
2612
|
with settings.edit(save=save) as edit_ctx:
|
|
1567
|
-
settings.
|
|
2613
|
+
settings.USE_GEMINI_MODEL = None
|
|
1568
2614
|
settings.GOOGLE_GENAI_USE_VERTEXAI = None
|
|
1569
2615
|
settings.GOOGLE_CLOUD_PROJECT = None
|
|
1570
2616
|
settings.GOOGLE_CLOUD_LOCATION = None
|
|
1571
2617
|
settings.GEMINI_MODEL_NAME = None
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
|
|
1588
|
-
|
|
1589
|
-
|
|
1590
|
-
|
|
2618
|
+
if clear_secrets:
|
|
2619
|
+
settings.GOOGLE_API_KEY = None
|
|
2620
|
+
settings.GOOGLE_SERVICE_ACCOUNT_KEY = None
|
|
2621
|
+
|
|
2622
|
+
handled, path, updates = edit_ctx.result
|
|
2623
|
+
|
|
2624
|
+
if _handle_save_result(
|
|
2625
|
+
handled=handled,
|
|
2626
|
+
path=path,
|
|
2627
|
+
updates=updates,
|
|
2628
|
+
save=save,
|
|
2629
|
+
quiet=quiet,
|
|
2630
|
+
updated_msg="Removed Gemini model environment variables from {path}.",
|
|
2631
|
+
tip_msg=None,
|
|
2632
|
+
):
|
|
2633
|
+
if is_openai_configured():
|
|
2634
|
+
print(
|
|
2635
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
2636
|
+
)
|
|
2637
|
+
else:
|
|
2638
|
+
print(
|
|
2639
|
+
"The Gemini model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
2640
|
+
)
|
|
1591
2641
|
|
|
1592
2642
|
|
|
1593  2643     @app.command(name="set-litellm")
1594  2644     def set_litellm_model_env(
1595        -
1596        -      api_key: Optional[str] = typer.Option(
      2645  +      model: Optional[str] = typer.Option(
1597  2646             None,
1598        -          "
1599        -
      2647  +          "-m",
      2648  +          "--model",
      2649  +          help="Model identifier to use for this provider",
      2650  +      ),
      2651  +      prompt_api_key: bool = typer.Option(
      2652  +          False,
      2653  +          "-k",
      2654  +          "--prompt-api-key",
      2655  +          help=(
      2656  +              "Prompt for LITELLM_API_KEY (input hidden). Not suitable for CI. "
      2657  +              "If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
      2658  +          ),
1600  2659         ),
1601        -
1602        -          None,
      2660  +      base_url: Optional[str] = typer.Option(
      2661  +          None,
      2662  +          "-u",
      2663  +          "--base-url",
      2664  +          help="Override the API endpoint/base URL used by this provider.",
      2665  +      ),
      2666  +      proxy_prompt_api_key: bool = typer.Option(
      2667  +          False,
      2668  +          "-K",
      2669  +          "--proxy-prompt-api-key",
      2670  +          help=(
      2671  +              "Prompt for LITELLM_PROXY_API_KEY (input hidden). Not suitable for CI. "
      2672  +              "If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
      2673  +          ),
      2674  +      ),
      2675  +      proxy_base_url: Optional[str] = typer.Option(
      2676  +          None,
      2677  +          "-U",
      2678  +          "--proxy-base-url",
      2679  +          help="Override the LITELLM_PROXY_API_BASE URL (useful for proxies, gateways, or self-hosted endpoints).",
1603  2680         ),
1604  2681         save: Optional[str] = typer.Option(
1605  2682             None,
      2683  +          "-s",
1606  2684             "--save",
1607  2685             help="Persist CLI parameters as environment variables in a dotenv file. "
1608  2686             "Usage: --save=dotenv[:path] (default: .env.local)",
1609  2687         ),
      2688  +      quiet: bool = typer.Option(
      2689  +          False,
      2690  +          "-q",
      2691  +          "--quiet",
      2692  +          help="Suppress printing to the terminal (useful for CI).",
      2693  +      ),
1610  2694     ):
      2695  +      api_key = None
      2696  +      if prompt_api_key:
      2697  +          api_key = coerce_blank_to_none(
      2698  +              typer.prompt("LiteLLM API key", hide_input=True)
      2699  +          )
      2700  +
      2701  +      proxy_api_key = None
      2702  +      if proxy_prompt_api_key:
      2703  +          proxy_api_key = coerce_blank_to_none(
      2704  +              typer.prompt("LiteLLM Proxy API key", hide_input=True)
      2705  +          )
      2706  +
      2707  +      model = coerce_blank_to_none(model)
      2708  +      base_url = coerce_blank_to_none(base_url)
      2709  +      proxy_base_url = coerce_blank_to_none(proxy_base_url)
      2710  +
1611  2711         settings = get_settings()
1612  2712         with settings.edit(save=save) as edit_ctx:
1613  2713             edit_ctx.switch_model_provider(ModelKeyValues.USE_LITELLM)
1614        -
      2714  +          if model is not None:
      2715  +              settings.LITELLM_MODEL_NAME = model
1615  2716             if api_key is not None:
1616  2717                 settings.LITELLM_API_KEY = api_key
1617        -          if
1618        -              settings.LITELLM_API_BASE =
1619        -
1620        -
1621        -
1622        -
1623        -
1624        -
1625        -
1626        -
1627        -
1628        -
1629        -
1630        -
1631        -          # updated in-memory & process env only
1632        -          print(
1633        -              "Settings updated for this session. To persist, use --save=dotenv[:path] "
1634        -              "(default .env.local) or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
      2718  +          if base_url is not None:
      2719  +              settings.LITELLM_API_BASE = base_url
      2720  +          if proxy_api_key is not None:
      2721  +              settings.LITELLM_PROXY_API_KEY = proxy_api_key
      2722  +          if proxy_base_url is not None:
      2723  +              settings.LITELLM_PROXY_API_BASE = proxy_base_url
      2724  +
      2725  +      handled, path, updates = edit_ctx.result
      2726  +
      2727  +      effective_model = settings.LITELLM_MODEL_NAME
      2728  +      if not effective_model:
      2729  +          raise typer.BadParameter(
      2730  +              "LiteLLM model name is not set. Pass --model (or set LITELLM_MODEL_NAME).",
      2731  +              param_hint="--model",
1635  2732             )
1636        -
1637        -
1638        -
      2733  +      _handle_save_result(
      2734  +          handled=handled,
      2735  +          path=path,
      2736  +          updates=updates,
      2737  +          save=save,
      2738  +          quiet=quiet,
      2739  +          success_msg=(
      2740  +              f":raising_hands: Congratulations! You're now using LiteLLM `{escape(effective_model)}` for all evals that require an LLM."
      2741  +          ),
1639  2742         )
1640  2743
1641  2744
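Read end-to-end, the new `set-litellm` flow is: optionally prompt for secrets, switch the active provider, store only the values that were actually passed, and fail fast with `BadParameter` if no model name is available. A couple of hedged invocation sketches (model name and URLs are placeholders, not package defaults):

# point evals at a local LiteLLM proxy and prompt for LITELLM_API_KEY interactively
deepeval set-litellm --model "openai/gpt-4o-mini" --base-url "http://localhost:4000" --prompt-api-key

# same, but persist the non-secret values to .env.local and keep CI logs clean
deepeval set-litellm -m "openai/gpt-4o-mini" -u "http://localhost:4000" --save=dotenv --quiet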
@@ -1643,35 +2746,195 @@ def set_litellm_model_env(
1643  2746     def unset_litellm_model_env(
1644  2747         save: Optional[str] = typer.Option(
1645  2748             None,
      2749  +          "-s",
1646  2750             "--save",
1647  2751             help="Remove only the LiteLLM related environment variables from a dotenv file. "
1648  2752             "Usage: --save=dotenv[:path] (default: .env.local)",
1649  2753         ),
      2754  +      clear_secrets: bool = typer.Option(
      2755  +          False,
      2756  +          "-x",
      2757  +          "--clear-secrets",
      2758  +          help="Also remove LITELLM_API_KEY and LITELLM_PROXY_API_KEY from the dotenv store.",
      2759  +      ),
      2760  +      quiet: bool = typer.Option(
      2761  +          False,
      2762  +          "-q",
      2763  +          "--quiet",
      2764  +          help="Suppress printing to the terminal (useful for CI).",
      2765  +      ),
1650  2766     ):
1651  2767         settings = get_settings()
1652  2768         with settings.edit(save=save) as edit_ctx:
1653        -          settings.
      2769  +          settings.USE_LITELLM = None
1654  2770             settings.LITELLM_MODEL_NAME = None
1655  2771             settings.LITELLM_API_BASE = None
1656        -          settings.
      2772  +          settings.LITELLM_PROXY_API_BASE = None
      2773  +          if clear_secrets:
      2774  +              settings.LITELLM_API_KEY = None
      2775  +              settings.LITELLM_PROXY_API_KEY = None
      2776  +
      2777  +      handled, path, updates = edit_ctx.result
      2778  +
      2779  +      if _handle_save_result(
      2780  +          handled=handled,
      2781  +          path=path,
      2782  +          updates=updates,
      2783  +          save=save,
      2784  +          quiet=quiet,
      2785  +          updated_msg="Removed LiteLLM model environment variables from {path}.",
      2786  +          tip_msg=None,
      2787  +      ):
      2788  +          if is_openai_configured():
      2789  +              print(
      2790  +                  ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
      2791  +              )
      2792  +          else:
      2793  +              print(
      2794  +                  "The LiteLLM model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
      2795  +              )
1657  2796
1658        -      handled, path, _ = edit_ctx.result
1659  2797
1660        -
1661        -
1662        -
1663        -
1664        -          # persisted to a file
1665        -          print(f"Removed LiteLLM model environment variables from {path}.")
      2798  +  #############################################
      2799  +  # Portkey #############
      2800  +  #############################################
      2801  +
1666  2802
1667        -
1668        -
1669        -
|
+
@app.command(name="set-portkey")
|
|
2804
|
+
def set_portkey_model_env(
|
|
2805
|
+
model: Optional[str] = typer.Option(
|
|
2806
|
+
None,
|
|
2807
|
+
"-m",
|
|
2808
|
+
"--model",
|
|
2809
|
+
help="Model identifier to use for this provider",
|
|
2810
|
+
),
|
|
2811
|
+
prompt_api_key: bool = typer.Option(
|
|
2812
|
+
False,
|
|
2813
|
+
"-k",
|
|
2814
|
+
"--prompt-api-key",
|
|
2815
|
+
help=(
|
|
2816
|
+
"Prompt for PORTKEY_API_KEY (input hidden). Not suitable for CI. "
|
|
2817
|
+
"If --save (or DEEPEVAL_DEFAULT_SAVE) is used, the key is written to dotenv in plaintext."
|
|
2818
|
+
),
|
|
2819
|
+
),
|
|
2820
|
+
base_url: Optional[str] = typer.Option(
|
|
2821
|
+
None,
|
|
2822
|
+
"-u",
|
|
2823
|
+
"--base-url",
|
|
2824
|
+
help="Override the API endpoint/base URL used by this provider.",
|
|
2825
|
+
),
|
|
2826
|
+
provider: Optional[str] = typer.Option(
|
|
2827
|
+
None,
|
|
2828
|
+
"-P",
|
|
2829
|
+
"--provider",
|
|
2830
|
+
help="Override the PORTKEY_PROVIDER_NAME.",
|
|
2831
|
+
),
|
|
2832
|
+
save: Optional[str] = typer.Option(
|
|
2833
|
+
None,
|
|
2834
|
+
"-s",
|
|
2835
|
+
"--save",
|
|
2836
|
+
help="Persist CLI parameters as environment variables in a dotenv file. "
|
|
2837
|
+
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
2838
|
+
),
|
|
2839
|
+
quiet: bool = typer.Option(
|
|
2840
|
+
False,
|
|
2841
|
+
"-q",
|
|
2842
|
+
"--quiet",
|
|
2843
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2844
|
+
),
|
|
2845
|
+
):
|
|
2846
|
+
api_key = None
|
|
2847
|
+
if prompt_api_key:
|
|
2848
|
+
api_key = coerce_blank_to_none(
|
|
2849
|
+
typer.prompt("Portkey API key", hide_input=True)
|
|
1670
2850
|
)
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
|
|
2851
|
+
|
|
2852
|
+
model = coerce_blank_to_none(model)
|
|
2853
|
+
base_url = coerce_blank_to_none(base_url)
|
|
2854
|
+
provider = coerce_blank_to_none(provider)
|
|
2855
|
+
|
|
2856
|
+
settings = get_settings()
|
|
2857
|
+
with settings.edit(save=save) as edit_ctx:
|
|
2858
|
+
edit_ctx.switch_model_provider(ModelKeyValues.USE_PORTKEY_MODEL)
|
|
2859
|
+
if model is not None:
|
|
2860
|
+
settings.PORTKEY_MODEL_NAME = model
|
|
2861
|
+
if api_key is not None:
|
|
2862
|
+
settings.PORTKEY_API_KEY = api_key
|
|
2863
|
+
if base_url is not None:
|
|
2864
|
+
settings.PORTKEY_BASE_URL = base_url
|
|
2865
|
+
if provider is not None:
|
|
2866
|
+
settings.PORTKEY_PROVIDER_NAME = provider
|
|
2867
|
+
|
|
2868
|
+
handled, path, updates = edit_ctx.result
|
|
2869
|
+
|
|
2870
|
+
effective_model = settings.PORTKEY_MODEL_NAME
|
|
2871
|
+
if not effective_model:
|
|
2872
|
+
raise typer.BadParameter(
|
|
2873
|
+
"Portkey model name is not set. Pass --model (or set PORTKEY_MODEL_NAME).",
|
|
2874
|
+
param_hint="--model",
|
|
1674
2875
|
)
|
|
2876
|
+
_handle_save_result(
|
|
2877
|
+
handled=handled,
|
|
2878
|
+
path=path,
|
|
2879
|
+
updates=updates,
|
|
2880
|
+
save=save,
|
|
2881
|
+
quiet=quiet,
|
|
2882
|
+
success_msg=(
|
|
2883
|
+
f":raising_hands: Congratulations! You're now using Portkey `{escape(effective_model)}` for all evals that require an LLM."
|
|
2884
|
+
),
|
|
2885
|
+
)
|
|
2886
|
+
|
|
2887
|
+
|
|
2888
|
+
@app.command(name="unset-portkey")
|
|
2889
|
+
def unset_portkey_model_env(
|
|
2890
|
+
save: Optional[str] = typer.Option(
|
|
2891
|
+
None,
|
|
2892
|
+
"-s",
|
|
2893
|
+
"--save",
|
|
2894
|
+
help="Remove only the Portkey related environment variables from a dotenv file. "
|
|
2895
|
+
"Usage: --save=dotenv[:path] (default: .env.local)",
|
|
2896
|
+
),
|
|
2897
|
+
clear_secrets: bool = typer.Option(
|
|
2898
|
+
False,
|
|
2899
|
+
"-x",
|
|
2900
|
+
"--clear-secrets",
|
|
2901
|
+
help="Also remove PORTKEY_API_KEY from the dotenv store.",
|
|
2902
|
+
),
|
|
2903
|
+
quiet: bool = typer.Option(
|
|
2904
|
+
False,
|
|
2905
|
+
"-q",
|
|
2906
|
+
"--quiet",
|
|
2907
|
+
help="Suppress printing to the terminal (useful for CI).",
|
|
2908
|
+
),
|
|
2909
|
+
):
|
|
2910
|
+
settings = get_settings()
|
|
2911
|
+
with settings.edit(save=save) as edit_ctx:
|
|
2912
|
+
settings.USE_PORTKEY_MODEL = None
|
|
2913
|
+
settings.PORTKEY_MODEL_NAME = None
|
|
2914
|
+
settings.PORTKEY_BASE_URL = None
|
|
2915
|
+
settings.PORTKEY_PROVIDER_NAME = None
|
|
2916
|
+
if clear_secrets:
|
|
2917
|
+
settings.PORTKEY_API_KEY = None
|
|
2918
|
+
|
|
2919
|
+
handled, path, updates = edit_ctx.result
|
|
2920
|
+
|
|
2921
|
+
if _handle_save_result(
|
|
2922
|
+
handled=handled,
|
|
2923
|
+
path=path,
|
|
2924
|
+
updates=updates,
|
|
2925
|
+
save=save,
|
|
2926
|
+
quiet=quiet,
|
|
2927
|
+
updated_msg="Removed Portkey model environment variables from {path}.",
|
|
2928
|
+
tip_msg=None,
|
|
2929
|
+
):
|
|
2930
|
+
if is_openai_configured():
|
|
2931
|
+
print(
|
|
2932
|
+
":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
|
|
2933
|
+
)
|
|
2934
|
+
else:
|
|
2935
|
+
print(
|
|
2936
|
+
"The Portkey model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
|
|
2937
|
+
)
|
|
1675
2938
|
|
|
1676
2939
|
|
|
1677
2940
|
if __name__ == "__main__":
|
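The new Portkey pair mirrors the LiteLLM commands: `set-portkey` switches the active provider and validates that a model name is present, while `unset-portkey` clears the related settings. A hedged usage sketch (model, base URL, and provider values are placeholders, not defaults from the package):

# route evals through Portkey, prompting for PORTKEY_API_KEY and persisting the rest
deepeval set-portkey -m "gpt-4o" -u "https://api.portkey.ai/v1" -P "openai" --prompt-api-key --save=dotenv

# tear the configuration down again, including the stored PORTKEY_API_KEY
deepeval unset-portkey --save=dotenv --clear-secrets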