deepeval 3.4.7__py3-none-any.whl → 3.4.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepeval/cli/main.py CHANGED
@@ -1,3 +1,20 @@
1
+ """
2
+ DeepEval CLI: Model Provider Configuration Commands
3
+
4
+ General behavior for all `set-*` / `unset-*` commands:
5
+
6
+ - Non-secret settings (model name, endpoint, deployment, toggles) are always
7
+ persisted in the hidden `.deepeval/.deepeval` JSON store.
8
+ - Secrets (API keys) are **never** written to the JSON store.
9
+ - If `--save=dotenv[:path]` is passed, both secrets and non-secrets are
10
+ written to the specified dotenv file (default: `.env.local`).
11
+ Dotenv files should be git-ignored.
12
+ - If `--save` is not passed, only the JSON store is updated.
13
+ - When unsetting a provider, only that provider’s keys are removed.
14
+ If another provider’s credentials remain (e.g. `OPENAI_API_KEY`), it
15
+ may still be selected as the default.
16
+ """
17
+
1
18
  import os
2
19
  from typing import Optional
3
20
  from rich import print
@@ -28,11 +45,16 @@ from deepeval.cli.utils import (
28
45
  PROD,
29
46
  clear_evaluation_model_keys,
30
47
  clear_embedding_model_keys,
48
+ resolve_save_target,
49
+ save_environ_to_store,
50
+ unset_environ_in_store,
51
+ switch_model_provider,
31
52
  )
32
53
  from deepeval.confident.api import (
33
54
  get_confident_api_key,
34
55
  is_confident,
35
56
  set_confident_api_key,
57
+ CONFIDENT_API_KEY_ENV_VAR,
36
58
  )
37
59
 
38
60
  app = typer.Typer(name="deepeval")
@@ -55,16 +77,49 @@ def find_available_port():
55
77
  return s.getsockname()[1]
56
78
 
57
79
 
80
+ def is_openai_configured() -> bool:
81
+ api_key = os.getenv("OPENAI_API_KEY") or KEY_FILE_HANDLER.fetch_data(
82
+ ModelKeyValues.OPENAI_API_KEY
83
+ )
84
+ return bool(api_key)
85
+
86
+
58
87
  @app.command(name="set-confident-region")
59
88
  def set_confident_region_command(
60
89
  region: Regions = typer.Argument(
61
90
  ..., help="The data region to use (US or EU)"
62
- )
91
+ ),
92
+ save: Optional[str] = typer.Option(
93
+ None,
94
+ "--save",
95
+ help="Persist CLI parameters as environment variables in a dotenv file. "
96
+ "Usage: --save=dotenv[:path] (default: .env.local)",
97
+ ),
63
98
  ):
64
99
  """Set the Confident AI data region."""
65
100
  # Add flag emojis based on region
66
101
  flag = "🇺🇸" if region == Regions.US else "🇪🇺"
67
102
  KEY_FILE_HANDLER.write_key(KeyValues.CONFIDENT_REGION, region.value)
103
+ save_target = resolve_save_target(save)
104
+ if save_target:
105
+ handled, path = save_environ_to_store(
106
+ save_target,
107
+ {
108
+ KeyValues.CONFIDENT_REGION: region.value,
109
+ },
110
+ )
111
+ if handled:
112
+ print(
113
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
114
+ )
115
+ else:
116
+ print("Unsupported --save option. Use --save=dotenv[:path].")
117
+ else:
118
+ print(
119
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
120
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
121
+ )
122
+
68
123
  print(
69
124
  f":raising_hands: Congratulations! You're now using the {flag} {region.value} data region for Confident AI."
70
125
  )
@@ -80,13 +135,20 @@ def login(
80
135
  None,
81
136
  "--confident-api-key",
82
137
  "-c",
83
- help="Optional confident API key to bypass login.",
138
+ help="Confident API key (non-interactive). If omitted, you'll be prompted to enter one. In all cases the key is saved to a dotenv file (default: .env.local) unless overridden with --save.",
139
+ ),
140
+ save: Optional[str] = typer.Option(
141
+ None,
142
+ "--save",
143
+ help="Where to persist settings. Format: dotenv[:path]. Defaults to .env.local. If omitted, login still writes to .env.local.",
84
144
  ),
85
145
  ):
86
146
  with capture_login_event() as span:
147
+ completed = False
87
148
  try:
149
+ # Resolve the key from CLI flag or interactive flow
88
150
  if confident_api_key:
89
- api_key = confident_api_key
151
+ key = confident_api_key.strip()
90
152
  else:
91
153
  render_login_message()
92
154
 
@@ -104,35 +166,91 @@ def login(
104
166
  login_url = f"{PROD}/pair?code={pairing_code}&port={port}"
105
167
  webbrowser.open(login_url)
106
168
  print(
107
- f"(open this link if your browser did not opend: [link={PROD}]{PROD}[/link])"
169
+ f"(open this link if your browser did not open: [link={PROD}]{PROD}[/link])"
108
170
  )
171
+
172
+ # Manual fallback if still empty
109
173
  if api_key == "":
110
174
  while True:
111
- api_key = input(f"🔐 Enter your API Key: ").strip()
175
+ api_key = input("🔐 Enter your API Key: ").strip()
112
176
  if api_key:
113
177
  break
114
178
  else:
115
179
  print(
116
180
  "❌ API Key cannot be empty. Please try again.\n"
117
181
  )
182
+ key = api_key.strip()
118
183
 
119
- set_confident_api_key(api_key)
120
- span.set_attribute("completed", True)
184
+ save_target = resolve_save_target(save) or "dotenv:.env.local"
185
+ handled, path = save_environ_to_store(
186
+ save_target,
187
+ {KeyValues.API_KEY: key, CONFIDENT_API_KEY_ENV_VAR: key},
188
+ )
189
+ if handled:
190
+ print(
191
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
192
+ )
193
+ else:
194
+ print("Unsupported --save option. Use --save=dotenv[:path].")
121
195
 
196
+ completed = True
122
197
  print(
123
- "\n🎉🥳 Congratulations! You've successfully logged in! :raising_hands: "
198
+ "\n🎉🥳 Congratulations! You've successfully logged in! :raising_hands:"
124
199
  )
125
200
  print(
126
- "You're now using DeepEval with [rgb(106,0,255)]Confident AI[/rgb(106,0,255)]. Follow our quickstart tutorial here: [bold][link=https://www.confident-ai.com/docs/llm-evaluation/quickstart]https://www.confident-ai.com/docs/llm-evaluation/quickstart[/link][/bold]"
201
+ "You're now using DeepEval with [rgb(106,0,255)]Confident AI[/rgb(106,0,255)]. "
202
+ "Follow our quickstart tutorial here: "
203
+ "[bold][link=https://www.confident-ai.com/docs/llm-evaluation/quickstart]"
204
+ "https://www.confident-ai.com/docs/llm-evaluation/quickstart[/link][/bold]"
127
205
  )
128
- except:
129
- span.set_attribute("completed", False)
206
+ except Exception as e:
207
+ completed = False
208
+ print(f"Login failed: {e}")
209
+ finally:
210
+ if getattr(span, "set_attribute", None):
211
+ span.set_attribute("completed", completed)
130
212
 
131
213
 
132
214
  @app.command()
133
- def logout():
215
+ def logout(
216
+ save: Optional[str] = typer.Option(
217
+ None,
218
+ "--save",
219
+ help="Where to remove the saved key from. Use format dotenv[:path]. If omitted, logout removes from .env.local. JSON keystore is always cleared.",
220
+ )
221
+ ):
222
+ """
223
+ Log out of Confident AI.
224
+
225
+ Behavior:
226
+ - Always clears the Confident API key from the JSON keystore and process env.
227
+ - Also removes credentials from a dotenv file; defaults to .env.local.
228
+ Override the target with --save=dotenv[:path].
229
+ """
134
230
  set_confident_api_key(None)
231
+
232
+ # Remove from dotenv file (both names)
233
+ save_target = resolve_save_target(save) or "dotenv:.env.local"
234
+ if save_target:
235
+ handled, path = unset_environ_in_store(
236
+ save_target,
237
+ [
238
+ KeyValues.API_KEY,
239
+ CONFIDENT_API_KEY_ENV_VAR,
240
+ ],
241
+ )
242
+ if handled:
243
+ print(f"Removed Confident AI key(s) from {path}.")
244
+ else:
245
+ print("Unsupported --save option. Use --save=dotenv[:path].")
246
+ else:
247
+ print(
248
+ "Tip: remove keys from a dotenv file with --save=dotenv[:path] (default .env.local) "
249
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
250
+ )
251
+
135
252
  delete_file_if_exists(LATEST_TEST_RUN_FILE_PATH)
253
+
136
254
  print("\n🎉🥳 You've successfully logged out! :raising_hands: ")
137
255
 
138
256
 
@@ -181,37 +299,157 @@ def set_openai_env(
181
299
  "REQUIRED if you use a custom/unsupported model."
182
300
  ),
183
301
  ),
302
+ save: Optional[str] = typer.Option(
303
+ None,
304
+ "--save",
305
+ help="Persist CLI parameters as environment variables in a dotenv file. "
306
+ "Usage: --save=dotenv[:path] (default: .env.local)",
307
+ ),
184
308
  ):
185
- """Configure OpenAI as the active model.
186
-
187
- Notes:
188
- - If `model` is a known OpenAI model, costs can be omitted (built-in pricing will be used).
189
- - If `model` is custom/unsupported, you must pass both --cost_per_input_token and --cost_per_output_token.
309
+ """
310
+ Configure OpenAI as the active LLM provider.
311
+
312
+ What this does:
313
+ - Sets the active provider flag to `USE_OPENAI_MODEL`.
314
+ - Persists the selected model name and any cost overrides in the JSON store.
315
+ - No secrets are ever written to `.deepeval/.deepeval` (JSON).
316
+
317
+ Pricing rules:
318
+ - If `model` is a known OpenAI model, you may omit costs (built‑in pricing is used).
319
+ - If `model` is custom/unsupported, you must provide both
320
+ `--cost_per_input_token` and `--cost_per_output_token`.
321
+
322
+ Secrets & saving:
323
+ - Set your `OPENAI_API_KEY` via environment or a dotenv file.
324
+ - Pass `--save=dotenv[:path]` to write configuration to a dotenv file
325
+ (default: `.env.local`). Supported secrets, such as `OPENAI_API_KEY`, are
326
+ persisted there if present in your environment.
327
+
328
+ Args:
329
+ model: OpenAI model name, such as `gpt-4o-mini`.
330
+ cost_per_input_token: USD per input token (optional for known models).
331
+ cost_per_output_token: USD per output token (optional for known models).
332
+ save: Persist config (and supported secrets) to a dotenv file; format `dotenv[:path]`.
333
+
334
+ Example:
335
+ deepeval set-openai \\
336
+ --model gpt-4o-mini \\
337
+ --cost_per_input_token 0.0005 \\
338
+ --cost_per_output_token 0.0015 \\
339
+ --save dotenv:.env.local
190
340
  """
191
341
 
192
342
  clear_evaluation_model_keys()
193
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_OPENAI_MODEL, "YES")
194
343
  KEY_FILE_HANDLER.write_key(ModelKeyValues.OPENAI_MODEL_NAME, model)
195
- KEY_FILE_HANDLER.write_key(
196
- ModelKeyValues.OPENAI_COST_PER_INPUT_TOKEN, str(cost_per_input_token)
197
- )
198
- KEY_FILE_HANDLER.write_key(
199
- ModelKeyValues.OPENAI_COST_PER_OUTPUT_TOKEN, str(cost_per_output_token)
200
- )
344
+ if cost_per_input_token is not None:
345
+ KEY_FILE_HANDLER.write_key(
346
+ ModelKeyValues.OPENAI_COST_PER_INPUT_TOKEN,
347
+ str(cost_per_input_token),
348
+ )
349
+ if cost_per_output_token is not None:
350
+ KEY_FILE_HANDLER.write_key(
351
+ ModelKeyValues.OPENAI_COST_PER_OUTPUT_TOKEN,
352
+ str(cost_per_output_token),
353
+ )
354
+
355
+ save_target = resolve_save_target(save)
356
+ switch_model_provider(ModelKeyValues.USE_OPENAI_MODEL, save_target)
357
+ if save_target:
358
+ handled, path = save_environ_to_store(
359
+ save_target,
360
+ {
361
+ ModelKeyValues.OPENAI_MODEL_NAME: model,
362
+ **(
363
+ {
364
+ ModelKeyValues.OPENAI_COST_PER_INPUT_TOKEN: str(
365
+ cost_per_input_token
366
+ )
367
+ }
368
+ if cost_per_input_token is not None
369
+ else {}
370
+ ),
371
+ **(
372
+ {
373
+ ModelKeyValues.OPENAI_COST_PER_OUTPUT_TOKEN: str(
374
+ cost_per_output_token
375
+ )
376
+ }
377
+ if cost_per_output_token is not None
378
+ else {}
379
+ ),
380
+ },
381
+ )
382
+ if handled:
383
+ print(
384
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
385
+ )
386
+ else:
387
+ print("Unsupported --save option. Use --save=dotenv[:path].")
388
+
389
+ else:
390
+ print(
391
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
392
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
393
+ )
201
394
  print(
202
395
  f":raising_hands: Congratulations! You're now using OpenAI's `{model}` for all evals that require an LLM."
203
396
  )
204
397
 
205
398
 
206
399
  @app.command(name="unset-openai")
207
- def unset_openai_env():
400
+ def unset_openai_env(
401
+ save: Optional[str] = typer.Option(
402
+ None,
403
+ "--save",
404
+ help="Remove only the OpenAI related environment variables from a dotenv file. "
405
+ "Usage: --save=dotenv[:path] (default: .env.local)",
406
+ ),
407
+ ):
408
+ """
409
+ Unset OpenAI as the active provider.
410
+
411
+ Behavior:
412
+ - Removes OpenAI keys (model, costs, toggle) from the JSON store.
413
+ - If `--save` is provided, removes those keys from the specified dotenv file.
414
+ - After unsetting, if `OPENAI_API_KEY` is still set in the environment,
415
+ OpenAI may still be usable by default. Otherwise, no active provider is configured.
416
+
417
+ Args:
418
+ --save: Remove OpenAI keys from the given dotenv file as well.
419
+
420
+ Example:
421
+ deepeval unset-openai --save dotenv:.env.local
422
+ """
423
+
208
424
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.USE_OPENAI_MODEL)
209
425
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.OPENAI_MODEL_NAME)
210
426
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.OPENAI_COST_PER_INPUT_TOKEN)
211
427
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.OPENAI_COST_PER_OUTPUT_TOKEN)
212
- print(
213
- ":raising_hands: Congratulations! You're now using default OpenAI settings on DeepEval for all evals that require an LLM."
214
- )
428
+
429
+ save_target = resolve_save_target(save)
430
+ if save_target:
431
+ handled, path = unset_environ_in_store(
432
+ save_target,
433
+ [
434
+ ModelKeyValues.USE_OPENAI_MODEL,
435
+ ModelKeyValues.OPENAI_MODEL_NAME,
436
+ ModelKeyValues.OPENAI_COST_PER_INPUT_TOKEN,
437
+ ModelKeyValues.OPENAI_COST_PER_OUTPUT_TOKEN,
438
+ ],
439
+ )
440
+ if handled:
441
+ print(f"Removed OpenAI environment variables from {path}.")
442
+ else:
443
+ print("Unsupported --save option. Use --save=dotenv[:path].")
444
+
445
+ if is_openai_configured():
446
+ print(
447
+ ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
448
+ )
449
+ else:
450
+ print(
451
+ "OpenAI has been unset. No active provider is configured. Set one with the CLI, or add credentials to .env[.local]."
452
+ )
215
453
 
216
454
 
217
455
  #############################################
@@ -222,7 +460,9 @@ def unset_openai_env():
222
460
  @app.command(name="set-azure-openai")
223
461
  def set_azure_openai_env(
224
462
  azure_openai_api_key: str = typer.Option(
225
- ..., "--openai-api-key", help="Azure OpenAI API key"
463
+ ...,
464
+ "--openai-api-key",
465
+ help="Azure OpenAI API key (NOT persisted; set in .env[.local])",
226
466
  ),
227
467
  azure_openai_endpoint: str = typer.Option(
228
468
  ..., "--openai-endpoint", help="Azure OpenAI endpoint"
@@ -239,11 +479,16 @@ def set_azure_openai_env(
239
479
  azure_model_version: Optional[str] = typer.Option(
240
480
  None, "--model-version", help="Azure model version (optional)"
241
481
  ),
482
+ save: Optional[str] = typer.Option(
483
+ None,
484
+ "--save",
485
+ help="Persist CLI parameters as environment variables in a dotenv file. "
486
+ "Usage: --save=dotenv[:path] (default: .env.local)",
487
+ ),
242
488
  ):
489
+
243
490
  clear_evaluation_model_keys()
244
- KEY_FILE_HANDLER.write_key(
245
- ModelKeyValues.AZURE_OPENAI_API_KEY, azure_openai_api_key
246
- )
491
+
247
492
  KEY_FILE_HANDLER.write_key(
248
493
  ModelKeyValues.AZURE_MODEL_NAME, openai_model_name
249
494
  )
@@ -262,8 +507,37 @@ def set_azure_openai_env(
262
507
  ModelKeyValues.AZURE_MODEL_VERSION, azure_model_version
263
508
  )
264
509
 
265
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_AZURE_OPENAI, "YES")
266
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_LOCAL_MODEL, "NO")
510
+ save_target = resolve_save_target(save)
511
+ switch_model_provider(ModelKeyValues.USE_AZURE_OPENAI, save_target)
512
+
513
+ if save_target:
514
+ handled, path = save_environ_to_store(
515
+ save_target,
516
+ {
517
+ ModelKeyValues.AZURE_OPENAI_API_KEY: azure_openai_api_key,
518
+ ModelKeyValues.AZURE_OPENAI_ENDPOINT: azure_openai_endpoint,
519
+ ModelKeyValues.OPENAI_API_VERSION: openai_api_version,
520
+ ModelKeyValues.AZURE_DEPLOYMENT_NAME: azure_deployment_name,
521
+ ModelKeyValues.AZURE_MODEL_NAME: openai_model_name,
522
+ **(
523
+ {ModelKeyValues.AZURE_MODEL_VERSION: azure_model_version}
524
+ if azure_model_version
525
+ else {}
526
+ ),
527
+ },
528
+ )
529
+ if handled:
530
+ print(
531
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
532
+ )
533
+ else:
534
+ print("Unsupported --save option. Use --save=dotenv[:path].")
535
+
536
+ else:
537
+ print(
538
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
539
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
540
+ )
267
541
 
268
542
  print(
269
543
  ":raising_hands: Congratulations! You're now using Azure OpenAI for all evals that require an LLM."
@@ -277,23 +551,57 @@ def set_azure_openai_embedding_env(
277
551
  "--embedding-deployment-name",
278
552
  help="Azure embedding deployment name",
279
553
  ),
554
+ save: Optional[str] = typer.Option(
555
+ None,
556
+ "--save",
557
+ help="Persist CLI parameters as environment variables in a dotenv file. "
558
+ "Usage: --save=dotenv[:path] (default: .env.local)",
559
+ ),
280
560
  ):
281
561
  clear_embedding_model_keys()
282
562
  KEY_FILE_HANDLER.write_key(
283
563
  EmbeddingKeyValues.AZURE_EMBEDDING_DEPLOYMENT_NAME,
284
564
  azure_embedding_deployment_name,
285
565
  )
286
- KEY_FILE_HANDLER.write_key(
287
- EmbeddingKeyValues.USE_AZURE_OPENAI_EMBEDDING, "YES"
566
+
567
+ save_target = resolve_save_target(save)
568
+ switch_model_provider(
569
+ EmbeddingKeyValues.USE_AZURE_OPENAI_EMBEDDING, save_target
288
570
  )
289
- KEY_FILE_HANDLER.write_key(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS, "NO")
571
+ if save_target:
572
+ handled, path = save_environ_to_store(
573
+ save_target,
574
+ {
575
+ EmbeddingKeyValues.AZURE_EMBEDDING_DEPLOYMENT_NAME: azure_embedding_deployment_name,
576
+ },
577
+ )
578
+ if handled:
579
+ print(
580
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
581
+ )
582
+ else:
583
+ print("Unsupported --save option. Use --save=dotenv[:path].")
584
+
585
+ else:
586
+ print(
587
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
588
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
589
+ )
590
+
290
591
  print(
291
592
  ":raising_hands: Congratulations! You're now using Azure OpenAI Embeddings within DeepEval."
292
593
  )
293
594
 
294
595
 
295
596
  @app.command(name="unset-azure-openai")
296
- def unset_azure_openai_env():
597
+ def unset_azure_openai_env(
598
+ save: Optional[str] = typer.Option(
599
+ None,
600
+ "--save",
601
+ help="Remove only the Azure OpenAI–related environment variables from a dotenv file. "
602
+ "Usage: --save=dotenv[:path] (default: .env.local)",
603
+ )
604
+ ):
297
605
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.AZURE_OPENAI_API_KEY)
298
606
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.AZURE_OPENAI_ENDPOINT)
299
607
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.OPENAI_API_VERSION)
@@ -302,21 +610,73 @@ def unset_azure_openai_env():
302
610
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.AZURE_MODEL_VERSION)
303
611
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.USE_AZURE_OPENAI)
304
612
 
305
- print(
306
- ":raising_hands: Congratulations! You're now using regular OpenAI for all evals that require an LLM."
307
- )
613
+ save_target = resolve_save_target(save)
614
+ if save_target:
615
+ handled, path = unset_environ_in_store(
616
+ save_target,
617
+ [
618
+ ModelKeyValues.AZURE_OPENAI_API_KEY,
619
+ ModelKeyValues.AZURE_OPENAI_ENDPOINT,
620
+ ModelKeyValues.OPENAI_API_VERSION,
621
+ ModelKeyValues.AZURE_DEPLOYMENT_NAME,
622
+ ModelKeyValues.AZURE_MODEL_NAME,
623
+ ModelKeyValues.AZURE_MODEL_VERSION,
624
+ ModelKeyValues.USE_AZURE_OPENAI,
625
+ ],
626
+ )
627
+ if handled:
628
+ print(f"Removed Azure OpenAI environment variables from {path}.")
629
+ else:
630
+ print("Unsupported --save option. Use --save=dotenv[:path].")
631
+
632
+ if is_openai_configured():
633
+ print(
634
+ ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
635
+ )
636
+ else:
637
+ print(
638
+ "Azure OpenAI configuration removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
639
+ )
308
640
 
309
641
 
310
642
  @app.command(name="unset-azure-openai-embedding")
311
- def unset_azure_openai_embedding_env():
643
+ def unset_azure_openai_embedding_env(
644
+ save: Optional[str] = typer.Option(
645
+ None,
646
+ "--save",
647
+ help="Remove only the Azure OpenAI embedding related environment variables from a dotenv file. "
648
+ "Usage: --save=dotenv[:path] (default: .env.local)",
649
+ ),
650
+ ):
312
651
  KEY_FILE_HANDLER.remove_key(
313
652
  EmbeddingKeyValues.AZURE_EMBEDDING_DEPLOYMENT_NAME
314
653
  )
315
654
  KEY_FILE_HANDLER.remove_key(EmbeddingKeyValues.USE_AZURE_OPENAI_EMBEDDING)
316
655
 
317
- print(
318
- ":raising_hands: Congratulations! You're now using regular OpenAI embeddings for all evals that require text embeddings."
319
- )
656
+ save_target = resolve_save_target(save)
657
+ if save_target:
658
+ handled, path = unset_environ_in_store(
659
+ save_target,
660
+ [
661
+ EmbeddingKeyValues.AZURE_EMBEDDING_DEPLOYMENT_NAME,
662
+ EmbeddingKeyValues.USE_AZURE_OPENAI_EMBEDDING,
663
+ ],
664
+ )
665
+ if handled:
666
+ print(
667
+ f"Removed Azure OpenAI embedding environment variables from {path}."
668
+ )
669
+ else:
670
+ print("Unsupported --save option. Use --save=dotenv[:path].")
671
+
672
+ if is_openai_configured():
673
+ print(
674
+ ":raised_hands: Regular OpenAI embeddings will still be used by default because OPENAI_API_KEY is set."
675
+ )
676
+ else:
677
+ print(
678
+ "The Azure OpenAI embedding model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
679
+ )
320
680
 
321
681
 
322
682
  #############################################
@@ -333,27 +693,84 @@ def set_ollama_model_env(
333
693
  "--base-url",
334
694
  help="Base URL for the local model API",
335
695
  ),
696
+ save: Optional[str] = typer.Option(
697
+ None,
698
+ "--save",
699
+ help="Persist CLI parameters as environment variables in a dotenv file. "
700
+ "Usage: --save=dotenv[:path] (default: .env.local)",
701
+ ),
336
702
  ):
337
703
  clear_evaluation_model_keys()
338
704
  KEY_FILE_HANDLER.write_key(ModelKeyValues.LOCAL_MODEL_NAME, model_name)
339
705
  KEY_FILE_HANDLER.write_key(ModelKeyValues.LOCAL_MODEL_BASE_URL, base_url)
340
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_LOCAL_MODEL, "YES")
341
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_AZURE_OPENAI, "NO")
342
- KEY_FILE_HANDLER.write_key(ModelKeyValues.LOCAL_MODEL_API_KEY, "ollama")
706
+
707
+ save_target = resolve_save_target(save)
708
+ switch_model_provider(ModelKeyValues.USE_LOCAL_MODEL, save_target)
709
+ if save_target:
710
+ handled, path = save_environ_to_store(
711
+ save_target,
712
+ {
713
+ ModelKeyValues.LOCAL_MODEL_NAME: model_name,
714
+ ModelKeyValues.LOCAL_MODEL_BASE_URL: base_url,
715
+ ModelKeyValues.LOCAL_MODEL_API_KEY: "ollama",
716
+ },
717
+ )
718
+ if handled:
719
+ print(
720
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
721
+ )
722
+ else:
723
+ print("Unsupported --save option. Use --save=dotenv[:path].")
724
+
725
+ else:
726
+ print(
727
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
728
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
729
+ )
730
+
343
731
  print(
344
732
  ":raising_hands: Congratulations! You're now using a local Ollama model for all evals that require an LLM."
345
733
  )
346
734
 
347
735
 
348
736
  @app.command(name="unset-ollama")
349
- def unset_ollama_model_env():
737
+ def unset_ollama_model_env(
738
+ save: Optional[str] = typer.Option(
739
+ None,
740
+ "--save",
741
+ help="Remove only the Ollama related environment variables from a dotenv file. "
742
+ "Usage: --save=dotenv[:path] (default: .env.local)",
743
+ ),
744
+ ):
350
745
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.LOCAL_MODEL_NAME)
351
746
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.LOCAL_MODEL_BASE_URL)
352
747
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.USE_LOCAL_MODEL)
353
748
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.LOCAL_MODEL_API_KEY)
354
- print(
355
- ":raising_hands: Congratulations! You're now using regular OpenAI for all evals that require an LLM."
356
- )
749
+
750
+ save_target = resolve_save_target(save)
751
+ if save_target:
752
+ handled, path = unset_environ_in_store(
753
+ save_target,
754
+ [
755
+ ModelKeyValues.LOCAL_MODEL_NAME,
756
+ ModelKeyValues.LOCAL_MODEL_BASE_URL,
757
+ ModelKeyValues.USE_LOCAL_MODEL,
758
+ ModelKeyValues.LOCAL_MODEL_API_KEY,
759
+ ],
760
+ )
761
+ if handled:
762
+ print(f"Removed Ollama environment variables from {path}.")
763
+ else:
764
+ print("Unsupported --save option. Use --save=dotenv[:path].")
765
+
766
+ if is_openai_configured():
767
+ print(
768
+ ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
769
+ )
770
+ else:
771
+ print(
772
+ "Local Ollama model configuration removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
773
+ )
357
774
 
358
775
 
359
776
  @app.command(name="set-ollama-embeddings")
@@ -367,6 +784,12 @@ def set_ollama_embeddings_env(
367
784
  "--base-url",
368
785
  help="Base URL for the Ollama embedding model API",
369
786
  ),
787
+ save: Optional[str] = typer.Option(
788
+ None,
789
+ "--save",
790
+ help="Persist CLI parameters as environment variables in a dotenv file. "
791
+ "Usage: --save=dotenv[:path] (default: .env.local)",
792
+ ),
370
793
  ):
371
794
  clear_embedding_model_keys()
372
795
  KEY_FILE_HANDLER.write_key(
@@ -375,13 +798,30 @@ def set_ollama_embeddings_env(
375
798
  KEY_FILE_HANDLER.write_key(
376
799
  EmbeddingKeyValues.LOCAL_EMBEDDING_BASE_URL, base_url
377
800
  )
378
- KEY_FILE_HANDLER.write_key(
379
- EmbeddingKeyValues.LOCAL_EMBEDDING_API_KEY, "ollama"
380
- )
381
- KEY_FILE_HANDLER.write_key(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS, "YES")
382
- KEY_FILE_HANDLER.write_key(
383
- EmbeddingKeyValues.USE_AZURE_OPENAI_EMBEDDING, "NO"
384
- )
801
+
802
+ save_target = resolve_save_target(save)
803
+ switch_model_provider(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS, save_target)
804
+ if save_target:
805
+ handled, path = save_environ_to_store(
806
+ save_target,
807
+ {
808
+ EmbeddingKeyValues.LOCAL_EMBEDDING_MODEL_NAME: model_name,
809
+ EmbeddingKeyValues.LOCAL_EMBEDDING_BASE_URL: base_url,
810
+ EmbeddingKeyValues.LOCAL_EMBEDDING_API_KEY: "ollama",
811
+ },
812
+ )
813
+ if handled:
814
+ print(
815
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
816
+ )
817
+ else:
818
+ print("Unsupported --save option. Use --save=dotenv[:path].")
819
+
820
+ else:
821
+ print(
822
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
823
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
824
+ )
385
825
 
386
826
  print(
387
827
  ":raising_hands: Congratulations! You're now using Ollama embeddings for all evals that require text embeddings."
@@ -389,15 +829,45 @@ def set_ollama_embeddings_env(
389
829
 
390
830
 
391
831
  @app.command(name="unset-ollama-embeddings")
392
- def unset_ollama_embeddings_env():
832
+ def unset_ollama_embeddings_env(
833
+ save: Optional[str] = typer.Option(
834
+ None,
835
+ "--save",
836
+ help="Remove only the Ollama embedding related environment variables from a dotenv file. "
837
+ "Usage: --save=dotenv[:path] (default: .env.local)",
838
+ ),
839
+ ):
393
840
  KEY_FILE_HANDLER.remove_key(EmbeddingKeyValues.LOCAL_EMBEDDING_MODEL_NAME)
394
841
  KEY_FILE_HANDLER.remove_key(EmbeddingKeyValues.LOCAL_EMBEDDING_BASE_URL)
395
842
  KEY_FILE_HANDLER.remove_key(EmbeddingKeyValues.LOCAL_EMBEDDING_API_KEY)
396
843
  KEY_FILE_HANDLER.remove_key(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS)
397
844
 
398
- print(
399
- ":raising_hands: Congratulations! You're now using regular OpenAI embeddings for all evals that require text embeddings."
400
- )
845
+ save_target = resolve_save_target(save)
846
+ if save_target:
847
+ handled, path = unset_environ_in_store(
848
+ save_target,
849
+ [
850
+ EmbeddingKeyValues.LOCAL_EMBEDDING_MODEL_NAME,
851
+ EmbeddingKeyValues.LOCAL_EMBEDDING_BASE_URL,
852
+ EmbeddingKeyValues.LOCAL_EMBEDDING_API_KEY,
853
+ EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS,
854
+ ],
855
+ )
856
+ if handled:
857
+ print(
858
+ f"Removed Ollama embedding environment variables from {path}."
859
+ )
860
+ else:
861
+ print("Unsupported --save option. Use --save=dotenv[:path].")
862
+
863
+ if is_openai_configured():
864
+ print(
865
+ ":raised_hands: Regular OpenAI will still be used by default because OPENAI_API_KEY is set."
866
+ )
867
+ else:
868
+ print(
869
+ "The local Ollama embedding model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
870
+ )
401
871
 
402
872
 
403
873
  #############################################
@@ -414,39 +884,107 @@ def set_local_model_env(
414
884
  ..., "--base-url", help="Base URL for the local model API"
415
885
  ),
416
886
  api_key: Optional[str] = typer.Option(
417
- None, "--api-key", help="API key for the local model (if required)"
887
+ None,
888
+ "--api-key",
889
+ help="API key for the local model (if required) (NOT persisted; set in .env[.local])",
418
890
  ),
419
891
  format: Optional[str] = typer.Option(
420
892
  "json",
421
893
  "--format",
422
894
  help="Format of the response from the local model (default: json)",
423
895
  ),
896
+ save: Optional[str] = typer.Option(
897
+ None,
898
+ "--save",
899
+ help="Persist CLI parameters as environment variables in a dotenv file. "
900
+ "Usage: --save=dotenv[:path] (default: .env.local)",
901
+ ),
424
902
  ):
425
903
  clear_evaluation_model_keys()
426
904
  KEY_FILE_HANDLER.write_key(ModelKeyValues.LOCAL_MODEL_NAME, model_name)
427
905
  KEY_FILE_HANDLER.write_key(ModelKeyValues.LOCAL_MODEL_BASE_URL, base_url)
428
- if api_key:
429
- KEY_FILE_HANDLER.write_key(ModelKeyValues.LOCAL_MODEL_API_KEY, api_key)
906
+
430
907
  if format:
431
908
  KEY_FILE_HANDLER.write_key(ModelKeyValues.LOCAL_MODEL_FORMAT, format)
432
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_LOCAL_MODEL, "YES")
433
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_AZURE_OPENAI, "NO")
909
+
910
+ save_target = resolve_save_target(save)
911
+ switch_model_provider(ModelKeyValues.USE_LOCAL_MODEL, save_target)
912
+ if save_target:
913
+ handled, path = save_environ_to_store(
914
+ save_target,
915
+ {
916
+ ModelKeyValues.LOCAL_MODEL_NAME: model_name,
917
+ ModelKeyValues.LOCAL_MODEL_BASE_URL: base_url,
918
+ **(
919
+ {ModelKeyValues.LOCAL_MODEL_API_KEY: api_key}
920
+ if api_key
921
+ else {}
922
+ ),
923
+ **(
924
+ {ModelKeyValues.LOCAL_MODEL_FORMAT: format}
925
+ if format
926
+ else {}
927
+ ),
928
+ },
929
+ )
930
+
931
+ if handled:
932
+ print(
933
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
934
+ )
935
+ else:
936
+ print("Unsupported --save option. Use --save=dotenv[:path].")
937
+
938
+ else:
939
+ print(
940
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
941
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
942
+ )
943
+
434
944
  print(
435
945
  ":raising_hands: Congratulations! You're now using a local model for all evals that require an LLM."
436
946
  )
437
947
 
438
948
 
439
949
  @app.command(name="unset-local-model")
440
- def unset_local_model_env():
950
+ def unset_local_model_env(
951
+ save: Optional[str] = typer.Option(
952
+ None,
953
+ "--save",
954
+ help="Remove only the local model related environment variables from a dotenv file. "
955
+ "Usage: --save=dotenv[:path] (default: .env.local)",
956
+ ),
957
+ ):
441
958
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.LOCAL_MODEL_NAME)
442
959
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.LOCAL_MODEL_BASE_URL)
443
960
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.LOCAL_MODEL_API_KEY)
444
961
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.LOCAL_MODEL_FORMAT)
445
962
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.USE_LOCAL_MODEL)
446
963
 
447
- print(
448
- ":raising_hands: Congratulations! You're now using regular OpenAI for all evals that require an LLM."
449
- )
964
+ save_target = resolve_save_target(save)
965
+ if save_target:
966
+ handled, path = unset_environ_in_store(
967
+ save_target,
968
+ [
969
+ ModelKeyValues.LOCAL_MODEL_NAME,
970
+ ModelKeyValues.LOCAL_MODEL_BASE_URL,
971
+ ModelKeyValues.USE_LOCAL_MODEL,
972
+ ModelKeyValues.LOCAL_MODEL_API_KEY,
973
+ ModelKeyValues.LOCAL_MODEL_FORMAT,
974
+ ],
975
+ )
976
+ if handled:
977
+ print(f"Removed local model environment variables from {path}.")
978
+ else:
979
+ print("Unsupported --save option. Use --save=dotenv[:path].")
980
+ if is_openai_configured():
981
+ print(
982
+ ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
983
+ )
984
+ else:
985
+ print(
986
+ "The local model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
987
+ )
450
988
 
451
989
 
452
990
  #############################################
@@ -460,31 +998,90 @@ def set_grok_model_env(
460
998
  ..., "--model-name", help="Name of the Grok model"
461
999
  ),
462
1000
  api_key: str = typer.Option(
463
- ..., "--api-key", help="API key for the Grok model"
1001
+ ...,
1002
+ "--api-key",
1003
+ help="API key for the Grok model (NOT persisted; set in .env[.local])",
464
1004
  ),
465
1005
  temperature: float = typer.Option(
466
1006
  0, "--temperature", help="Temperature for the Grok model"
467
1007
  ),
1008
+ save: Optional[str] = typer.Option(
1009
+ None,
1010
+ "--save",
1011
+ help="Persist CLI parameters as environment variables in a dotenv file. "
1012
+ "Usage: --save=dotenv[:path] (default: .env.local)",
1013
+ ),
468
1014
  ):
469
1015
  clear_evaluation_model_keys()
470
1016
  KEY_FILE_HANDLER.write_key(ModelKeyValues.GROK_MODEL_NAME, model_name)
471
- KEY_FILE_HANDLER.write_key(ModelKeyValues.GROK_API_KEY, api_key)
472
1017
  KEY_FILE_HANDLER.write_key(ModelKeyValues.TEMPERATURE, str(temperature))
473
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_GROK_MODEL, "YES")
1018
+
1019
+ save_target = resolve_save_target(save)
1020
+ switch_model_provider(ModelKeyValues.USE_GROK_MODEL, save_target)
1021
+ if save_target:
1022
+ handled, path = save_environ_to_store(
1023
+ save_target,
1024
+ {
1025
+ ModelKeyValues.GROK_MODEL_NAME: model_name,
1026
+ ModelKeyValues.GROK_API_KEY: api_key,
1027
+ ModelKeyValues.TEMPERATURE: str(temperature),
1028
+ },
1029
+ )
1030
+
1031
+ if handled:
1032
+ print(
1033
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
1034
+ )
1035
+ else:
1036
+ print("Unsupported --save option. Use --save=dotenv[:path].")
1037
+ else:
1038
+ print(
1039
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
1040
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
1041
+ )
474
1042
  print(
475
1043
  ":raising_hands: Congratulations! You're now using a Grok model for all evals that require an LLM."
476
1044
  )
477
1045
 
478
1046
 
479
1047
@app.command(name="unset-grok")
def unset_grok_model_env(
    save: Optional[str] = typer.Option(
        None,
        "--save",
        help="Remove only the Grok model related environment variables from a dotenv file. "
        "Usage: --save=dotenv[:path] (default: .env.local)",
    ),
):
    """Remove the Grok model configuration.

    Always clears the Grok keys from the hidden JSON store; when
    --save=dotenv[:path] is given, also removes them from that dotenv file.
    """
    grok_keys = [
        ModelKeyValues.GROK_MODEL_NAME,
        ModelKeyValues.GROK_API_KEY,
        ModelKeyValues.TEMPERATURE,
        ModelKeyValues.USE_GROK_MODEL,
    ]
    # Same key set is cleared from both the JSON store and the dotenv file.
    for key in grok_keys:
        KEY_FILE_HANDLER.remove_key(key)

    target = resolve_save_target(save)
    if target:
        handled, path = unset_environ_in_store(target, list(grok_keys))
        print(
            f"Removed Grok model environment variables from {path}."
            if handled
            else "Unsupported --save option. Use --save=dotenv[:path]."
        )

    # Report which provider, if any, remains active after the removal.
    if is_openai_configured():
        print(
            ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
        )
    else:
        print(
            "The Grok model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
        )
488
1085
 
489
1086
 
490
1087
  #############################################
@@ -498,31 +1095,91 @@ def set_moonshot_model_env(
498
1095
  ..., "--model-name", help="Name of the Moonshot model"
499
1096
  ),
500
1097
  api_key: str = typer.Option(
501
- ..., "--api-key", help="API key for the Moonshot model"
1098
+ ...,
1099
+ "--api-key",
1100
+ help="API key for the Moonshot model (NOT persisted; set in .env[.local])",
502
1101
  ),
503
1102
  temperature: float = typer.Option(
504
1103
  0, "--temperature", help="Temperature for the Moonshot model"
505
1104
  ),
1105
+ save: Optional[str] = typer.Option(
1106
+ None,
1107
+ "--save",
1108
+ help="Persist CLI parameters as environment variables in a dotenv file. "
1109
+ "Usage: --save=dotenv[:path] (default: .env.local)",
1110
+ ),
506
1111
  ):
507
1112
  clear_evaluation_model_keys()
508
1113
  KEY_FILE_HANDLER.write_key(ModelKeyValues.MOONSHOT_MODEL_NAME, model_name)
509
- KEY_FILE_HANDLER.write_key(ModelKeyValues.MOONSHOT_API_KEY, api_key)
510
1114
  KEY_FILE_HANDLER.write_key(ModelKeyValues.TEMPERATURE, str(temperature))
511
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_MOONSHOT_MODEL, "YES")
1115
+
1116
+ save_target = resolve_save_target(save)
1117
+ switch_model_provider(ModelKeyValues.USE_MOONSHOT_MODEL, save_target)
1118
+ if save_target:
1119
+ handled, path = save_environ_to_store(
1120
+ save_target,
1121
+ {
1122
+ ModelKeyValues.MOONSHOT_MODEL_NAME: model_name,
1123
+ ModelKeyValues.MOONSHOT_API_KEY: api_key,
1124
+ ModelKeyValues.TEMPERATURE: str(temperature),
1125
+ },
1126
+ )
1127
+
1128
+ if handled:
1129
+ print(
1130
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
1131
+ )
1132
+ else:
1133
+ print("Unsupported --save option. Use --save=dotenv[:path].")
1134
+
1135
+ else:
1136
+ print(
1137
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
1138
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
1139
+ )
512
1140
  print(
513
1141
  ":raising_hands: Congratulations! You're now using a Moonshot model for all evals that require an LLM."
514
1142
  )
515
1143
 
516
1144
 
517
1145
@app.command(name="unset-moonshot")
def unset_moonshot_model_env(
    save: Optional[str] = typer.Option(
        None,
        "--save",
        help="Remove only the Moonshot model related environment variables from a dotenv file. "
        "Usage: --save=dotenv[:path] (default: .env.local)",
    ),
):
    """Remove the Moonshot model configuration.

    Always clears the Moonshot keys from the hidden JSON store; when
    --save=dotenv[:path] is given, also removes them from that dotenv file.
    """
    moonshot_keys = [
        ModelKeyValues.MOONSHOT_MODEL_NAME,
        ModelKeyValues.MOONSHOT_API_KEY,
        ModelKeyValues.TEMPERATURE,
        ModelKeyValues.USE_MOONSHOT_MODEL,
    ]
    # Same key set is cleared from both the JSON store and the dotenv file.
    for key in moonshot_keys:
        KEY_FILE_HANDLER.remove_key(key)

    target = resolve_save_target(save)
    if target:
        handled, path = unset_environ_in_store(target, list(moonshot_keys))
        if handled:
            print(f"Removed Moonshot model environment variables from {path}.")
        else:
            print("Unsupported --save option. Use --save=dotenv[:path].")

    # Report which provider, if any, remains active after the removal.
    if is_openai_configured():
        print(
            ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
        )
    else:
        print(
            "The Moonshot model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
        )
526
1183
 
527
1184
 
528
1185
  #############################################
@@ -536,31 +1193,91 @@ def set_deepseek_model_env(
536
1193
  ..., "--model-name", help="Name of the DeepSeek model"
537
1194
  ),
538
1195
  api_key: str = typer.Option(
539
- ..., "--api-key", help="API key for the DeepSeek model"
1196
+ ...,
1197
+ "--api-key",
1198
+ help="API key for the DeepSeek model (NOT persisted; set in .env[.local])",
540
1199
  ),
541
1200
  temperature: float = typer.Option(
542
1201
  0, "--temperature", help="Temperature for the DeepSeek model"
543
1202
  ),
1203
+ save: Optional[str] = typer.Option(
1204
+ None,
1205
+ "--save",
1206
+ help="Persist CLI parameters as environment variables in a dotenv file. "
1207
+ "Usage: --save=dotenv[:path] (default: .env.local)",
1208
+ ),
544
1209
  ):
545
1210
  clear_evaluation_model_keys()
546
1211
  KEY_FILE_HANDLER.write_key(ModelKeyValues.DEEPSEEK_MODEL_NAME, model_name)
547
- KEY_FILE_HANDLER.write_key(ModelKeyValues.DEEPSEEK_API_KEY, api_key)
548
1212
  KEY_FILE_HANDLER.write_key(ModelKeyValues.TEMPERATURE, str(temperature))
549
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_DEEPSEEK_MODEL, "YES")
1213
+
1214
+ save_target = resolve_save_target(save)
1215
+ switch_model_provider(ModelKeyValues.USE_DEEPSEEK_MODEL, save_target)
1216
+ if save_target:
1217
+ handled, path = save_environ_to_store(
1218
+ save_target,
1219
+ {
1220
+ ModelKeyValues.DEEPSEEK_MODEL_NAME: model_name,
1221
+ ModelKeyValues.DEEPSEEK_API_KEY: api_key,
1222
+ ModelKeyValues.TEMPERATURE: str(temperature),
1223
+ },
1224
+ )
1225
+
1226
+ if handled:
1227
+ print(
1228
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
1229
+ )
1230
+ else:
1231
+ print("Unsupported --save option. Use --save=dotenv[:path].")
1232
+
1233
+ else:
1234
+ print(
1235
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
1236
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
1237
+ )
550
1238
  print(
551
1239
  ":raising_hands: Congratulations! You're now using a DeepSeek model for all evals that require an LLM."
552
1240
  )
553
1241
 
554
1242
 
555
1243
@app.command(name="unset-deepseek")
def unset_deepseek_model_env(
    save: Optional[str] = typer.Option(
        None,
        "--save",
        help="Remove only the DeepSeek model related environment variables from a dotenv file. "
        "Usage: --save=dotenv[:path] (default: .env.local)",
    ),
):
    """Remove the DeepSeek model configuration.

    Always clears the DeepSeek keys from the hidden JSON store; when
    --save=dotenv[:path] is given, also removes them from that dotenv file.
    Other providers' credentials (e.g. OPENAI_API_KEY) are left untouched.
    """
    deepseek_keys = [
        ModelKeyValues.DEEPSEEK_MODEL_NAME,
        ModelKeyValues.DEEPSEEK_API_KEY,
        ModelKeyValues.TEMPERATURE,
        ModelKeyValues.USE_DEEPSEEK_MODEL,
    ]
    # Same key set is cleared from both the JSON store and the dotenv file.
    for key in deepseek_keys:
        KEY_FILE_HANDLER.remove_key(key)

    save_target = resolve_save_target(save)
    if save_target:
        handled, path = unset_environ_in_store(save_target, list(deepseek_keys))
        if handled:
            print(f"Removed DeepSeek model environment variables from {path}.")
        else:
            print("Unsupported --save option. Use --save=dotenv[:path].")

    # Report which provider, if any, remains active after the removal.
    if is_openai_configured():
        print(
            ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
        )
    else:
        # Fix: "DeepSeek" is now capitalized consistently with every other
        # user-facing message in this module (was "Deepseek").
        print(
            "The DeepSeek model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
        )
564
1281
 
565
1282
 
566
1283
  #############################################
@@ -577,7 +1294,15 @@ def set_local_embeddings_env(
577
1294
  ..., "--base-url", help="Base URL for the local embeddings API"
578
1295
  ),
579
1296
  api_key: Optional[str] = typer.Option(
580
- None, "--api-key", help="API key for the local embeddings (if required)"
1297
+ None,
1298
+ "--api-key",
1299
+ help="API key for the local embeddings (if required) (NOT persisted; set in .env[.local])",
1300
+ ),
1301
+ save: Optional[str] = typer.Option(
1302
+ None,
1303
+ "--save",
1304
+ help="Persist CLI parameters as environment variables in a dotenv file. "
1305
+ "Usage: --save=dotenv[:path] (default: .env.local)",
581
1306
  ),
582
1307
  ):
583
1308
  clear_embedding_model_keys()
@@ -587,35 +1312,80 @@ def set_local_embeddings_env(
587
1312
  KEY_FILE_HANDLER.write_key(
588
1313
  EmbeddingKeyValues.LOCAL_EMBEDDING_BASE_URL, base_url
589
1314
  )
590
- if api_key:
591
- KEY_FILE_HANDLER.write_key(
592
- EmbeddingKeyValues.LOCAL_EMBEDDING_API_KEY, api_key
593
- )
594
-
595
- KEY_FILE_HANDLER.write_key(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS, "YES")
596
- KEY_FILE_HANDLER.write_key(
597
- EmbeddingKeyValues.USE_AZURE_OPENAI_EMBEDDING, "NO"
598
- )
599
1315
 
1316
+ save_target = resolve_save_target(save)
1317
+ switch_model_provider(EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS, save_target)
1318
+ if save_target:
1319
+ handled, path = save_environ_to_store(
1320
+ save_target,
1321
+ {
1322
+ EmbeddingKeyValues.LOCAL_EMBEDDING_MODEL_NAME: model_name,
1323
+ EmbeddingKeyValues.LOCAL_EMBEDDING_BASE_URL: base_url,
1324
+ **(
1325
+ {EmbeddingKeyValues.LOCAL_EMBEDDING_API_KEY: api_key}
1326
+ if api_key
1327
+ else {}
1328
+ ),
1329
+ },
1330
+ )
1331
+ if handled:
1332
+ print(
1333
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
1334
+ )
1335
+ else:
1336
+ print("Unsupported --save option. Use --save=dotenv[:path].")
1337
+ else:
1338
+ print(
1339
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
1340
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
1341
+ )
600
1342
  print(
601
1343
  ":raising_hands: Congratulations! You're now using local embeddings for all evals that require text embeddings."
602
1344
  )
603
1345
 
604
1346
 
605
1347
@app.command(name="unset-local-embeddings")
def unset_local_embeddings_env(
    save: Optional[str] = typer.Option(
        None,
        "--save",
        help="Remove only the local embedding related environment variables from a dotenv file. "
        "Usage: --save=dotenv[:path] (default: .env.local)",
    ),
):
    """Remove the local embeddings configuration.

    Always clears the local-embedding keys from the hidden JSON store; when
    --save=dotenv[:path] is given, also removes them from that dotenv file.
    """
    embedding_keys = [
        EmbeddingKeyValues.LOCAL_EMBEDDING_MODEL_NAME,
        EmbeddingKeyValues.LOCAL_EMBEDDING_BASE_URL,
        EmbeddingKeyValues.LOCAL_EMBEDDING_API_KEY,
        EmbeddingKeyValues.USE_LOCAL_EMBEDDINGS,
    ]
    # Same key set is cleared from both the JSON store and the dotenv file.
    for key in embedding_keys:
        KEY_FILE_HANDLER.remove_key(key)

    target = resolve_save_target(save)
    if target:
        handled, path = unset_environ_in_store(target, list(embedding_keys))
        print(
            f"Removed local embedding environment variables from {path}."
            if handled
            else "Unsupported --save option. Use --save=dotenv[:path]."
        )

    # Report which provider, if any, remains active after the removal.
    if is_openai_configured():
        print(
            ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
        )
    else:
        print(
            "The local embeddings model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
        )
615
1385
 
616
1386
 
617
1387
  #############################################
618
- # Ollama Integration ########################
1388
+ # Gemini Integration ########################
619
1389
  #############################################
620
1390
 
621
1391
 
@@ -625,7 +1395,9 @@ def set_gemini_model_env(
625
1395
  None, "--model-name", help="Gemini Model name"
626
1396
  ),
627
1397
  google_api_key: Optional[str] = typer.Option(
628
- None, "--google-api-key", help="Google API Key for Gemini"
1398
+ None,
1399
+ "--google-api-key",
1400
+ help="Google API Key for Gemini (NOT persisted; set in .env[.local])",
629
1401
  ),
630
1402
  google_cloud_project: Optional[str] = typer.Option(
631
1403
  None, "--project-id", help="Google Cloud project ID"
@@ -633,6 +1405,12 @@ def set_gemini_model_env(
633
1405
  google_cloud_location: Optional[str] = typer.Option(
634
1406
  None, "--location", help="Google Cloud location"
635
1407
  ),
1408
+ save: Optional[str] = typer.Option(
1409
+ None,
1410
+ "--save",
1411
+ help="Persist CLI parameters as environment variables in a dotenv file. "
1412
+ "Usage: --save=dotenv[:path] (default: .env.local)",
1413
+ ),
636
1414
  ):
637
1415
  clear_evaluation_model_keys()
638
1416
  if not google_api_key and not (
@@ -643,14 +1421,10 @@ def set_gemini_model_env(
643
1421
  err=True,
644
1422
  )
645
1423
  raise typer.Exit(code=1)
646
- KEY_FILE_HANDLER.write_key(ModelKeyValues.USE_GEMINI_MODEL, "YES")
647
1424
  if model_name is not None:
648
1425
  KEY_FILE_HANDLER.write_key(ModelKeyValues.GEMINI_MODEL_NAME, model_name)
649
- if google_api_key is not None:
650
- KEY_FILE_HANDLER.write_key(
651
- ModelKeyValues.GOOGLE_API_KEY, google_api_key
652
- )
653
- else:
1426
+
1427
+ if google_api_key is None:
654
1428
  KEY_FILE_HANDLER.write_key(
655
1429
  ModelKeyValues.GOOGLE_GENAI_USE_VERTEXAI, "YES"
656
1430
  )
@@ -663,13 +1437,65 @@ def set_gemini_model_env(
663
1437
  KEY_FILE_HANDLER.write_key(
664
1438
  ModelKeyValues.GOOGLE_CLOUD_LOCATION, google_cloud_location
665
1439
  )
1440
+
1441
+ save_target = resolve_save_target(save)
1442
+ switch_model_provider(ModelKeyValues.USE_GEMINI_MODEL, save_target)
1443
+ if save_target:
1444
+ handled, path = save_environ_to_store(
1445
+ save_target,
1446
+ {
1447
+ **(
1448
+ {ModelKeyValues.GOOGLE_API_KEY: google_api_key}
1449
+ if google_api_key
1450
+ else {ModelKeyValues.GOOGLE_GENAI_USE_VERTEXAI: "YES"}
1451
+ ),
1452
+ **(
1453
+ {ModelKeyValues.GEMINI_MODEL_NAME: model_name}
1454
+ if model_name
1455
+ else {}
1456
+ ),
1457
+ **(
1458
+ {ModelKeyValues.GOOGLE_CLOUD_PROJECT: google_cloud_project}
1459
+ if google_cloud_project
1460
+ else {}
1461
+ ),
1462
+ **(
1463
+ {
1464
+ ModelKeyValues.GOOGLE_CLOUD_LOCATION: google_cloud_location
1465
+ }
1466
+ if google_cloud_location
1467
+ else {}
1468
+ ),
1469
+ },
1470
+ )
1471
+
1472
+ if handled:
1473
+ print(
1474
+ f"Saved environment variables to {path} (ensure it's git-ignored)."
1475
+ )
1476
+ else:
1477
+ print("Unsupported --save option. Use --save=dotenv[:path].")
1478
+
1479
+ else:
1480
+ print(
1481
+ "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
1482
+ "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
1483
+ )
1484
+
666
1485
  print(
667
1486
  ":raising_hands: Congratulations! You're now using a Gemini model for all evals that require an LLM."
668
1487
  )
669
1488
 
670
1489
 
671
1490
  @app.command(name="unset-gemini")
672
- def unset_gemini_model_env():
1491
+ def unset_gemini_model_env(
1492
+ save: Optional[str] = typer.Option(
1493
+ None,
1494
+ "--save",
1495
+ help="Remove only the Gemini related environment variables from a dotenv file. "
1496
+ "Usage: --save=dotenv[:path] (default: .env.local)",
1497
+ ),
1498
+ ):
673
1499
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.USE_GEMINI_MODEL)
674
1500
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.GEMINI_MODEL_NAME)
675
1501
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.GOOGLE_API_KEY)
@@ -677,47 +1503,128 @@ def unset_gemini_model_env():
677
1503
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.GOOGLE_CLOUD_LOCATION)
678
1504
  KEY_FILE_HANDLER.remove_key(ModelKeyValues.GOOGLE_GENAI_USE_VERTEXAI)
679
1505
 
680
- print(
681
- ":raised_hands: Gemini model has been unset. You're now using regular OpenAI for all evals that require an LLM."
682
- )
1506
+ save_target = resolve_save_target(save)
1507
+ if save_target:
1508
+ handled, path = unset_environ_in_store(
1509
+ save_target,
1510
+ [
1511
+ ModelKeyValues.USE_GEMINI_MODEL,
1512
+ ModelKeyValues.GEMINI_MODEL_NAME,
1513
+ ModelKeyValues.GOOGLE_API_KEY,
1514
+ ModelKeyValues.GOOGLE_CLOUD_PROJECT,
1515
+ ModelKeyValues.GOOGLE_CLOUD_LOCATION,
1516
+ ModelKeyValues.GOOGLE_GENAI_USE_VERTEXAI,
1517
+ ],
1518
+ )
1519
+ if handled:
1520
+ print(f"Removed Gemini environment variables from {path}.")
1521
+ else:
1522
+ print("Unsupported --save option. Use --save=dotenv[:path].")
1523
+
1524
+ if is_openai_configured():
1525
+ print(
1526
+ ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
1527
+ )
1528
+ else:
1529
+ print(
1530
+ "The Gemini model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
1531
+ )
683
1532
 
684
1533
 
685
1534
@app.command(name="set-litellm")
def set_litellm_model_env(
    model_name: str = typer.Argument(..., help="Name of the LiteLLM model"),
    api_key: Optional[str] = typer.Option(
        None,
        "--api-key",
        help="API key for the model (if required) (NOT persisted; set in .env[.local])",
    ),
    api_base: Optional[str] = typer.Option(
        None, "--api-base", help="Base URL for the model API (if required)"
    ),
    save: Optional[str] = typer.Option(
        None,
        "--save",
        help="Persist CLI parameters as environment variables in a dotenv file. "
        "Usage: --save=dotenv[:path] (default: .env.local)",
    ),
):
    """Select a LiteLLM model as the default evaluation LLM.

    Non-secret settings (model name, optional API base) go to the hidden
    JSON store. The API key is written only to a dotenv file when --save
    is supplied — never to the JSON store.
    """
    clear_evaluation_model_keys()
    KEY_FILE_HANDLER.write_key(ModelKeyValues.LITELLM_MODEL_NAME, model_name)
    if api_base:
        KEY_FILE_HANDLER.write_key(ModelKeyValues.LITELLM_API_BASE, api_base)

    target = resolve_save_target(save)
    # Flip the provider toggles so LiteLLM becomes the active provider.
    switch_model_provider(ModelKeyValues.USE_LITELLM, target)
    if target:
        # Build the dotenv payload; optional values are included only when set.
        env_vars = {ModelKeyValues.LITELLM_MODEL_NAME: model_name}
        if api_key:
            env_vars[ModelKeyValues.LITELLM_API_KEY] = api_key
        if api_base:
            env_vars[ModelKeyValues.LITELLM_API_BASE] = api_base
        handled, path = save_environ_to_store(target, env_vars)
        if handled:
            print(
                f"Saved environment variables to {path} (ensure it's git-ignored)."
            )
        else:
            print("Unsupported --save option. Use --save=dotenv[:path].")
    else:
        print(
            "Tip: persist these settings to a dotenv file with --save=dotenv[:path] (default .env.local) "
            "or set DEEPEVAL_DEFAULT_SAVE=dotenv:.env.local"
        )
    print(
        ":raising_hands: Congratulations! You're now using a LiteLLM model for all evals that require an LLM."
    )
709
1589
 
710
1590
 
711
1591
@app.command(name="unset-litellm")
def unset_litellm_model_env(
    save: Optional[str] = typer.Option(
        None,
        "--save",
        help="Remove only the LiteLLM related environment variables from a dotenv file. "
        "Usage: --save=dotenv[:path] (default: .env.local)",
    ),
):
    """Remove the LiteLLM model configuration.

    Always clears the LiteLLM keys from the hidden JSON store; when
    --save=dotenv[:path] is given, also removes them from that dotenv file.
    """
    litellm_keys = [
        ModelKeyValues.LITELLM_MODEL_NAME,
        ModelKeyValues.LITELLM_API_KEY,
        ModelKeyValues.LITELLM_API_BASE,
        ModelKeyValues.USE_LITELLM,
    ]
    # Same key set is cleared from both the JSON store and the dotenv file.
    for key in litellm_keys:
        KEY_FILE_HANDLER.remove_key(key)

    target = resolve_save_target(save)
    if target:
        handled, path = unset_environ_in_store(target, list(litellm_keys))
        print(
            f"Removed LiteLLM environment variables from {path}."
            if handled
            else "Unsupported --save option. Use --save=dotenv[:path]."
        )
    # Report which provider, if any, remains active after the removal.
    if is_openai_configured():
        print(
            ":raised_hands: OpenAI will still be used by default because OPENAI_API_KEY is set."
        )
    else:
        print(
            "The LiteLLM model configuration has been removed. No model is currently configured, but you can set one with the CLI or add credentials to .env[.local]."
        )
721
1628
 
722
1629
 
723
1630
  if __name__ == "__main__":