deepeval 3.7.2__py3-none-any.whl → 3.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. deepeval/_version.py +1 -1
  2. deepeval/benchmarks/human_eval/human_eval.py +2 -1
  3. deepeval/cli/test.py +1 -1
  4. deepeval/config/settings.py +102 -13
  5. deepeval/dataset/dataset.py +35 -11
  6. deepeval/dataset/utils.py +2 -0
  7. deepeval/evaluate/configs.py +1 -1
  8. deepeval/evaluate/execute.py +4 -1
  9. deepeval/metrics/answer_relevancy/template.py +4 -4
  10. deepeval/metrics/argument_correctness/template.py +2 -2
  11. deepeval/metrics/bias/template.py +3 -3
  12. deepeval/metrics/contextual_precision/template.py +6 -6
  13. deepeval/metrics/contextual_recall/template.py +2 -2
  14. deepeval/metrics/contextual_relevancy/template.py +3 -3
  15. deepeval/metrics/conversation_completeness/template.py +2 -2
  16. deepeval/metrics/conversational_dag/templates.py +4 -4
  17. deepeval/metrics/conversational_g_eval/template.py +4 -3
  18. deepeval/metrics/dag/templates.py +4 -4
  19. deepeval/metrics/faithfulness/template.py +4 -4
  20. deepeval/metrics/hallucination/template.py +4 -4
  21. deepeval/metrics/misuse/template.py +2 -2
  22. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +7 -7
  23. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +6 -6
  24. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +2 -2
  25. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +3 -3
  26. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +9 -9
  27. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +4 -4
  28. deepeval/metrics/non_advice/template.py +2 -2
  29. deepeval/metrics/pii_leakage/template.py +2 -2
  30. deepeval/metrics/prompt_alignment/template.py +4 -4
  31. deepeval/metrics/role_violation/template.py +2 -2
  32. deepeval/metrics/step_efficiency/step_efficiency.py +1 -1
  33. deepeval/metrics/toxicity/template.py +4 -4
  34. deepeval/metrics/turn_relevancy/template.py +2 -2
  35. deepeval/metrics/utils.py +3 -0
  36. deepeval/models/__init__.py +2 -0
  37. deepeval/models/embedding_models/azure_embedding_model.py +28 -15
  38. deepeval/models/embedding_models/local_embedding_model.py +23 -10
  39. deepeval/models/embedding_models/ollama_embedding_model.py +8 -6
  40. deepeval/models/embedding_models/openai_embedding_model.py +18 -2
  41. deepeval/models/llms/anthropic_model.py +17 -5
  42. deepeval/models/llms/azure_model.py +30 -18
  43. deepeval/models/llms/deepseek_model.py +22 -12
  44. deepeval/models/llms/gemini_model.py +120 -87
  45. deepeval/models/llms/grok_model.py +23 -16
  46. deepeval/models/llms/kimi_model.py +23 -12
  47. deepeval/models/llms/litellm_model.py +63 -25
  48. deepeval/models/llms/local_model.py +26 -18
  49. deepeval/models/llms/ollama_model.py +17 -7
  50. deepeval/models/llms/openai_model.py +22 -17
  51. deepeval/models/llms/portkey_model.py +132 -0
  52. deepeval/models/mlllms/__init__.py +1 -0
  53. deepeval/models/mlllms/azure_model.py +343 -0
  54. deepeval/models/mlllms/gemini_model.py +102 -73
  55. deepeval/models/mlllms/ollama_model.py +40 -9
  56. deepeval/models/mlllms/openai_model.py +65 -14
  57. deepeval/models/utils.py +48 -3
  58. deepeval/optimization/__init__.py +13 -0
  59. deepeval/optimization/adapters/__init__.py +2 -0
  60. deepeval/optimization/adapters/deepeval_scoring_adapter.py +588 -0
  61. deepeval/optimization/aggregates.py +14 -0
  62. deepeval/optimization/configs.py +34 -0
  63. deepeval/optimization/copro/configs.py +31 -0
  64. deepeval/optimization/copro/loop.py +837 -0
  65. deepeval/optimization/gepa/__init__.py +7 -0
  66. deepeval/optimization/gepa/configs.py +115 -0
  67. deepeval/optimization/gepa/loop.py +677 -0
  68. deepeval/optimization/miprov2/configs.py +134 -0
  69. deepeval/optimization/miprov2/loop.py +785 -0
  70. deepeval/optimization/mutations/__init__.py +0 -0
  71. deepeval/optimization/mutations/prompt_rewriter.py +458 -0
  72. deepeval/optimization/policies/__init__.py +16 -0
  73. deepeval/optimization/policies/selection.py +166 -0
  74. deepeval/optimization/policies/tie_breaker.py +67 -0
  75. deepeval/optimization/prompt_optimizer.py +462 -0
  76. deepeval/optimization/simba/__init__.py +0 -0
  77. deepeval/optimization/simba/configs.py +33 -0
  78. deepeval/optimization/simba/loop.py +983 -0
  79. deepeval/optimization/simba/types.py +15 -0
  80. deepeval/optimization/types.py +361 -0
  81. deepeval/optimization/utils.py +598 -0
  82. deepeval/prompt/prompt.py +10 -5
  83. deepeval/test_run/cache.py +2 -0
  84. deepeval/test_run/test_run.py +6 -1
  85. deepeval/tracing/context.py +3 -0
  86. deepeval/tracing/tracing.py +22 -11
  87. deepeval/utils.py +24 -0
  88. {deepeval-3.7.2.dist-info → deepeval-3.7.4.dist-info}/METADATA +1 -1
  89. {deepeval-3.7.2.dist-info → deepeval-3.7.4.dist-info}/RECORD +92 -66
  90. {deepeval-3.7.2.dist-info → deepeval-3.7.4.dist-info}/entry_points.txt +1 -1
  91. {deepeval-3.7.2.dist-info → deepeval-3.7.4.dist-info}/LICENSE.md +0 -0
  92. {deepeval-3.7.2.dist-info → deepeval-3.7.4.dist-info}/WHEEL +0 -0
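The recurring change across these files is a configuration refactor: reads through `KEY_FILE_HANDLER.fetch_data(ModelKeyValues.*)` are replaced by the typed settings object from `deepeval.config.settings`, and API keys are wrapped in pydantic's `SecretStr` so they stay masked in logs and serialized output. A minimal sketch of the before/after pattern, assuming only the names visible in the diffs below:

# Sketch of the 3.7.2 -> 3.7.4 config pattern; scaffolding is illustrative.
from pydantic import SecretStr

from deepeval.config.settings import get_settings

settings = get_settings()

# 3.7.2 (removed):
#   api_key = KEY_FILE_HANDLER.fetch_data(ModelKeyValues.GOOGLE_API_KEY)
# 3.7.4 (added):
api_key: SecretStr | None = settings.GOOGLE_API_KEY
if api_key is not None:
    raw = api_key.get_secret_value()  # unwrap only at the client boundary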
deepeval/models/mlllms/gemini_model.py CHANGED
@@ -1,16 +1,16 @@
-from typing import Optional, List, Union
 import requests
-from pydantic import BaseModel
+from typing import Optional, List, Union
+from pydantic import BaseModel, SecretStr
 from google.genai import types
 from google import genai
 
+from deepeval.config.settings import get_settings
+from deepeval.models.utils import require_secret_api_key
 from deepeval.models.retry_policy import (
     create_retry_decorator,
 )
-from deepeval.key_handler import ModelKeyValues, KEY_FILE_HANDLER
 from deepeval.models.base_model import DeepEvalBaseMLLM
 from deepeval.test_case import MLLMImage
-from deepeval.config.settings import get_settings
 from deepeval.constants import ProviderSlug as PS
 
 
@@ -60,77 +60,31 @@ class MultimodalGeminiModel(DeepEvalBaseMLLM):
         *args,
         **kwargs,
     ):
+        settings = get_settings()
         model_name = (
             model_name
-            or KEY_FILE_HANDLER.fetch_data(ModelKeyValues.GEMINI_MODEL_NAME)
+            or settings.GEMINI_MODEL_NAME
             or default_multimodal_gemini_model
         )
 
-        # Get API key from key handler if not provided
-        self.api_key = api_key or KEY_FILE_HANDLER.fetch_data(
-            ModelKeyValues.GOOGLE_API_KEY
-        )
-        self.project = project or KEY_FILE_HANDLER.fetch_data(
-            ModelKeyValues.GOOGLE_CLOUD_PROJECT
-        )
-        self.location = location or KEY_FILE_HANDLER.fetch_data(
-            ModelKeyValues.GOOGLE_CLOUD_LOCATION
-        )
-        self.use_vertexai = KEY_FILE_HANDLER.fetch_data(
-            ModelKeyValues.GOOGLE_GENAI_USE_VERTEXAI
-        )
-
-        super().__init__(model_name, *args, **kwargs)
-        self.model = self.load_model(*args, **kwargs)
-
-    def should_use_vertexai(self):
-        """Checks if the model should use Vertex AI for generation.
-
-        This is determined first by the value of `GOOGLE_GENAI_USE_VERTEXAI`
-        environment variable. If not set, it checks for the presence of the
-        project and location.
-
-        Returns:
-            True if the model should use Vertex AI, False otherwise
-        """
-        if self.use_vertexai is not None:
-            return self.use_vertexai.lower() == "yes"
-
-        if self.project and self.location:
-            return True
+        # Get API key from settings if not provided
+        if api_key is not None:
+            # keep it secret, keep it safe from serialization, logging and the like
+            self.api_key: SecretStr | None = SecretStr(api_key)
         else:
-            return False
-
-    def load_model(self, *args, **kwargs):
-        """Creates a client.
-        With Gen AI SDK, model is set at inference time, so there is no
-        model to load and initialize.
-        This method name is kept for compatibility with other LLMs.
-
-        Returns:
-            A GenerativeModel instance configured for evaluation.
-        """
-        if self.should_use_vertexai():
-            if not self.project or not self.location:
-                raise ValueError(
-                    "When using Vertex AI API, both project and location are required."
-                    "Either provide them as arguments or set GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables, "
-                    "or set them in your DeepEval configuration."
-                )
+            self.api_key = settings.GOOGLE_API_KEY
 
-            # Create client for Vertex AI
-            self.client = genai.Client(
-                vertexai=True, project=self.project, location=self.location
-            )
-        else:
-            if not self.api_key:
-                raise ValueError(
-                    "Google API key is required. Either provide it directly, set GOOGLE_API_KEY environment variable, "
-                    "or set it in your DeepEval configuration."
-                )
+        self.project = project or settings.GOOGLE_CLOUD_PROJECT
+        self.location = (
+            location
+            or settings.GOOGLE_CLOUD_LOCATION is not None
+            and str(settings.GOOGLE_CLOUD_LOCATION)
+        )
+        self.use_vertexai = settings.GOOGLE_GENAI_USE_VERTEXAI
 
-            # Create client for Gemini API
-            self.client = genai.Client(api_key=self.api_key)
+        # Keep any extra kwargs for the underlying genai.Client
+        self.args = args
+        self.kwargs = kwargs
 
         # Configure default model generation settings
         self.model_safety_settings = [
@@ -152,9 +106,28 @@ class MultimodalGeminiModel(DeepEvalBaseMLLM):
             ),
         ]
         self.model_temperature = 0.0
-        return self.client.models
 
-    # TODO: Refactor genete prompt to minimize the work done on retry
+        super().__init__(model_name, *args, **kwargs)
+
+    def should_use_vertexai(self):
+        """Checks if the model should use Vertex AI for generation.
+
+        This is determined first by the value of `GOOGLE_GENAI_USE_VERTEXAI`
+        environment variable. If not set, it checks for the presence of the
+        project and location.
+
+        Returns:
+            True if the model should use Vertex AI, False otherwise
+        """
+        if self.use_vertexai is not None:
+            return self.use_vertexai.lower() == "yes"
+
+        if self.project and self.location:
+            return True
+        else:
+            return False
+
+    # TODO: Refactor generate prompt to minimize the work done on retry
     @retry_gemini
     def generate_prompt(
         self, multimodal_input: List[Union[str, MLLMImage]] = []
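For reference, `should_use_vertexai()` is unchanged in logic and only relocated below the constructor; it resolves routing as sketched here (values illustrative):

# Resolution order per should_use_vertexai() above (values illustrative):
#   GOOGLE_GENAI_USE_VERTEXAI = "yes"          -> True  (explicit override)
#   GOOGLE_GENAI_USE_VERTEXAI = "no"           -> False
#   unset, project and location both set       -> True
#   unset, project or location missing         -> False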
@@ -214,10 +187,11 @@ class MultimodalGeminiModel(DeepEvalBaseMLLM):
         Returns:
             Generated text response
         """
+        client = self.load_model()
         prompt = self.generate_prompt(multimodal_input)
 
         if schema is not None:
-            response = self.client.models.generate_content(
+            response = client.models.generate_content(
                 model=self.model_name,
                 contents=prompt,
                 config=types.GenerateContentConfig(
@@ -229,7 +203,7 @@ class MultimodalGeminiModel(DeepEvalBaseMLLM):
             )
             return response.parsed, 0
         else:
-            response = self.client.models.generate_content(
+            response = client.models.generate_content(
                 model=self.model_name,
                 contents=prompt,
                 config=types.GenerateContentConfig(
@@ -254,10 +228,11 @@ class MultimodalGeminiModel(DeepEvalBaseMLLM):
         Returns:
             Generated text response
         """
+        client = self.load_model()
         prompt = self.generate_prompt(multimodal_input)
 
        if schema is not None:
-            response = await self.client.aio.models.generate_content(
+            response = await client.aio.models.generate_content(
                 model=self.model_name,
                 contents=prompt,
                 config=types.GenerateContentConfig(
@@ -269,7 +244,7 @@ class MultimodalGeminiModel(DeepEvalBaseMLLM):
             )
             return response.parsed, 0
         else:
-            response = await self.client.aio.models.generate_content(
+            response = await client.aio.models.generate_content(
                 model=self.model_name,
                 contents=prompt,
                 config=types.GenerateContentConfig(
@@ -279,6 +254,60 @@ class MultimodalGeminiModel(DeepEvalBaseMLLM):
             )
             return response.text, 0
 
+    #########
+    # Model #
+    #########
+
     def get_model_name(self) -> str:
         """Returns the name of the Gemini model being used."""
         return self.model_name
+
+    def load_model(self, *args, **kwargs):
+        """Creates and returns a GenAI client.
+
+        With the Gen AI SDK, the model is set at inference time, so we only
+        construct the client here. Kept for compatibility with other MLLMs.
+        """
+        return self._build_client(**kwargs)
+
+    def _client_kwargs(self, **override_kwargs) -> dict:
+        """
+        Return kwargs forwarded to genai.Client.
+
+        Start from the ctor kwargs captured on `self.kwargs`, then apply any
+        overrides passed via load_model(...).
+        """
+        client_kwargs = dict(self.kwargs or {})
+        if override_kwargs:
+            client_kwargs.update(override_kwargs)
+        return client_kwargs
+
+    def _build_client(self, **override_kwargs):
+        """Build and return a genai.Client for either Gemini API or Vertex AI."""
+        client_kwargs = self._client_kwargs(**override_kwargs)
+
+        if self.should_use_vertexai():
+            if not self.project or not self.location:
+                raise ValueError(
+                    "When using Vertex AI API, both project and location are required."
+                    "Either provide them as arguments or set GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables, "
+                    "or set them in your DeepEval configuration."
+                )
+
+            # Create client for Vertex AI
+            return genai.Client(
+                vertexai=True,
+                project=self.project,
+                location=self.location,
+                **client_kwargs,
+            )
+
+        api_key = require_secret_api_key(
+            self.api_key,
+            provider_label="Google Gemini",
+            env_var_name="GOOGLE_API_KEY",
+            param_hint="`api_key` to MultimodalGeminiModel(...)",
+        )
+
+        # Create client for Gemini API
+        return genai.Client(api_key=api_key, **client_kwargs)
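Net effect of the gemini_model.py hunks: the client is no longer built once in `__init__`; each call site builds it via `load_model()` / `_build_client()`, with constructor kwargs forwarded to `genai.Client`. A minimal usage sketch under those assumptions (the model id is hypothetical):

# Hedged sketch; constructor surface per the diff above, model id hypothetical.
from deepeval.models.mlllms.gemini_model import MultimodalGeminiModel

model = MultimodalGeminiModel(
    model_name="gemini-2.0-flash",  # falls back to settings.GEMINI_MODEL_NAME
    api_key="...",                  # wrapped in SecretStr internally
)
client = model.load_model()         # fresh genai.Client per call site
print(model.get_model_name())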
deepeval/models/mlllms/ollama_model.py CHANGED
@@ -8,7 +8,6 @@ import io
 from deepeval.models.retry_policy import (
     create_retry_decorator,
 )
-from deepeval.key_handler import KEY_FILE_HANDLER, ModelKeyValues
 from deepeval.models import DeepEvalBaseMLLM
 from deepeval.test_case import MLLMImage
 from deepeval.config.settings import get_settings
@@ -19,14 +18,34 @@ retry_ollama = create_retry_decorator(PS.OLLAMA)
 
 
 class MultimodalOllamaModel(DeepEvalBaseMLLM):
-    def __init__(self, **kwargs):
-        model_name = KEY_FILE_HANDLER.fetch_data(
-            ModelKeyValues.LOCAL_MODEL_NAME
-        )
-        self.base_url = KEY_FILE_HANDLER.fetch_data(
-            ModelKeyValues.LOCAL_MODEL_BASE_URL
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        host: Optional[str] = None,
+        **kwargs,
+    ):
+        """
+        Multimodal Ollama model.
+
+        - `model`: Ollama model name (e.g. "llava").
+        - `host`: Ollama base URL (e.g. "http://localhost:11434").
+        - extra **kwargs are passed through to the underlying Client.
+        """
+        settings = get_settings()
+
+        # Resolve host/base URL
+        self.base_url = (
+            host
+            or settings.LOCAL_MODEL_BASE_URL
+            and str(settings.LOCAL_MODEL_BASE_URL)
         )
-        self.kwargs = kwargs
+
+        # Resolve model name
+        model_name = model or settings.LOCAL_MODEL_NAME
+
+        # Client kwargs
+        self.kwargs = kwargs or {}
+
         super().__init__(model_name)
 
     @retry_ollama
@@ -132,13 +151,25 @@ class MultimodalOllamaModel(DeepEvalBaseMLLM):
             print(f"Error converting image to base64: {e}")
         return None
 
+    ###############################################
+    # Model
+    ###############################################
+
     def load_model(self, async_mode: bool = False):
         if not async_mode:
             return self._build_client(Client)
         return self._build_client(AsyncClient)
 
+    def _client_kwargs(self) -> Dict:
+        """
+        Return client-init kwargs.
+        Ollama's Python client doesn't have built-in retry config like OpenAI,
+        so we just pass these through untouched.
+        """
+        return dict(self.kwargs or {})
+
     def _build_client(self, cls):
-        return cls(host=self.base_url, **self.kwargs)
+        return cls(host=self.base_url, **self._client_kwargs())
 
     def get_model_name(self):
         return f"{self.model_name} (Ollama)"
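The new Ollama constructor takes explicit `model` and `host` arguments with settings fallbacks. A usage sketch reusing the docstring's own example values:

# Sketch per the constructor docstring above; values are the docstring's examples.
from deepeval.models.mlllms.ollama_model import MultimodalOllamaModel

model = MultimodalOllamaModel(
    model="llava",                   # falls back to settings.LOCAL_MODEL_NAME
    host="http://localhost:11434",   # falls back to settings.LOCAL_MODEL_BASE_URL
)
sync_client = model.load_model(async_mode=False)   # ollama.Client
async_client = model.load_model(async_mode=True)   # ollama.AsyncClient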
deepeval/models/mlllms/openai_model.py CHANGED
@@ -1,18 +1,20 @@
-from typing import Optional, Tuple, List, Union
+import base64
+from typing import Optional, Tuple, List, Union, Dict
 from openai import OpenAI, AsyncOpenAI
 from openai.types.chat import ParsedChatCompletion
-from pydantic import BaseModel
+from pydantic import BaseModel, SecretStr
 from io import BytesIO
-import base64
 
+from deepeval.config.settings import get_settings
 from deepeval.models.llms.openai_model import (
     model_pricing,
     structured_outputs_models,
+    _request_timeout_seconds,
 )
 from deepeval.models import DeepEvalBaseMLLM
 from deepeval.models.llms.utils import trim_and_load_json
 from deepeval.test_case import MLLMImage
-from deepeval.models.utils import parse_model_name
+from deepeval.models.utils import parse_model_name, require_secret_api_key
 from deepeval.models.retry_policy import (
     create_retry_decorator,
     sdk_retries_for,
@@ -60,17 +62,26 @@ class MultimodalOpenAIModel(DeepEvalBaseMLLM):
         *args,
         **kwargs,
     ):
+        settings = get_settings()
         model_name = None
         if isinstance(model, str):
             model_name = parse_model_name(model)
             if model_name not in valid_multimodal_gpt_models:
                 raise ValueError(
-                    f"Invalid model. Available Multimodal GPT models: {', '.join(model for model in valid_multimodal_gpt_models)}"
+                    f"Invalid model. Available Multimodal GPT models: "
+                    f"{', '.join(model for model in valid_multimodal_gpt_models)}"
                 )
+        elif settings.OPENAI_MODEL_NAME is not None:
+            model_name = settings.OPENAI_MODEL_NAME
         elif model is None:
             model_name = default_multimodal_gpt_model
 
-        self._openai_api_key = _openai_api_key
+        if _openai_api_key is not None:
+            # keep it secret, keep it safe from serialization, logging and the like
+            self._openai_api_key: SecretStr | None = SecretStr(_openai_api_key)
+        else:
+            self._openai_api_key = settings.OPENAI_API_KEY
+
         self.args = args
         self.kwargs = kwargs
 
@@ -86,7 +97,7 @@ class MultimodalOpenAIModel(DeepEvalBaseMLLM):
         multimodal_input: List[Union[str, MLLMImage]],
         schema: Optional[BaseModel] = None,
     ) -> Tuple[str, float]:
-        client = OpenAI(api_key=self._openai_api_key)
+        client = self.load_model(async_mode=False)
         prompt = self.generate_prompt(multimodal_input)
 
         if schema:
@@ -123,7 +134,7 @@ class MultimodalOpenAIModel(DeepEvalBaseMLLM):
         multimodal_input: List[Union[str, MLLMImage]],
         schema: Optional[BaseModel] = None,
     ) -> Tuple[str, float]:
-        client = AsyncOpenAI(api_key=self._openai_api_key)
+        client = self.load_model(async_mode=True)
         prompt = self.generate_prompt(multimodal_input)
 
         if schema:
@@ -247,12 +258,52 @@ class MultimodalOpenAIModel(DeepEvalBaseMLLM):
         base64_encoded_image = base64.b64encode(image_bytes).decode("utf-8")
         return base64_encoded_image
 
-    def _client(self, async_mode: bool = False):
-        kw = {"api_key": self._openai_api_key}
-        if not sdk_retries_for(PS.OPENAI):
-            kw["max_retries"] = 0
-        Client = AsyncOpenAI if async_mode else OpenAI
-        return Client(**kw)
+    ###############################################
+    # Model
+    ###############################################
 
     def get_model_name(self):
         return self.model_name
+
+    def load_model(self, async_mode: bool = False):
+        Client = AsyncOpenAI if async_mode else OpenAI
+        return self._build_client(Client)
+
+    def _client_kwargs(self) -> Dict:
+        """
+        If Tenacity is managing retries, force OpenAI SDK retries off to avoid
+        double retries. If the user opts into SDK retries for 'openai' via
+        DEEPEVAL_SDK_RETRY_PROVIDERS, leave their retry settings as is.
+        """
+        kwargs: Dict = {}
+        if not sdk_retries_for(PS.OPENAI):
+            kwargs["max_retries"] = 0
+
+        if not kwargs.get("timeout"):
+            kwargs["timeout"] = _request_timeout_seconds()
+        return kwargs
+
+    def _build_client(self, cls):
+        api_key = require_secret_api_key(
+            self._openai_api_key,
+            provider_label="OpenAI",
+            env_var_name="OPENAI_API_KEY",
+            param_hint="`_openai_api_key` to MultimodalOpenAIModel(...)",
+        )
+
+        kw = dict(
+            api_key=api_key,
+            **self._client_kwargs(),
+        )
+        try:
+            return cls(**kw)
+        except TypeError as e:
+            # older OpenAI SDKs may not accept max_retries; remove and retry once
+            if "max_retries" in str(e):
+                kw.pop("max_retries", None)
+                return cls(**kw)
+            raise
+
+    def _client(self, async_mode: bool = False):
+        # Backwards-compat path for internal callers in this module
+        return self.load_model(async_mode=async_mode)
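The retry hand-off in `_client_kwargs()` is the subtle part: Tenacity owns retries by default, so SDK retries are pinned to zero unless the user opts a provider back in via `DEEPEVAL_SDK_RETRY_PROVIDERS` (the variable named in the docstring above). A sketch of both modes, assuming the opt-in is read when the client is built and that the model id is valid:

# Hedged sketch; model id and env-var read timing are assumptions.
import os

from deepeval.models.mlllms.openai_model import MultimodalOpenAIModel

model = MultimodalOpenAIModel(model="gpt-4o", _openai_api_key="sk-...")

# Default: Tenacity manages retries, so the SDK client gets max_retries=0.
client = model.load_model(async_mode=False)

# Opt-in: let the OpenAI SDK keep its own retry defaults instead.
os.environ["DEEPEVAL_SDK_RETRY_PROVIDERS"] = "openai"  # assumed read at build time
client = model.load_model(async_mode=False)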
deepeval/models/utils.py CHANGED
@@ -1,4 +1,7 @@
 from typing import Optional
+from pydantic import SecretStr
+
+from deepeval.errors import DeepEvalError
 
 
 def parse_model_name(model_name: Optional[str] = None) -> str:
@@ -25,7 +28,49 @@ def parse_model_name(model_name: Optional[str] = None) -> str:
     if model_name is None:
         return None
 
-    # if "/" in model_name:
-    #     _, parsed_model_name = model_name.split("/", 1)
-    #     return parsed_model_name
+    if "/" in model_name:
+        _, parsed_model_name = model_name.split("/", 1)
+        return parsed_model_name
     return model_name
+
+
+def require_secret_api_key(
+    secret: Optional[SecretStr],
+    *,
+    provider_label: str,
+    env_var_name: str,
+    param_hint: str,
+) -> str:
+    """
+    Normalize and validate a provider API key stored as a SecretStr.
+
+    Args:
+        secret:
+            The SecretStr coming from Settings or an explicit constructor arg.
+        provider_label:
+            Human-readable provider name for error messages (e.g. "Anthropic", "OpenAI").
+        env_var_name:
+            The environment variable backing this key.
+        param_hint:
+            A short hint telling users how to pass the key explicitly.
+
+    Returns:
+        The underlying API key string.
+
+    Raises:
+        DeepEvalError: if the key is missing or empty.
+    """
+    if secret is None:
+        raise DeepEvalError(
+            f"{provider_label} API key is not configured. "
+            f"Set {env_var_name} in your environment or pass "
+            f"{param_hint}."
+        )
+
+    api_key = secret.get_secret_value()
+    if not api_key:
+        raise DeepEvalError(
+            f"{provider_label} API key is empty. Please configure a valid key."
+        )
+
+    return api_key
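Usage of the new helper mirrors the call sites in the model files above; note also that `parse_model_name` now actually strips provider prefixes (the "/" branch was previously commented out):

# Sketch using only names shown in this diff; the key value is a placeholder.
from pydantic import SecretStr

from deepeval.models.utils import parse_model_name, require_secret_api_key

assert parse_model_name("openai/gpt-4o") == "gpt-4o"  # prefix now stripped

key = require_secret_api_key(
    SecretStr("sk-example"),          # placeholder secret
    provider_label="OpenAI",
    env_var_name="OPENAI_API_KEY",
    param_hint="`_openai_api_key` to MultimodalOpenAIModel(...)",
)
assert key == "sk-example"            # the raw string comes back
# None or an empty SecretStr raises DeepEvalError with a setup hint.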
deepeval/optimization/__init__.py ADDED
@@ -0,0 +1,13 @@
+from deepeval.optimization.prompt_optimizer import PromptOptimizer
+from deepeval.optimization.configs import OptimizerDisplayConfig
+from deepeval.optimization.gepa.loop import (
+    GEPARunner as GEPARunner,
+    GEPAConfig as GEPAConfig,
+)
+
+__all__ = [
+    "GEPARunner",
+    "GEPAConfig",
+    "PromptOptimizer",
+    "OptimizerDisplayConfig",
+]
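This new package `__init__` makes the optimizer entry points importable from `deepeval.optimization` directly; a sketch of the resulting import surface (constructor arguments are not shown in this diff, so none are given):

from deepeval.optimization import (
    GEPAConfig,
    GEPARunner,
    OptimizerDisplayConfig,
    PromptOptimizer,
)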
deepeval/optimization/adapters/__init__.py ADDED
@@ -0,0 +1,2 @@
+# nothing yet
+__all__ = []