deepeval 3.7.5__py3-none-any.whl → 3.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. deepeval/_version.py +1 -1
  2. deepeval/config/settings.py +35 -1
  3. deepeval/dataset/api.py +23 -1
  4. deepeval/dataset/golden.py +106 -21
  5. deepeval/evaluate/evaluate.py +0 -3
  6. deepeval/evaluate/execute.py +10 -222
  7. deepeval/evaluate/utils.py +6 -30
  8. deepeval/key_handler.py +3 -0
  9. deepeval/metrics/__init__.py +0 -4
  10. deepeval/metrics/answer_relevancy/answer_relevancy.py +89 -132
  11. deepeval/metrics/answer_relevancy/template.py +102 -179
  12. deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
  13. deepeval/metrics/arena_g_eval/template.py +17 -1
  14. deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
  15. deepeval/metrics/argument_correctness/template.py +19 -2
  16. deepeval/metrics/base_metric.py +13 -41
  17. deepeval/metrics/bias/bias.py +102 -108
  18. deepeval/metrics/bias/template.py +14 -2
  19. deepeval/metrics/contextual_precision/contextual_precision.py +56 -92
  20. deepeval/metrics/contextual_recall/contextual_recall.py +58 -85
  21. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +53 -83
  22. deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
  23. deepeval/metrics/conversation_completeness/template.py +23 -3
  24. deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
  25. deepeval/metrics/conversational_dag/nodes.py +66 -123
  26. deepeval/metrics/conversational_dag/templates.py +16 -0
  27. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
  28. deepeval/metrics/dag/dag.py +10 -0
  29. deepeval/metrics/dag/nodes.py +63 -126
  30. deepeval/metrics/dag/templates.py +14 -0
  31. deepeval/metrics/exact_match/exact_match.py +9 -1
  32. deepeval/metrics/faithfulness/faithfulness.py +82 -136
  33. deepeval/metrics/g_eval/g_eval.py +87 -78
  34. deepeval/metrics/g_eval/template.py +18 -1
  35. deepeval/metrics/g_eval/utils.py +7 -6
  36. deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
  37. deepeval/metrics/goal_accuracy/template.py +21 -3
  38. deepeval/metrics/hallucination/hallucination.py +60 -75
  39. deepeval/metrics/hallucination/template.py +13 -0
  40. deepeval/metrics/indicator.py +3 -6
  41. deepeval/metrics/json_correctness/json_correctness.py +40 -38
  42. deepeval/metrics/json_correctness/template.py +10 -0
  43. deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
  44. deepeval/metrics/knowledge_retention/schema.py +9 -3
  45. deepeval/metrics/knowledge_retention/template.py +12 -0
  46. deepeval/metrics/mcp/mcp_task_completion.py +68 -38
  47. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
  48. deepeval/metrics/mcp/template.py +52 -0
  49. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
  50. deepeval/metrics/mcp_use_metric/template.py +12 -0
  51. deepeval/metrics/misuse/misuse.py +77 -97
  52. deepeval/metrics/misuse/template.py +15 -0
  53. deepeval/metrics/multimodal_metrics/__init__.py +0 -1
  54. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +37 -38
  55. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +55 -76
  56. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +37 -38
  57. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +37 -38
  58. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +57 -76
  59. deepeval/metrics/non_advice/non_advice.py +79 -105
  60. deepeval/metrics/non_advice/template.py +12 -0
  61. deepeval/metrics/pattern_match/pattern_match.py +12 -4
  62. deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
  63. deepeval/metrics/pii_leakage/template.py +14 -0
  64. deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
  65. deepeval/metrics/plan_adherence/template.py +11 -0
  66. deepeval/metrics/plan_quality/plan_quality.py +63 -87
  67. deepeval/metrics/plan_quality/template.py +9 -0
  68. deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
  69. deepeval/metrics/prompt_alignment/template.py +12 -0
  70. deepeval/metrics/role_adherence/role_adherence.py +48 -71
  71. deepeval/metrics/role_adherence/template.py +14 -0
  72. deepeval/metrics/role_violation/role_violation.py +75 -108
  73. deepeval/metrics/role_violation/template.py +12 -0
  74. deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
  75. deepeval/metrics/step_efficiency/template.py +11 -0
  76. deepeval/metrics/summarization/summarization.py +115 -183
  77. deepeval/metrics/summarization/template.py +19 -0
  78. deepeval/metrics/task_completion/task_completion.py +67 -73
  79. deepeval/metrics/tool_correctness/tool_correctness.py +43 -42
  80. deepeval/metrics/tool_use/tool_use.py +42 -66
  81. deepeval/metrics/topic_adherence/template.py +13 -0
  82. deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
  83. deepeval/metrics/toxicity/template.py +13 -0
  84. deepeval/metrics/toxicity/toxicity.py +80 -99
  85. deepeval/metrics/turn_contextual_precision/schema.py +3 -3
  86. deepeval/metrics/turn_contextual_precision/template.py +1 -1
  87. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +110 -68
  88. deepeval/metrics/turn_contextual_recall/schema.py +3 -3
  89. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +104 -61
  90. deepeval/metrics/turn_contextual_relevancy/schema.py +2 -2
  91. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +106 -65
  92. deepeval/metrics/turn_faithfulness/schema.py +1 -1
  93. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +104 -73
  94. deepeval/metrics/turn_relevancy/template.py +14 -0
  95. deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
  96. deepeval/metrics/utils.py +145 -90
  97. deepeval/models/base_model.py +44 -6
  98. deepeval/models/embedding_models/azure_embedding_model.py +34 -12
  99. deepeval/models/embedding_models/local_embedding_model.py +22 -7
  100. deepeval/models/embedding_models/ollama_embedding_model.py +17 -6
  101. deepeval/models/embedding_models/openai_embedding_model.py +3 -2
  102. deepeval/models/llms/amazon_bedrock_model.py +226 -71
  103. deepeval/models/llms/anthropic_model.py +141 -47
  104. deepeval/models/llms/azure_model.py +167 -94
  105. deepeval/models/llms/constants.py +2032 -0
  106. deepeval/models/llms/deepseek_model.py +79 -29
  107. deepeval/models/llms/gemini_model.py +126 -67
  108. deepeval/models/llms/grok_model.py +125 -59
  109. deepeval/models/llms/kimi_model.py +126 -81
  110. deepeval/models/llms/litellm_model.py +92 -18
  111. deepeval/models/llms/local_model.py +114 -15
  112. deepeval/models/llms/ollama_model.py +97 -76
  113. deepeval/models/llms/openai_model.py +167 -310
  114. deepeval/models/llms/portkey_model.py +58 -16
  115. deepeval/models/llms/utils.py +5 -2
  116. deepeval/models/utils.py +60 -4
  117. deepeval/simulator/conversation_simulator.py +43 -0
  118. deepeval/simulator/template.py +13 -0
  119. deepeval/test_case/api.py +24 -45
  120. deepeval/test_case/arena_test_case.py +7 -2
  121. deepeval/test_case/conversational_test_case.py +55 -6
  122. deepeval/test_case/llm_test_case.py +60 -6
  123. deepeval/test_run/api.py +3 -0
  124. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -1
  125. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/RECORD +128 -132
  126. deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
  127. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
  128. deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
  129. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -133
  130. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
  131. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
  132. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
  133. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0
deepeval/metrics/turn_relevancy/turn_relevancy.py CHANGED
@@ -11,15 +11,19 @@ from deepeval.metrics.utils import (
  construct_verbose_logs,
  get_turns_in_sliding_window,
  get_unit_interactions,
- trimAndLoadJson,
  initialize_model,
  convert_turn_to_dict,
+ a_generate_with_schema_and_extract,
+ generate_with_schema_and_extract,
  )
  from deepeval.models import DeepEvalBaseLLM
  from deepeval.metrics.indicator import metric_progress_indicator
  from deepeval.test_case import ConversationalTestCase, Turn, TurnParams
  from deepeval.utils import get_or_create_event_loop, prettify_list
- from deepeval.metrics.turn_relevancy.schema import *
+ from deepeval.metrics.turn_relevancy.schema import (
+ TurnRelevancyVerdict,
+ TurnRelevancyScoreReason,
+ )
  from deepeval.metrics.api import metric_data_manager


@@ -53,7 +57,12 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  _log_metric_to_confident: bool = True,
  ):
  check_conversational_test_case_params(
- test_case, self._required_test_case_params, self
+ test_case,
+ self._required_test_case_params,
+ self,
+ False,
+ self.model,
+ test_case.multimodal,
  )

  self.evaluation_cost = 0 if self.using_native_model else None
@@ -108,7 +117,12 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  _log_metric_to_confident: bool = True,
  ) -> float:
  check_conversational_test_case_params(
- test_case, self._required_test_case_params, self
+ test_case,
+ self._required_test_case_params,
+ self,
+ False,
+ self.model,
+ test_case.multimodal,
  )

  self.evaluation_cost = 0 if self.using_native_model else None
@@ -148,7 +162,7 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  )
  return self.score

- async def _a_generate_reason(self) -> str:
+ async def _a_generate_reason(self) -> Optional[str]:
  if self.include_reason is False:
  return None

@@ -162,24 +176,19 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  prompt = TurnRelevancyTemplate.generate_reason(
  score=self.score, irrelevancies=irrelevancies
  )
- if self.using_native_model:
- res, cost = await self.model.a_generate(
- prompt, schema=TurnRelevancyScoreReason
- )
- self.evaluation_cost += cost
- return res.reason
- else:
- try:
- res: TurnRelevancyScoreReason = await self.model.a_generate(
- prompt, schema=TurnRelevancyScoreReason
- )
- return res.reason
- except TypeError:
- res = await self.model.a_generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["reason"]

- def _generate_reason(self) -> str:
+ return await a_generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=TurnRelevancyScoreReason,
+ extract_schema=lambda s: s.reason,
+ extract_json=lambda data: data["reason"],
+ )
+
+ def _generate_reason(self) -> Optional[str]:
+ if self.include_reason is False:
+ return None
+
  irrelevancies: List[Dict[str, str]] = []
  for index, verdict in enumerate(self.verdicts):
  if verdict.verdict.strip().lower() == "no":
@@ -190,22 +199,14 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  prompt = TurnRelevancyTemplate.generate_reason(
  score=self.score, irrelevancies=irrelevancies
  )
- if self.using_native_model:
- res, cost = self.model.generate(
- prompt, schema=TurnRelevancyScoreReason
- )
- self.evaluation_cost += cost
- return res.reason
- else:
- try:
- res: TurnRelevancyScoreReason = self.model.generate(
- prompt, schema=TurnRelevancyScoreReason
- )
- return res.reason
- except TypeError:
- res = self.model.generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["reason"]
+
+ return generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=TurnRelevancyScoreReason,
+ extract_schema=lambda s: s.reason,
+ extract_json=lambda data: data["reason"],
+ )

  async def _a_generate_verdict(
  self, turns_sliding_window: List[Turn]
@@ -215,22 +216,14 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  convert_turn_to_dict(turn) for turn in turns_sliding_window
  ]
  )
- if self.using_native_model:
- res, cost = await self.model.a_generate(
- prompt, schema=TurnRelevancyVerdict
- )
- self.evaluation_cost += cost
- return res
- else:
- try:
- res: TurnRelevancyVerdict = await self.model.a_generate(
- prompt, schema=TurnRelevancyVerdict
- )
- return res
- except TypeError:
- res = await self.model.a_generate(prompt)
- data = trimAndLoadJson(res, self)
- return TurnRelevancyVerdict(**data)
+
+ return await a_generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=TurnRelevancyVerdict,
+ extract_schema=lambda s: s,
+ extract_json=lambda data: TurnRelevancyVerdict(**data),
+ )

  def _generate_verdict(
  self, turns_sliding_window: List[Turn]
@@ -240,20 +233,14 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  convert_turn_to_dict(turn) for turn in turns_sliding_window
  ]
  )
- if self.using_native_model:
- res, cost = self.model.generate(prompt, schema=TurnRelevancyVerdict)
- self.evaluation_cost += cost
- return res
- else:
- try:
- res: TurnRelevancyVerdict = self.model.generate(
- prompt, schema=TurnRelevancyVerdict
- )
- return res
- except TypeError:
- res = self.model.generate(prompt)
- data = trimAndLoadJson(res, self)
- return TurnRelevancyVerdict(**data)
+
+ return generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=TurnRelevancyVerdict,
+ extract_schema=lambda s: s,
+ extract_json=lambda data: TurnRelevancyVerdict(**data),
+ )

  def _calculate_score(self) -> float:
  number_of_verdicts = len(self.verdicts)
@@ -274,7 +261,7 @@ class TurnRelevancyMetric(BaseConversationalMetric):
  else:
  try:
  self.score >= self.threshold
- except:
+ except TypeError:
  self.success = False
  return self.success

deepeval/metrics/utils.py CHANGED
@@ -2,7 +2,17 @@ import inspect
  import json
  import re
  import sys
- from typing import Any, Dict, Optional, List, Union, Tuple
+ from typing import (
+ Any,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ )

  from deepeval.errors import (
  MissingTestCaseParamsError,
@@ -26,6 +36,14 @@ from deepeval.models import (
  GrokModel,
  DeepSeekModel,
  )
+ from deepeval.models.llms.constants import (
+ OPENAI_MODELS_DATA,
+ GEMINI_MODELS_DATA,
+ OLLAMA_MODELS_DATA,
+ ANTHROPIC_MODELS_DATA,
+ GROK_MODELS_DATA,
+ KIMI_MODELS_DATA,
+ )
  from deepeval.key_handler import (
  ModelKeyValues,
  EmbeddingKeyValues,
@@ -34,7 +52,6 @@ from deepeval.key_handler import (
  from deepeval.metrics import (
  BaseMetric,
  BaseConversationalMetric,
- BaseMultimodalMetric,
  BaseArenaMetric,
  )
  from deepeval.models.base_model import DeepEvalBaseEmbeddingModel
@@ -49,19 +66,20 @@ from deepeval.test_case import (
  TurnParams,
  )

- MULTIMODAL_SUPPORTED_MODELS = [
- GPTModel,
- GeminiModel,
- OllamaModel,
- AzureOpenAIModel,
- ]
+ MULTIMODAL_SUPPORTED_MODELS = {
+ GPTModel: OPENAI_MODELS_DATA,
+ GeminiModel: GEMINI_MODELS_DATA,
+ OllamaModel: OLLAMA_MODELS_DATA,
+ AzureOpenAIModel: OPENAI_MODELS_DATA,
+ KimiModel: KIMI_MODELS_DATA,
+ AnthropicModel: ANTHROPIC_MODELS_DATA,
+ GrokModel: GROK_MODELS_DATA,
+ }


  def copy_metrics(
- metrics: List[
- Union[BaseMetric, BaseConversationalMetric, BaseMultimodalMetric]
- ],
- ) -> List[Union[BaseMetric, BaseMultimodalMetric, BaseConversationalMetric]]:
+ metrics: List[Union[BaseMetric, BaseConversationalMetric]],
+ ) -> List[Union[BaseMetric, BaseConversationalMetric]]:
  copied_metrics = []
  for metric in metrics:
  metric_class = type(metric)
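
MULTIMODAL_SUPPORTED_MODELS is now a mapping from provider class to per-model capability data rather than a flat list of classes, and the updated checks below iterate it to work out which of a provider's models are multimodal-capable. A minimal sketch of that lookup, assuming only the structure visible in this hunk (values may be lazy callables that return an object exposing a `supports_multimodal` flag); the helper name is illustrative, not part of the package:

```python
# Not part of the diff: a sketch of the lookup the updated check_* helpers
# perform against the new MULTIMODAL_SUPPORTED_MODELS mapping.
from deepeval.metrics.utils import MULTIMODAL_SUPPORTED_MODELS
from deepeval.models import GPTModel


def multimodal_model_names(provider_cls) -> list:
    names = []
    for model_name, model_data in MULTIMODAL_SUPPORTED_MODELS[provider_cls].items():
        if callable(model_data):
            model_data = model_data()  # some entries are lazy factories
        if model_data.supports_multimodal:
            names.append(model_name)
    return names


print(multimodal_model_names(GPTModel))
```
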
@@ -204,13 +222,21 @@ def check_conversational_test_case_params(
  ):
  if multimodal:
  if not model or not model.supports_multimodal():
- if model and type(model) in MULTIMODAL_SUPPORTED_MODELS:
+ if model and type(model) in MULTIMODAL_SUPPORTED_MODELS.keys():
+ valid_multimodal_models = []
+ for model_name, model_data in MULTIMODAL_SUPPORTED_MODELS.get(
+ type(model)
+ ).items():
+ if callable(model_data):
+ model_data = model_data()
+ if model_data.supports_multimodal:
+ valid_multimodal_models.append(model_name)
  raise ValueError(
- f"The evaluation model {model.name} does not support multimodal evaluations at the moment. Available multi-modal models for the {model.__class__.__name__} provider includes {', '.join(model.__class__.valid_multimodal_models)}."
+ f"The evaluation model {model.name} does not support multimodal evaluations at the moment. Available multi-modal models for the {model.__class__.__name__} provider includes {', '.join(valid_multimodal_models)}."
  )
  else:
  raise ValueError(
- f"The evaluation model {model.name} does not support multimodal inputs, please use one of the following evaluation models: {', '.join([cls.__name__ for cls in MULTIMODAL_SUPPORTED_MODELS])}"
+ f"The evaluation model {model.name} does not support multimodal inputs, please use one of the following evaluation models: {', '.join([cls.__name__ for cls in MULTIMODAL_SUPPORTED_MODELS.keys()])}"
  )

  if isinstance(test_case, ConversationalTestCase) is False:
@@ -245,8 +271,49 @@ def check_llm_test_case_params(
  def check_llm_test_case_params(
  test_case: LLMTestCase,
  test_case_params: List[LLMTestCaseParams],
+ input_image_count: Optional[int],
+ actual_output_image_count: Optional[int],
  metric: Union[BaseMetric, BaseArenaMetric],
+ model: Optional[DeepEvalBaseLLM] = None,
+ multimodal: Optional[bool] = False,
  ):
+ if multimodal:
+ if not model or not model.supports_multimodal():
+ if model and type(model) in MULTIMODAL_SUPPORTED_MODELS.keys():
+ valid_multimodal_models = []
+ for model_name, model_data in MULTIMODAL_SUPPORTED_MODELS.get(
+ type(model)
+ ).items():
+ if callable(model_data):
+ model_data = model_data()
+ if model_data.supports_multimodal:
+ valid_multimodal_models.append(model_name)
+ raise ValueError(
+ f"The evaluation model {model.name} does not support multimodal evaluations at the moment. Available multi-modal models for the {model.__class__.__name__} provider includes {', '.join(valid_multimodal_models)}."
+ )
+ else:
+ raise ValueError(
+ f"The evaluation model {model.name} does not support multimodal inputs, please use one of the following evaluation models: {', '.join([cls.__name__ for cls in MULTIMODAL_SUPPORTED_MODELS.keys()])}"
+ )
+
+ if input_image_count:
+ count = 0
+ for ele in convert_to_multi_modal_array(test_case.input):
+ if isinstance(ele, MLLMImage):
+ count += 1
+ if count != input_image_count:
+ error_str = f"Can only evaluate test cases with '{input_image_count}' input images using the '{metric.__name__}' metric. `{count}` found."
+ raise ValueError(error_str)
+
+ if actual_output_image_count:
+ count = 0
+ for ele in convert_to_multi_modal_array(test_case.actual_output):
+ if isinstance(ele, MLLMImage):
+ count += 1
+ if count != actual_output_image_count:
+ error_str = f"Unable to evaluate test cases with '{actual_output_image_count}' output images using the '{metric.__name__}' metric. `{count}` found."
+ raise ValueError(error_str)
+
  if isinstance(test_case, LLMTestCase) is False:
  error_str = f"Unable to evaluate test cases that are not of type 'LLMTestCase' using the non-conversational '{metric.__name__}' metric."
  metric.error = error_str
@@ -276,6 +343,8 @@ def check_arena_test_case_params(
  arena_test_case: ArenaTestCase,
  test_case_params: List[LLMTestCaseParams],
  metric: BaseArenaMetric,
+ model: Optional[DeepEvalBaseLLM] = None,
+ multimodal: Optional[bool] = False,
  ):
  if not isinstance(arena_test_case, ArenaTestCase):
  raise ValueError(
@@ -296,79 +365,8 @@
  )

  for test_case in cases:
- check_llm_test_case_params(test_case, test_case_params, metric)
-
-
- def check_mllm_test_case_params(
- test_case: LLMTestCase,
- test_case_params: List[LLMTestCaseParams],
- input_image_count: Optional[int],
- actual_output_image_count: Optional[int],
- metric: BaseMetric,
- model: Optional[DeepEvalBaseLLM] = None,
- ):
- if not model or not model.supports_multimodal():
- if model and type(model) in MULTIMODAL_SUPPORTED_MODELS:
- raise ValueError(
- f"The evaluation model {model.name} does not support multimodal evaluations at the moment. Available multi-modal models for the {model.__class__.__name__} provider includes {', '.join(model.__class__.valid_multimodal_models)}."
- )
- else:
- raise ValueError(
- f"The evaluation model {model.name} does not support multimodal inputs, please use one of the following evaluation models: {', '.join([cls.__name__ for cls in MULTIMODAL_SUPPORTED_MODELS])}"
- )
-
- if input_image_count:
- count = 0
- for ele in convert_to_multi_modal_array(test_case.input):
- if isinstance(ele, MLLMImage):
- count += 1
- if count != input_image_count:
- error_str = f"Can only evaluate test cases with '{input_image_count}' input images using the '{metric.__name__}' metric. `{count}` found."
- raise ValueError(error_str)
-
- if actual_output_image_count:
- count = 0
- for ele in convert_to_multi_modal_array(test_case.actual_output):
- if isinstance(ele, MLLMImage):
- count += 1
- if count != actual_output_image_count:
- error_str = f"Unable to evaluate test cases with '{actual_output_image_count}' output images using the '{metric.__name__}' metric. `{count}` found."
- raise ValueError(error_str)
-
- missing_params = []
- for param in test_case_params:
- if getattr(test_case, param.value) is None:
- missing_params.append(f"'{param.value}'")
-
- if missing_params:
- if len(missing_params) == 1:
- missing_params_str = missing_params[0]
- elif len(missing_params) == 2:
- missing_params_str = " and ".join(missing_params)
- else:
- missing_params_str = (
- ", ".join(missing_params[:-1]) + ", and " + missing_params[-1]
- )
-
- error_str = f"{missing_params_str} cannot be None for the '{metric.__name__}' metric"
- metric.error = error_str
- raise MissingTestCaseParamsError(error_str)
-
-
- def check_mllm_test_cases_params(
- test_cases: List[LLMTestCase],
- test_case_params: List[LLMTestCaseParams],
- input_image_count: Optional[int],
- actual_output_image_count: Optional[int],
- metric: BaseMetric,
- ):
- for test_case in test_cases:
- check_mllm_test_case_params(
- test_case,
- test_case_params,
- input_image_count,
- actual_output_image_count,
- metric,
+ check_llm_test_case_params(
+ test_case, test_case_params, None, None, metric, model, multimodal
  )


@@ -398,6 +396,63 @@ def trimAndLoadJson(
  raise Exception(f"An unexpected error occurred: {str(e)}")


+ SchemaType = TypeVar("SchemaType")
+ ReturnType = TypeVar("ReturnType")
+
+
+ def generate_with_schema_and_extract(
+ metric: Union[BaseMetric, BaseArenaMetric, BaseConversationalMetric],
+ prompt: Any,
+ schema_cls: Type[SchemaType],
+ *,
+ extract_schema: Callable[[SchemaType], ReturnType],
+ extract_json: Callable[[Dict[str, Any]], ReturnType],
+ ) -> ReturnType:
+ """
+ Synchronous wrapper:
+ - calls model.generate_with_schema(...)
+ - accrues cost if applicable
+ - if schema instance -> extract_schema
+ else parse JSON -> extract_json
+ """
+ if metric.using_native_model:
+ result, cost = metric.model.generate_with_schema(
+ prompt, schema=schema_cls
+ )
+ metric._accrue_cost(cost)
+ else:
+ result = metric.model.generate_with_schema(prompt, schema=schema_cls)
+ if isinstance(result, schema_cls):
+ return extract_schema(result)
+ data = trimAndLoadJson(result, metric)
+ return extract_json(data)
+
+
+ async def a_generate_with_schema_and_extract(
+ metric: Union[BaseMetric, BaseArenaMetric, BaseConversationalMetric],
+ prompt: Any,
+ schema_cls: Type[SchemaType],
+ *,
+ extract_schema: Callable[[SchemaType], ReturnType],
+ extract_json: Callable[[Dict[str, Any]], ReturnType],
+ ) -> ReturnType:
+ if metric.using_native_model:
+ result, cost = await metric.model.a_generate_with_schema(
+ prompt, schema=schema_cls
+ )
+ metric._accrue_cost(cost)
+ else:
+ result = await metric.model.a_generate_with_schema(
+ prompt, schema=schema_cls
+ )
+
+ if isinstance(result, schema_cls):
+ return extract_schema(result)
+
+ data = trimAndLoadJson(result, metric)
+ return extract_json(data)
+
+
  ###############################################
  # Default Model Providers
  ###############################################
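
These two helpers centralize the native-model cost accrual and the schema-vs-raw-JSON handling that each metric previously duplicated (compare the turn_relevancy.py hunks above). A minimal usage sketch, assuming a hypothetical Pydantic schema and any metric object that exposes `model`, `using_native_model`, and `_accrue_cost` as the helper expects:

```python
# Not part of the diff: illustrative use of the new shared helper.
# `MyVerdict` and `generate_verdict` are hypothetical names.
from pydantic import BaseModel

from deepeval.metrics.utils import generate_with_schema_and_extract


class MyVerdict(BaseModel):
    verdict: str
    reason: str


def generate_verdict(metric, prompt: str) -> MyVerdict:
    # The helper accrues cost for native models and, when the model returns
    # raw text instead of a schema instance, parses it with trimAndLoadJson
    # and hands the resulting dict to extract_json.
    return generate_with_schema_and_extract(
        metric=metric,
        prompt=prompt,
        schema_cls=MyVerdict,
        extract_schema=lambda s: s,
        extract_json=lambda data: MyVerdict(**data),
    )
```
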
@@ -414,8 +469,8 @@ def should_use_local_model():


  def should_use_ollama_model():
- base_url = KEY_FILE_HANDLER.fetch_data(ModelKeyValues.LOCAL_MODEL_API_KEY)
- return base_url == "ollama"
+ value = KEY_FILE_HANDLER.fetch_data(ModelKeyValues.LOCAL_MODEL_API_KEY)
+ return value == "ollama"


  def should_use_gemini_model():
deepeval/models/base_model.py CHANGED
@@ -1,6 +1,18 @@
  from abc import ABC, abstractmethod
- from typing import Any, Optional, List
+ from typing import Any, Optional, List, Union
  from deepeval.models.utils import parse_model_name
+ from dataclasses import dataclass
+
+
+ @dataclass
+ class DeepEvalModelData:
+ supports_log_probs: Optional[bool] = None
+ supports_multimodal: Optional[bool] = None
+ supports_structured_outputs: Optional[bool] = None
+ supports_json: Optional[bool] = None
+ input_price: Optional[float] = None
+ output_price: Optional[float] = None
+ supports_temperature: Optional[bool] = True


  class DeepEvalBaseModel(ABC):
@@ -66,9 +78,6 @@ class DeepEvalBaseLLM(ABC):
  def get_model_name(self, *args, **kwargs) -> str:
  return self.name

- def supports_multimodal(self) -> bool:
- return False
-
  def batch_generate(self, *args, **kwargs) -> List[str]:
  """Runs the model to output LLM responses.

@@ -79,8 +88,37 @@ class DeepEvalBaseLLM(ABC):
  "batch_generate is not implemented for this model"
  )

- def supports_multimodal(self):
- return False
+ # Capabilities
+ def supports_log_probs(self) -> Union[bool, None]:
+ return None
+
+ def supports_temperature(self) -> Union[bool, None]:
+ return None
+
+ def supports_multimodal(self) -> Union[bool, None]:
+ return None
+
+ def supports_structured_outputs(self) -> Union[bool, None]:
+ return None
+
+ def supports_json_mode(self) -> Union[bool, None]:
+ return None
+
+ def generate_with_schema(self, *args, schema=None, **kwargs):
+ if schema is not None:
+ try:
+ return self.generate(*args, schema=schema, **kwargs)
+ except TypeError:
+ pass # this means provider doesn't accept schema kwarg
+ return self.generate(*args, **kwargs)
+
+ async def a_generate_with_schema(self, *args, schema=None, **kwargs):
+ if schema is not None:
+ try:
+ return await self.a_generate(*args, schema=schema, **kwargs)
+ except TypeError:
+ pass
+ return await self.a_generate(*args, **kwargs)


  class DeepEvalBaseEmbeddingModel(ABC):
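
The new `generate_with_schema` / `a_generate_with_schema` entry points let callers always pass a schema: if a custom model's `generate()` has no `schema` parameter, the resulting TypeError is caught and the call is retried without it. A minimal sketch of such a custom model, following deepeval's usual custom-model pattern (assuming `load_model`, `generate`, `a_generate`, and `get_model_name` are the overrides required by DeepEvalBaseLLM); the wrapped client and its `complete()` method are hypothetical:

```python
# Not part of the diff: a hypothetical custom model whose generate() takes no
# `schema` kwarg. generate_with_schema(prompt, schema=...) first triggers a
# TypeError, which the new base-class fallback catches before retrying with a
# plain generate(prompt).
from deepeval.models import DeepEvalBaseLLM


class PlainTextModel(DeepEvalBaseLLM):
    def __init__(self, client):
        self.client = client  # hypothetical text-completion client

    def load_model(self):
        return self.client

    def generate(self, prompt: str) -> str:  # note: no schema parameter
        return self.client.complete(prompt)

    async def a_generate(self, prompt: str) -> str:
        return self.client.complete(prompt)

    def get_model_name(self) -> str:
        return "plain-text-model"
```
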
deepeval/models/embedding_models/azure_embedding_model.py CHANGED
@@ -13,6 +13,7 @@ from deepeval.models.utils import (
  require_secret_api_key,
  normalize_kwargs_and_extract_aliases,
  )
+ from deepeval.utils import require_param


  retry_azure = create_retry_decorator(PS.AZURE)
@@ -31,7 +32,7 @@ class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):
  api_key: Optional[str] = None,
  base_url: Optional[str] = None,
  deployment_name: Optional[str] = None,
- openai_api_version: Optional[str] = None,
+ api_version: Optional[str] = None,
  generation_kwargs: Optional[Dict] = None,
  **kwargs,
  ):
@@ -53,25 +54,46 @@ class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):

  if api_key is not None:
  # keep it secret, keep it safe from serializings, logging and alike
- self.api_key: SecretStr | None = SecretStr(api_key)
+ self.api_key: Optional[SecretStr] = SecretStr(api_key)
  else:
  self.api_key = settings.AZURE_OPENAI_API_KEY

- self.openai_api_version = (
- openai_api_version or settings.OPENAI_API_VERSION
+ api_version = api_version or settings.OPENAI_API_VERSION
+ if base_url is not None:
+ base_url = str(base_url).rstrip("/")
+ elif settings.AZURE_OPENAI_ENDPOINT is not None:
+ base_url = str(settings.AZURE_OPENAI_ENDPOINT).rstrip("/")
+
+ deployment_name = (
+ deployment_name or settings.AZURE_EMBEDDING_DEPLOYMENT_NAME
+ )
+
+ model = model or settings.AZURE_EMBEDDING_MODEL_NAME or deployment_name
+
+ # validation
+ self.deployment_name = require_param(
+ deployment_name,
+ provider_label="AzureOpenAIEmbeddingModel",
+ env_var_name="AZURE_EMBEDDING_DEPLOYMENT_NAME",
+ param_hint="deployment_name",
  )
- self.base_url = (
- base_url
- or settings.AZURE_OPENAI_ENDPOINT
- and str(settings.AZURE_OPENAI_ENDPOINT)
+
+ self.base_url = require_param(
+ base_url,
+ provider_label="AzureOpenAIEmbeddingModel",
+ env_var_name="AZURE_OPENAI_ENDPOINT",
+ param_hint="base_url",
  )

- self.deployment_name = (
- deployment_name or settings.AZURE_EMBEDDING_DEPLOYMENT_NAME
+ self.api_version = require_param(
+ api_version,
+ provider_label="AzureOpenAIEmbeddingModel",
+ env_var_name="OPENAI_API_VERSION",
+ param_hint="api_version",
  )
+
  # Keep sanitized kwargs for client call to strip legacy keys
  self.kwargs = normalized_kwargs
- model = model or self.deployment_name
  self.generation_kwargs = generation_kwargs or {}
  super().__init__(model)

@@ -126,7 +148,7 @@ class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):

  client_init_kwargs = dict(
  api_key=api_key,
- api_version=self.openai_api_version,
+ api_version=self.api_version,
  azure_endpoint=self.base_url,
  azure_deployment=self.deployment_name,
  **client_kwargs,
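
For callers, the visible changes are the rename of `openai_api_version` to `api_version` and the stricter `require_param` validation of `deployment_name`, `base_url`, and `api_version`. A minimal construction sketch with placeholder values; anything omitted falls back to the settings referenced in the hunk above (AZURE_OPENAI_ENDPOINT, AZURE_EMBEDDING_DEPLOYMENT_NAME, OPENAI_API_VERSION, and so on):

```python
# Not part of the diff: constructing the embedding model after the rename.
# All values below are placeholders, not defaults.
from deepeval.models.embedding_models.azure_embedding_model import (
    AzureOpenAIEmbeddingModel,
)

embedder = AzureOpenAIEmbeddingModel(
    model="text-embedding-3-small",                   # placeholder model name
    api_key="<azure-openai-api-key>",                 # placeholder secret
    base_url="https://my-resource.openai.azure.com",  # placeholder endpoint
    deployment_name="embeddings",                     # placeholder deployment
    api_version="2024-06-01",                         # was `openai_api_version` in 3.7.5
)
```
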