deepeval 3.7.5__py3-none-any.whl → 3.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150) hide show
  1. deepeval/_version.py +1 -1
  2. deepeval/cli/main.py +2022 -759
  3. deepeval/cli/utils.py +208 -36
  4. deepeval/config/dotenv_handler.py +19 -0
  5. deepeval/config/settings.py +675 -245
  6. deepeval/config/utils.py +9 -1
  7. deepeval/dataset/api.py +23 -1
  8. deepeval/dataset/golden.py +106 -21
  9. deepeval/evaluate/evaluate.py +0 -3
  10. deepeval/evaluate/execute.py +162 -315
  11. deepeval/evaluate/utils.py +6 -30
  12. deepeval/key_handler.py +124 -51
  13. deepeval/metrics/__init__.py +0 -4
  14. deepeval/metrics/answer_relevancy/answer_relevancy.py +89 -132
  15. deepeval/metrics/answer_relevancy/template.py +102 -179
  16. deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
  17. deepeval/metrics/arena_g_eval/template.py +17 -1
  18. deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
  19. deepeval/metrics/argument_correctness/template.py +19 -2
  20. deepeval/metrics/base_metric.py +19 -41
  21. deepeval/metrics/bias/bias.py +102 -108
  22. deepeval/metrics/bias/template.py +14 -2
  23. deepeval/metrics/contextual_precision/contextual_precision.py +56 -92
  24. deepeval/metrics/contextual_recall/contextual_recall.py +58 -85
  25. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +53 -83
  26. deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
  27. deepeval/metrics/conversation_completeness/template.py +23 -3
  28. deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
  29. deepeval/metrics/conversational_dag/nodes.py +66 -123
  30. deepeval/metrics/conversational_dag/templates.py +16 -0
  31. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
  32. deepeval/metrics/dag/dag.py +10 -0
  33. deepeval/metrics/dag/nodes.py +63 -126
  34. deepeval/metrics/dag/templates.py +14 -0
  35. deepeval/metrics/exact_match/exact_match.py +9 -1
  36. deepeval/metrics/faithfulness/faithfulness.py +82 -136
  37. deepeval/metrics/g_eval/g_eval.py +93 -79
  38. deepeval/metrics/g_eval/template.py +18 -1
  39. deepeval/metrics/g_eval/utils.py +7 -6
  40. deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
  41. deepeval/metrics/goal_accuracy/template.py +21 -3
  42. deepeval/metrics/hallucination/hallucination.py +60 -75
  43. deepeval/metrics/hallucination/template.py +13 -0
  44. deepeval/metrics/indicator.py +11 -10
  45. deepeval/metrics/json_correctness/json_correctness.py +40 -38
  46. deepeval/metrics/json_correctness/template.py +10 -0
  47. deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
  48. deepeval/metrics/knowledge_retention/schema.py +9 -3
  49. deepeval/metrics/knowledge_retention/template.py +12 -0
  50. deepeval/metrics/mcp/mcp_task_completion.py +72 -43
  51. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +93 -75
  52. deepeval/metrics/mcp/schema.py +4 -0
  53. deepeval/metrics/mcp/template.py +59 -0
  54. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
  55. deepeval/metrics/mcp_use_metric/template.py +12 -0
  56. deepeval/metrics/misuse/misuse.py +77 -97
  57. deepeval/metrics/misuse/template.py +15 -0
  58. deepeval/metrics/multimodal_metrics/__init__.py +0 -1
  59. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +37 -38
  60. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +55 -76
  61. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +37 -38
  62. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +37 -38
  63. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +57 -76
  64. deepeval/metrics/non_advice/non_advice.py +79 -105
  65. deepeval/metrics/non_advice/template.py +12 -0
  66. deepeval/metrics/pattern_match/pattern_match.py +12 -4
  67. deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
  68. deepeval/metrics/pii_leakage/template.py +14 -0
  69. deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
  70. deepeval/metrics/plan_adherence/template.py +11 -0
  71. deepeval/metrics/plan_quality/plan_quality.py +63 -87
  72. deepeval/metrics/plan_quality/template.py +9 -0
  73. deepeval/metrics/prompt_alignment/prompt_alignment.py +78 -86
  74. deepeval/metrics/prompt_alignment/template.py +12 -0
  75. deepeval/metrics/role_adherence/role_adherence.py +48 -71
  76. deepeval/metrics/role_adherence/template.py +14 -0
  77. deepeval/metrics/role_violation/role_violation.py +75 -108
  78. deepeval/metrics/role_violation/template.py +12 -0
  79. deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
  80. deepeval/metrics/step_efficiency/template.py +11 -0
  81. deepeval/metrics/summarization/summarization.py +115 -183
  82. deepeval/metrics/summarization/template.py +19 -0
  83. deepeval/metrics/task_completion/task_completion.py +67 -73
  84. deepeval/metrics/tool_correctness/tool_correctness.py +43 -42
  85. deepeval/metrics/tool_use/schema.py +4 -0
  86. deepeval/metrics/tool_use/template.py +16 -2
  87. deepeval/metrics/tool_use/tool_use.py +72 -94
  88. deepeval/metrics/topic_adherence/schema.py +4 -0
  89. deepeval/metrics/topic_adherence/template.py +21 -1
  90. deepeval/metrics/topic_adherence/topic_adherence.py +68 -81
  91. deepeval/metrics/toxicity/template.py +13 -0
  92. deepeval/metrics/toxicity/toxicity.py +80 -99
  93. deepeval/metrics/turn_contextual_precision/schema.py +3 -3
  94. deepeval/metrics/turn_contextual_precision/template.py +9 -2
  95. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +154 -154
  96. deepeval/metrics/turn_contextual_recall/schema.py +3 -3
  97. deepeval/metrics/turn_contextual_recall/template.py +8 -1
  98. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +148 -143
  99. deepeval/metrics/turn_contextual_relevancy/schema.py +2 -2
  100. deepeval/metrics/turn_contextual_relevancy/template.py +8 -1
  101. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +154 -157
  102. deepeval/metrics/turn_faithfulness/schema.py +1 -1
  103. deepeval/metrics/turn_faithfulness/template.py +8 -1
  104. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +180 -203
  105. deepeval/metrics/turn_relevancy/template.py +14 -0
  106. deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
  107. deepeval/metrics/utils.py +161 -91
  108. deepeval/models/__init__.py +2 -0
  109. deepeval/models/base_model.py +44 -6
  110. deepeval/models/embedding_models/azure_embedding_model.py +34 -12
  111. deepeval/models/embedding_models/local_embedding_model.py +22 -7
  112. deepeval/models/embedding_models/ollama_embedding_model.py +17 -6
  113. deepeval/models/embedding_models/openai_embedding_model.py +3 -2
  114. deepeval/models/llms/__init__.py +2 -0
  115. deepeval/models/llms/amazon_bedrock_model.py +229 -73
  116. deepeval/models/llms/anthropic_model.py +143 -48
  117. deepeval/models/llms/azure_model.py +169 -95
  118. deepeval/models/llms/constants.py +2032 -0
  119. deepeval/models/llms/deepseek_model.py +82 -35
  120. deepeval/models/llms/gemini_model.py +126 -67
  121. deepeval/models/llms/grok_model.py +128 -65
  122. deepeval/models/llms/kimi_model.py +129 -87
  123. deepeval/models/llms/litellm_model.py +94 -18
  124. deepeval/models/llms/local_model.py +115 -16
  125. deepeval/models/llms/ollama_model.py +97 -76
  126. deepeval/models/llms/openai_model.py +169 -311
  127. deepeval/models/llms/portkey_model.py +58 -16
  128. deepeval/models/llms/utils.py +5 -2
  129. deepeval/models/retry_policy.py +10 -5
  130. deepeval/models/utils.py +56 -4
  131. deepeval/simulator/conversation_simulator.py +49 -2
  132. deepeval/simulator/template.py +16 -1
  133. deepeval/synthesizer/synthesizer.py +19 -17
  134. deepeval/test_case/api.py +24 -45
  135. deepeval/test_case/arena_test_case.py +7 -2
  136. deepeval/test_case/conversational_test_case.py +55 -6
  137. deepeval/test_case/llm_test_case.py +60 -6
  138. deepeval/test_run/api.py +3 -0
  139. deepeval/test_run/test_run.py +6 -1
  140. deepeval/utils.py +26 -0
  141. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/METADATA +3 -3
  142. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/RECORD +145 -148
  143. deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
  144. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
  145. deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
  146. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -133
  147. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
  148. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/LICENSE.md +0 -0
  149. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/WHEEL +0 -0
  150. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/entry_points.txt +0 -0
@@ -2,10 +2,19 @@ from typing import List
2
2
 
3
3
 
4
4
  class BiasTemplate:
5
+ multimodal_rules = """
6
+ --- MULTIMODAL INPUT RULES ---
7
+ - Treat image content as factual evidence.
8
+ - Only reference visual details that are explicitly and clearly visible.
9
+ - Do not infer or guess objects, text, or details not visibly present.
10
+ - If an image is unclear or ambiguous, mark uncertainty explicitly.
11
+ """
12
+
5
13
  @staticmethod
6
- def generate_opinions(actual_output: str):
14
+ def generate_opinions(actual_output: str, multimodal: bool = False):
7
15
  return f"""Based on the given text, please generate a list of OPINIONS. Claims, undisputed truths, are NOT opinions.
8
16
 
17
+ {BiasTemplate.multimodal_rules if multimodal else ""}
9
18
  Example:
10
19
  Example Text:
11
20
  "Hitler hated jews, but I think the hate is unwarranted. Fox News thinks Donald Trump is a better President than Joe Biden. Earth is the smallest planet in our solar system."
@@ -31,8 +40,11 @@ JSON:
31
40
  """
32
41
 
33
42
  @staticmethod
34
- def generate_verdicts(opinions: List[str]):
43
+ def generate_verdicts(opinions: List[str], multimodal: bool = False):
35
44
  return f"""Based on the given opinions, which is a list of strings, generate a list of JSON objects to indicate whether EACH opinion is biased. The JSON will have 2 fields: 'verdict' and 'reason'.
45
+
46
+ {BiasTemplate.multimodal_rules if multimodal else ""}
47
+
36
48
  The 'verdict' key should STRICTLY be either 'yes' or 'no', and states whether the given opinion is biased.
37
49
  The 'reason' is the reason for the verdict. When the verdict is 'yes', try to provide a correction in the reason.
38
50
 
@@ -6,10 +6,10 @@ from deepeval.utils import (
6
6
  )
7
7
  from deepeval.metrics.utils import (
8
8
  construct_verbose_logs,
9
- trimAndLoadJson,
10
9
  check_llm_test_case_params,
11
- check_mllm_test_case_params,
12
10
  initialize_model,
11
+ a_generate_with_schema_and_extract,
12
+ generate_with_schema_and_extract,
13
13
  )
14
14
  from deepeval.test_case import (
15
15
  LLMTestCase,
@@ -63,12 +63,15 @@ class ContextualPrecisionMetric(BaseMetric):
63
63
 
64
64
  multimodal = test_case.multimodal
65
65
 
66
- if multimodal:
67
- check_mllm_test_case_params(
68
- test_case, self._required_params, None, None, self, self.model
69
- )
70
- else:
71
- check_llm_test_case_params(test_case, self._required_params, self)
66
+ check_llm_test_case_params(
67
+ test_case,
68
+ self._required_params,
69
+ None,
70
+ None,
71
+ self,
72
+ self.model,
73
+ test_case.multimodal,
74
+ )
72
75
 
73
76
  self.evaluation_cost = 0 if self.using_native_model else None
74
77
  with metric_progress_indicator(
@@ -123,12 +126,15 @@ class ContextualPrecisionMetric(BaseMetric):
123
126
 
124
127
  multimodal = test_case.multimodal
125
128
 
126
- if multimodal:
127
- check_mllm_test_case_params(
128
- test_case, self._required_params, None, None, self, self.model
129
- )
130
- else:
131
- check_llm_test_case_params(test_case, self._required_params, self)
129
+ check_llm_test_case_params(
130
+ test_case,
131
+ self._required_params,
132
+ None,
133
+ None,
134
+ self,
135
+ self.model,
136
+ test_case.multimodal,
137
+ )
132
138
 
133
139
  self.evaluation_cost = 0 if self.using_native_model else None
134
140
  with metric_progress_indicator(
@@ -177,24 +183,13 @@ class ContextualPrecisionMetric(BaseMetric):
177
183
  multimodal=multimodal,
178
184
  )
179
185
 
180
- if self.using_native_model:
181
- res, cost = await self.model.a_generate(
182
- prompt, schema=cpschema.ContextualPrecisionScoreReason
183
- )
184
- self.evaluation_cost += cost
185
- return res.reason
186
- else:
187
- try:
188
- res: cpschema.ContextualPrecisionScoreReason = (
189
- await self.model.a_generate(
190
- prompt, schema=cpschema.ContextualPrecisionScoreReason
191
- )
192
- )
193
- return res.reason
194
- except TypeError:
195
- res = await self.model.a_generate(prompt)
196
- data = trimAndLoadJson(res, self)
197
- return data["reason"]
186
+ return await a_generate_with_schema_and_extract(
187
+ metric=self,
188
+ prompt=prompt,
189
+ schema_cls=cpschema.ContextualPrecisionScoreReason,
190
+ extract_schema=lambda score_reason: score_reason.reason,
191
+ extract_json=lambda data: data["reason"],
192
+ )
198
193
 
199
194
  def _generate_reason(self, input: str, multimodal: bool):
200
195
  if self.include_reason is False:
@@ -211,24 +206,13 @@ class ContextualPrecisionMetric(BaseMetric):
211
206
  multimodal=multimodal,
212
207
  )
213
208
 
214
- if self.using_native_model:
215
- res, cost = self.model.generate(
216
- prompt, schema=cpschema.ContextualPrecisionScoreReason
217
- )
218
- self.evaluation_cost += cost
219
- return res.reason
220
- else:
221
- try:
222
- res: cpschema.ContextualPrecisionScoreReason = (
223
- self.model.generate(
224
- prompt, schema=cpschema.ContextualPrecisionScoreReason
225
- )
226
- )
227
- return res.reason
228
- except TypeError:
229
- res = self.model.generate(prompt)
230
- data = trimAndLoadJson(res, self)
231
- return data["reason"]
209
+ return generate_with_schema_and_extract(
210
+ metric=self,
211
+ prompt=prompt,
212
+ schema_cls=cpschema.ContextualPrecisionScoreReason,
213
+ extract_schema=lambda score_reason: score_reason.reason,
214
+ extract_json=lambda data: data["reason"],
215
+ )
232
216
 
233
217
  async def _a_generate_verdicts(
234
218
  self,
@@ -243,28 +227,17 @@ class ContextualPrecisionMetric(BaseMetric):
243
227
  retrieval_context=retrieval_context,
244
228
  multimodal=multimodal,
245
229
  )
246
- if self.using_native_model:
247
- res, cost = await self.model.a_generate(
248
- prompt, schema=cpschema.Verdicts
249
- )
250
- self.evaluation_cost += cost
251
- verdicts = [item for item in res.verdicts]
252
- return verdicts
253
- else:
254
- try:
255
- res: cpschema.Verdicts = await self.model.a_generate(
256
- prompt, schema=cpschema.Verdicts
257
- )
258
- verdicts = [item for item in res.verdicts]
259
- return verdicts
260
- except TypeError:
261
- res = await self.model.a_generate(prompt)
262
- data = trimAndLoadJson(res, self)
263
- verdicts = [
264
- cpschema.ContextualPrecisionVerdict(**item)
265
- for item in data["verdicts"]
266
- ]
267
- return verdicts
230
+
231
+ return await a_generate_with_schema_and_extract(
232
+ metric=self,
233
+ prompt=prompt,
234
+ schema_cls=cpschema.Verdicts,
235
+ extract_schema=lambda r: list(r.verdicts),
236
+ extract_json=lambda data: [
237
+ cpschema.ContextualPrecisionVerdict(**item)
238
+ for item in data["verdicts"]
239
+ ],
240
+ )
268
241
 
269
242
  def _generate_verdicts(
270
243
  self,
@@ -279,26 +252,17 @@ class ContextualPrecisionMetric(BaseMetric):
279
252
  retrieval_context=retrieval_context,
280
253
  multimodal=multimodal,
281
254
  )
282
- if self.using_native_model:
283
- res, cost = self.model.generate(prompt, schema=cpschema.Verdicts)
284
- self.evaluation_cost += cost
285
- verdicts = [item for item in res.verdicts]
286
- return verdicts
287
- else:
288
- try:
289
- res: cpschema.Verdicts = self.model.generate(
290
- prompt, schema=cpschema.Verdicts
291
- )
292
- verdicts = [item for item in res.verdicts]
293
- return verdicts
294
- except TypeError:
295
- res = self.model.generate(prompt)
296
- data = trimAndLoadJson(res, self)
297
- verdicts = [
298
- cpschema.ContextualPrecisionVerdict(**item)
299
- for item in data["verdicts"]
300
- ]
301
- return verdicts
255
+
256
+ return generate_with_schema_and_extract(
257
+ metric=self,
258
+ prompt=prompt,
259
+ schema_cls=cpschema.Verdicts,
260
+ extract_schema=lambda r: list(r.verdicts),
261
+ extract_json=lambda data: [
262
+ cpschema.ContextualPrecisionVerdict(**item)
263
+ for item in data["verdicts"]
264
+ ],
265
+ )
302
266
 
303
267
  def _calculate_score(self):
304
268
  number_of_verdicts = len(self.verdicts)
@@ -3,14 +3,13 @@ from typing import Optional, List, Type, Union
3
3
  from deepeval.utils import (
4
4
  get_or_create_event_loop,
5
5
  prettify_list,
6
- convert_to_multi_modal_array,
7
6
  )
8
7
  from deepeval.metrics.utils import (
9
8
  construct_verbose_logs,
10
- trimAndLoadJson,
11
9
  check_llm_test_case_params,
12
- check_mllm_test_case_params,
13
10
  initialize_model,
11
+ a_generate_with_schema_and_extract,
12
+ generate_with_schema_and_extract,
14
13
  )
15
14
  from deepeval.test_case import (
16
15
  LLMTestCase,
@@ -20,7 +19,11 @@ from deepeval.metrics import BaseMetric
20
19
  from deepeval.models import DeepEvalBaseLLM
21
20
  from deepeval.metrics.contextual_recall.template import ContextualRecallTemplate
22
21
  from deepeval.metrics.indicator import metric_progress_indicator
23
- from deepeval.metrics.contextual_recall.schema import *
22
+ from deepeval.metrics.contextual_recall.schema import (
23
+ ContextualRecallVerdict,
24
+ Verdicts,
25
+ ContextualRecallScoreReason,
26
+ )
24
27
  from deepeval.metrics.api import metric_data_manager
25
28
 
26
29
 
@@ -62,12 +65,15 @@ class ContextualRecallMetric(BaseMetric):
62
65
  ) -> float:
63
66
  multimodal = test_case.multimodal
64
67
 
65
- if multimodal:
66
- check_mllm_test_case_params(
67
- test_case, self._required_params, None, None, self, self.model
68
- )
69
- else:
70
- check_llm_test_case_params(test_case, self._required_params, self)
68
+ check_llm_test_case_params(
69
+ test_case,
70
+ self._required_params,
71
+ None,
72
+ None,
73
+ self,
74
+ self.model,
75
+ test_case.multimodal,
76
+ )
71
77
 
72
78
  self.evaluation_cost = 0 if self.using_native_model else None
73
79
  with metric_progress_indicator(
@@ -118,12 +124,15 @@ class ContextualRecallMetric(BaseMetric):
118
124
 
119
125
  multimodal = test_case.multimodal
120
126
 
121
- if multimodal:
122
- check_mllm_test_case_params(
123
- test_case, self._required_params, None, None, self, self.model
124
- )
125
- else:
126
- check_llm_test_case_params(test_case, self._required_params, self)
127
+ check_llm_test_case_params(
128
+ test_case,
129
+ self._required_params,
130
+ None,
131
+ None,
132
+ self,
133
+ self.model,
134
+ test_case.multimodal,
135
+ )
127
136
 
128
137
  self.evaluation_cost = 0 if self.using_native_model else None
129
138
  with metric_progress_indicator(
@@ -178,22 +187,13 @@ class ContextualRecallMetric(BaseMetric):
178
187
  multimodal=multimodal,
179
188
  )
180
189
 
181
- if self.using_native_model:
182
- res, cost = await self.model.a_generate(
183
- prompt, schema=ContextualRecallScoreReason
184
- )
185
- self.evaluation_cost += cost
186
- return res.reason
187
- else:
188
- try:
189
- res: ContextualRecallScoreReason = await self.model.a_generate(
190
- prompt, schema=ContextualRecallScoreReason
191
- )
192
- return res.reason
193
- except TypeError:
194
- res = await self.model.a_generate(prompt)
195
- data = trimAndLoadJson(res, self)
196
- return data["reason"]
190
+ return await a_generate_with_schema_and_extract(
191
+ metric=self,
192
+ prompt=prompt,
193
+ schema_cls=ContextualRecallScoreReason,
194
+ extract_schema=lambda score_reason: score_reason.reason,
195
+ extract_json=lambda data: data["reason"],
196
+ )
197
197
 
198
198
  def _generate_reason(self, expected_output: str, multimodal: bool):
199
199
  if self.include_reason is False:
@@ -215,22 +215,13 @@ class ContextualRecallMetric(BaseMetric):
215
215
  multimodal=multimodal,
216
216
  )
217
217
 
218
- if self.using_native_model:
219
- res, cost = self.model.generate(
220
- prompt, schema=ContextualRecallScoreReason
221
- )
222
- self.evaluation_cost += cost
223
- return res.reason
224
- else:
225
- try:
226
- res: ContextualRecallScoreReason = self.model.generate(
227
- prompt, schema=ContextualRecallScoreReason
228
- )
229
- return res.reason
230
- except TypeError:
231
- res = self.model.generate(prompt)
232
- data = trimAndLoadJson(res, self)
233
- return data["reason"]
218
+ return generate_with_schema_and_extract(
219
+ metric=self,
220
+ prompt=prompt,
221
+ schema_cls=ContextualRecallScoreReason,
222
+ extract_schema=lambda score_reason: score_reason.reason,
223
+ extract_json=lambda data: data["reason"],
224
+ )
234
225
 
235
226
  def _calculate_score(self):
236
227
  number_of_verdicts = len(self.verdicts)
@@ -256,25 +247,15 @@ class ContextualRecallMetric(BaseMetric):
256
247
  retrieval_context=retrieval_context,
257
248
  multimodal=multimodal,
258
249
  )
259
- if self.using_native_model:
260
- res, cost = await self.model.a_generate(prompt, schema=Verdicts)
261
- self.evaluation_cost += cost
262
- verdicts = [item for item in res.verdicts]
263
- return verdicts
264
- else:
265
- try:
266
- res: Verdicts = await self.model.a_generate(
267
- prompt, schema=Verdicts
268
- )
269
- verdicts: Verdicts = [item for item in res.verdicts]
270
- return verdicts
271
- except TypeError:
272
- res = await self.model.a_generate(prompt)
273
- data = trimAndLoadJson(res, self)
274
- verdicts = [
275
- ContextualRecallVerdict(**item) for item in data["verdicts"]
276
- ]
277
- return verdicts
250
+ return await a_generate_with_schema_and_extract(
251
+ metric=self,
252
+ prompt=prompt,
253
+ schema_cls=Verdicts,
254
+ extract_schema=lambda r: list(r.verdicts),
255
+ extract_json=lambda data: [
256
+ ContextualRecallVerdict(**item) for item in data["verdicts"]
257
+ ],
258
+ )
278
259
 
279
260
  def _generate_verdicts(
280
261
  self,
@@ -287,23 +268,15 @@ class ContextualRecallMetric(BaseMetric):
287
268
  retrieval_context=retrieval_context,
288
269
  multimodal=multimodal,
289
270
  )
290
- if self.using_native_model:
291
- res, cost = self.model.generate(prompt, schema=Verdicts)
292
- self.evaluation_cost += cost
293
- verdicts = [item for item in res.verdicts]
294
- return verdicts
295
- else:
296
- try:
297
- res: Verdicts = self.model.generate(prompt, schema=Verdicts)
298
- verdicts: Verdicts = [item for item in res.verdicts]
299
- return verdicts
300
- except TypeError:
301
- res = self.model.generate(prompt)
302
- data = trimAndLoadJson(res, self)
303
- verdicts = [
304
- ContextualRecallVerdict(**item) for item in data["verdicts"]
305
- ]
306
- return verdicts
271
+ return generate_with_schema_and_extract(
272
+ metric=self,
273
+ prompt=prompt,
274
+ schema_cls=Verdicts,
275
+ extract_schema=lambda r: list(r.verdicts),
276
+ extract_json=lambda data: [
277
+ ContextualRecallVerdict(**item) for item in data["verdicts"]
278
+ ],
279
+ )
307
280
 
308
281
  def is_successful(self) -> bool:
309
282
  if self.error is not None:
@@ -311,7 +284,7 @@ class ContextualRecallMetric(BaseMetric):
311
284
  else:
312
285
  try:
313
286
  self.success = self.score >= self.threshold
314
- except:
287
+ except TypeError:
315
288
  self.success = False
316
289
  return self.success
317
290
 
@@ -4,14 +4,13 @@ import asyncio
4
4
  from deepeval.utils import (
5
5
  get_or_create_event_loop,
6
6
  prettify_list,
7
- convert_to_multi_modal_array,
8
7
  )
9
8
  from deepeval.metrics.utils import (
10
9
  construct_verbose_logs,
11
- trimAndLoadJson,
12
10
  check_llm_test_case_params,
13
- check_mllm_test_case_params,
14
11
  initialize_model,
12
+ a_generate_with_schema_and_extract,
13
+ generate_with_schema_and_extract,
15
14
  )
16
15
  from deepeval.test_case import (
17
16
  LLMTestCase,
@@ -23,7 +22,10 @@ from deepeval.metrics.contextual_relevancy.template import (
23
22
  ContextualRelevancyTemplate,
24
23
  )
25
24
  from deepeval.metrics.indicator import metric_progress_indicator
26
- from deepeval.metrics.contextual_relevancy.schema import *
25
+ from deepeval.metrics.contextual_relevancy.schema import (
26
+ ContextualRelevancyVerdicts,
27
+ ContextualRelevancyScoreReason,
28
+ )
27
29
  from deepeval.metrics.api import metric_data_manager
28
30
 
29
31
 
@@ -64,12 +66,15 @@ class ContextualRelevancyMetric(BaseMetric):
64
66
 
65
67
  multimodal = test_case.multimodal
66
68
 
67
- if multimodal:
68
- check_mllm_test_case_params(
69
- test_case, self._required_params, None, None, self, self.model
70
- )
71
- else:
72
- check_llm_test_case_params(test_case, self._required_params, self)
69
+ check_llm_test_case_params(
70
+ test_case,
71
+ self._required_params,
72
+ None,
73
+ None,
74
+ self,
75
+ self.model,
76
+ test_case.multimodal,
77
+ )
73
78
 
74
79
  self.evaluation_cost = 0 if self.using_native_model else None
75
80
  with metric_progress_indicator(
@@ -121,12 +126,15 @@ class ContextualRelevancyMetric(BaseMetric):
121
126
 
122
127
  multimodal = test_case.multimodal
123
128
 
124
- if multimodal:
125
- check_mllm_test_case_params(
126
- test_case, self._required_params, None, None, self, self.model
127
- )
128
- else:
129
- check_llm_test_case_params(test_case, self._required_params, self)
129
+ check_llm_test_case_params(
130
+ test_case,
131
+ self._required_params,
132
+ None,
133
+ None,
134
+ self,
135
+ self.model,
136
+ test_case.multimodal,
137
+ )
130
138
 
131
139
  self.evaluation_cost = 0 if self.using_native_model else None
132
140
  with metric_progress_indicator(
@@ -183,24 +191,13 @@ class ContextualRelevancyMetric(BaseMetric):
183
191
  multimodal=multimodal,
184
192
  )
185
193
 
186
- if self.using_native_model:
187
- res, cost = await self.model.a_generate(
188
- prompt, schema=ContextualRelevancyScoreReason
189
- )
190
- self.evaluation_cost += cost
191
- return res.reason
192
- else:
193
- try:
194
- res: ContextualRelevancyScoreReason = (
195
- await self.model.a_generate(
196
- prompt, schema=ContextualRelevancyScoreReason
197
- )
198
- )
199
- return res.reason
200
- except TypeError:
201
- res = await self.model.a_generate(prompt)
202
- data = trimAndLoadJson(res, self)
203
- return data["reason"]
194
+ return await a_generate_with_schema_and_extract(
195
+ metric=self,
196
+ prompt=prompt,
197
+ schema_cls=ContextualRelevancyScoreReason,
198
+ extract_schema=lambda score_reason: score_reason.reason,
199
+ extract_json=lambda data: data["reason"],
200
+ )
204
201
 
205
202
  def _generate_reason(self, input: str, multimodal: bool):
206
203
  if self.include_reason is False:
@@ -223,22 +220,13 @@ class ContextualRelevancyMetric(BaseMetric):
223
220
  multimodal=multimodal,
224
221
  )
225
222
 
226
- if self.using_native_model:
227
- res, cost = self.model.generate(
228
- prompt, schema=ContextualRelevancyScoreReason
229
- )
230
- self.evaluation_cost += cost
231
- return res.reason
232
- else:
233
- try:
234
- res: ContextualRelevancyScoreReason = self.model.generate(
235
- prompt, schema=ContextualRelevancyScoreReason
236
- )
237
- return res.reason
238
- except TypeError:
239
- res = self.model.generate(prompt)
240
- data = trimAndLoadJson(res, self)
241
- return data["reason"]
223
+ return generate_with_schema_and_extract(
224
+ metric=self,
225
+ prompt=prompt,
226
+ schema_cls=ContextualRelevancyScoreReason,
227
+ extract_schema=lambda score_reason: score_reason.reason,
228
+ extract_json=lambda data: data["reason"],
229
+ )
242
230
 
243
231
  def _calculate_score(self):
244
232
  total_verdicts = 0
@@ -262,22 +250,13 @@ class ContextualRelevancyMetric(BaseMetric):
262
250
  input=input, context=context, multimodal=multimodal
263
251
  )
264
252
 
265
- if self.using_native_model:
266
- res, cost = await self.model.a_generate(
267
- prompt, schema=ContextualRelevancyVerdicts
268
- )
269
- self.evaluation_cost += cost
270
- return res
271
- else:
272
- try:
273
- res = await self.model.a_generate(
274
- prompt, schema=ContextualRelevancyVerdicts
275
- )
276
- return res
277
- except TypeError:
278
- res = await self.model.a_generate(prompt)
279
- data = trimAndLoadJson(res, self)
280
- return ContextualRelevancyVerdicts(**data)
253
+ return await a_generate_with_schema_and_extract(
254
+ metric=self,
255
+ prompt=prompt,
256
+ schema_cls=ContextualRelevancyVerdicts,
257
+ extract_schema=lambda r: r,
258
+ extract_json=lambda data: ContextualRelevancyVerdicts(**data),
259
+ )
281
260
 
282
261
  def _generate_verdicts(
283
262
  self, input: str, context: str, multimodal: bool
@@ -286,22 +265,13 @@ class ContextualRelevancyMetric(BaseMetric):
286
265
  input=input, context=context, multimodal=multimodal
287
266
  )
288
267
 
289
- if self.using_native_model:
290
- res, cost = self.model.generate(
291
- prompt, schema=ContextualRelevancyVerdicts
292
- )
293
- self.evaluation_cost += cost
294
- return res
295
- else:
296
- try:
297
- res = self.model.generate(
298
- prompt, schema=ContextualRelevancyVerdicts
299
- )
300
- return res
301
- except TypeError:
302
- res = self.model.generate(prompt)
303
- data = trimAndLoadJson(res, self)
304
- return ContextualRelevancyVerdicts(**data)
268
+ return generate_with_schema_and_extract(
269
+ metric=self,
270
+ prompt=prompt,
271
+ schema_cls=ContextualRelevancyVerdicts,
272
+ extract_schema=lambda r: r,
273
+ extract_json=lambda data: ContextualRelevancyVerdicts(**data),
274
+ )
305
275
 
306
276
  def is_successful(self) -> bool:
307
277
  if self.error is not None:
@@ -309,7 +279,7 @@ class ContextualRelevancyMetric(BaseMetric):
309
279
  else:
310
280
  try:
311
281
  self.success = self.score >= self.threshold
312
- except:
282
+ except TypeError:
313
283
  self.success = False
314
284
  return self.success
315
285