deepeval 3.7.5__py3-none-any.whl → 3.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. deepeval/_version.py +1 -1
  2. deepeval/config/settings.py +35 -1
  3. deepeval/dataset/api.py +23 -1
  4. deepeval/dataset/golden.py +106 -21
  5. deepeval/evaluate/evaluate.py +0 -3
  6. deepeval/evaluate/execute.py +10 -222
  7. deepeval/evaluate/utils.py +6 -30
  8. deepeval/key_handler.py +3 -0
  9. deepeval/metrics/__init__.py +0 -4
  10. deepeval/metrics/answer_relevancy/answer_relevancy.py +89 -132
  11. deepeval/metrics/answer_relevancy/template.py +102 -179
  12. deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
  13. deepeval/metrics/arena_g_eval/template.py +17 -1
  14. deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
  15. deepeval/metrics/argument_correctness/template.py +19 -2
  16. deepeval/metrics/base_metric.py +13 -41
  17. deepeval/metrics/bias/bias.py +102 -108
  18. deepeval/metrics/bias/template.py +14 -2
  19. deepeval/metrics/contextual_precision/contextual_precision.py +56 -92
  20. deepeval/metrics/contextual_recall/contextual_recall.py +58 -85
  21. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +53 -83
  22. deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
  23. deepeval/metrics/conversation_completeness/template.py +23 -3
  24. deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
  25. deepeval/metrics/conversational_dag/nodes.py +66 -123
  26. deepeval/metrics/conversational_dag/templates.py +16 -0
  27. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
  28. deepeval/metrics/dag/dag.py +10 -0
  29. deepeval/metrics/dag/nodes.py +63 -126
  30. deepeval/metrics/dag/templates.py +14 -0
  31. deepeval/metrics/exact_match/exact_match.py +9 -1
  32. deepeval/metrics/faithfulness/faithfulness.py +82 -136
  33. deepeval/metrics/g_eval/g_eval.py +87 -78
  34. deepeval/metrics/g_eval/template.py +18 -1
  35. deepeval/metrics/g_eval/utils.py +7 -6
  36. deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
  37. deepeval/metrics/goal_accuracy/template.py +21 -3
  38. deepeval/metrics/hallucination/hallucination.py +60 -75
  39. deepeval/metrics/hallucination/template.py +13 -0
  40. deepeval/metrics/indicator.py +3 -6
  41. deepeval/metrics/json_correctness/json_correctness.py +40 -38
  42. deepeval/metrics/json_correctness/template.py +10 -0
  43. deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
  44. deepeval/metrics/knowledge_retention/schema.py +9 -3
  45. deepeval/metrics/knowledge_retention/template.py +12 -0
  46. deepeval/metrics/mcp/mcp_task_completion.py +68 -38
  47. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
  48. deepeval/metrics/mcp/template.py +52 -0
  49. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
  50. deepeval/metrics/mcp_use_metric/template.py +12 -0
  51. deepeval/metrics/misuse/misuse.py +77 -97
  52. deepeval/metrics/misuse/template.py +15 -0
  53. deepeval/metrics/multimodal_metrics/__init__.py +0 -1
  54. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +37 -38
  55. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +55 -76
  56. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +37 -38
  57. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +37 -38
  58. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +57 -76
  59. deepeval/metrics/non_advice/non_advice.py +79 -105
  60. deepeval/metrics/non_advice/template.py +12 -0
  61. deepeval/metrics/pattern_match/pattern_match.py +12 -4
  62. deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
  63. deepeval/metrics/pii_leakage/template.py +14 -0
  64. deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
  65. deepeval/metrics/plan_adherence/template.py +11 -0
  66. deepeval/metrics/plan_quality/plan_quality.py +63 -87
  67. deepeval/metrics/plan_quality/template.py +9 -0
  68. deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
  69. deepeval/metrics/prompt_alignment/template.py +12 -0
  70. deepeval/metrics/role_adherence/role_adherence.py +48 -71
  71. deepeval/metrics/role_adherence/template.py +14 -0
  72. deepeval/metrics/role_violation/role_violation.py +75 -108
  73. deepeval/metrics/role_violation/template.py +12 -0
  74. deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
  75. deepeval/metrics/step_efficiency/template.py +11 -0
  76. deepeval/metrics/summarization/summarization.py +115 -183
  77. deepeval/metrics/summarization/template.py +19 -0
  78. deepeval/metrics/task_completion/task_completion.py +67 -73
  79. deepeval/metrics/tool_correctness/tool_correctness.py +43 -42
  80. deepeval/metrics/tool_use/tool_use.py +42 -66
  81. deepeval/metrics/topic_adherence/template.py +13 -0
  82. deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
  83. deepeval/metrics/toxicity/template.py +13 -0
  84. deepeval/metrics/toxicity/toxicity.py +80 -99
  85. deepeval/metrics/turn_contextual_precision/schema.py +3 -3
  86. deepeval/metrics/turn_contextual_precision/template.py +1 -1
  87. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +110 -68
  88. deepeval/metrics/turn_contextual_recall/schema.py +3 -3
  89. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +104 -61
  90. deepeval/metrics/turn_contextual_relevancy/schema.py +2 -2
  91. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +106 -65
  92. deepeval/metrics/turn_faithfulness/schema.py +1 -1
  93. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +104 -73
  94. deepeval/metrics/turn_relevancy/template.py +14 -0
  95. deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
  96. deepeval/metrics/utils.py +145 -90
  97. deepeval/models/base_model.py +44 -6
  98. deepeval/models/embedding_models/azure_embedding_model.py +34 -12
  99. deepeval/models/embedding_models/local_embedding_model.py +22 -7
  100. deepeval/models/embedding_models/ollama_embedding_model.py +17 -6
  101. deepeval/models/embedding_models/openai_embedding_model.py +3 -2
  102. deepeval/models/llms/amazon_bedrock_model.py +226 -71
  103. deepeval/models/llms/anthropic_model.py +141 -47
  104. deepeval/models/llms/azure_model.py +167 -94
  105. deepeval/models/llms/constants.py +2032 -0
  106. deepeval/models/llms/deepseek_model.py +79 -29
  107. deepeval/models/llms/gemini_model.py +126 -67
  108. deepeval/models/llms/grok_model.py +125 -59
  109. deepeval/models/llms/kimi_model.py +126 -81
  110. deepeval/models/llms/litellm_model.py +92 -18
  111. deepeval/models/llms/local_model.py +114 -15
  112. deepeval/models/llms/ollama_model.py +97 -76
  113. deepeval/models/llms/openai_model.py +167 -310
  114. deepeval/models/llms/portkey_model.py +58 -16
  115. deepeval/models/llms/utils.py +5 -2
  116. deepeval/models/utils.py +60 -4
  117. deepeval/simulator/conversation_simulator.py +43 -0
  118. deepeval/simulator/template.py +13 -0
  119. deepeval/test_case/api.py +24 -45
  120. deepeval/test_case/arena_test_case.py +7 -2
  121. deepeval/test_case/conversational_test_case.py +55 -6
  122. deepeval/test_case/llm_test_case.py +60 -6
  123. deepeval/test_run/api.py +3 -0
  124. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -1
  125. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/RECORD +128 -132
  126. deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
  127. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
  128. deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
  129. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -133
  130. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
  131. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
  132. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
  133. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0

deepeval/metrics/arena_g_eval/arena_g_eval.py

@@ -14,12 +14,17 @@ from deepeval.utils import get_or_create_event_loop, prettify_list
 from deepeval.metrics.utils import (
     check_arena_test_case_params,
     construct_verbose_logs,
-    trimAndLoadJson,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.models import DeepEvalBaseLLM
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.arena_g_eval.schema import *
+from deepeval.metrics.arena_g_eval.schema import (
+    RewrittenReason,
+    Winner,
+    Steps,
+)
 from deepeval.metrics.g_eval.utils import (
     construct_g_eval_params_string,
     validate_criteria_and_evaluation_steps,
@@ -62,7 +67,13 @@ class ArenaGEval(BaseArenaMetric):
         _progress: Optional[Progress] = None,
         _pbar_id: Optional[int] = None,
     ) -> str:
-        check_arena_test_case_params(test_case, self.evaluation_params, self)
+        check_arena_test_case_params(
+            test_case,
+            self.evaluation_params,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
         self.evaluation_cost = 0 if self.using_native_model else None
 
         with metric_progress_indicator(self, _show_indicator=_show_indicator):
@@ -76,12 +87,12 @@ class ArenaGEval(BaseArenaMetric):
                 )
             else:
                 self.evaluation_steps: List[str] = (
-                    self._generate_evaluation_steps()
+                    self._generate_evaluation_steps(test_case.multimodal)
                 )
                 if _progress:
                     update_pbar(_progress, _pbar_id)
                 masked_winner, masked_reason, dummy_to_real_names = (
-                    self._compare(test_case)
+                    self._compare(test_case, test_case.multimodal)
                 )
                 if _progress:
                     update_pbar(_progress, _pbar_id)
@@ -111,7 +122,13 @@ class ArenaGEval(BaseArenaMetric):
         _progress: Optional[Progress] = None,
         _pbar_id: Optional[int] = None,
     ) -> str:
-        check_arena_test_case_params(test_case, self.evaluation_params, self)
+        check_arena_test_case_params(
+            test_case,
+            self.evaluation_params,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
         self.evaluation_cost = 0 if self.using_native_model else None
 
         with metric_progress_indicator(
@@ -120,12 +137,12 @@ class ArenaGEval(BaseArenaMetric):
             _show_indicator=_show_indicator,
         ):
             self.evaluation_steps: List[str] = (
-                await self._a_generate_evaluation_steps()
+                await self._a_generate_evaluation_steps(test_case.multimodal)
            )
             if _progress:
                 update_pbar(_progress, _pbar_id)
             masked_winner, masked_reason, dummy_to_real_names = (
-                await self._a_compare(test_case)
+                await self._a_compare(test_case, test_case.multimodal)
             )
             if _progress:
                 update_pbar(_progress, _pbar_id)
@@ -147,7 +164,7 @@ class ArenaGEval(BaseArenaMetric):
             )
             return self.winner
 
-    async def _a_generate_evaluation_steps(self) -> List[str]:
+    async def _a_generate_evaluation_steps(self, multimodal: bool) -> List[str]:
         if self.evaluation_steps:
             return self.evaluation_steps
 
@@ -155,23 +172,20 @@
             self.evaluation_params
         )
         prompt = ArenaGEvalTemplate.generate_evaluation_steps(
-            criteria=self.criteria, parameters=g_eval_params_str
+            criteria=self.criteria,
+            parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+
+        return await a_generate_with_schema_and_extract(
+            self,
+            prompt,
+            Steps,
+            extract_schema=lambda s: s.steps,
+            extract_json=lambda data: data["steps"],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt)
-            self.evaluation_cost += cost
-            data = trimAndLoadJson(res, self)
-            return data["steps"]
-        else:
-            try:
-                res: Steps = await self.model.a_generate(prompt, schema=Steps)
-                return res.steps
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["steps"]
 
-    def _generate_evaluation_steps(self) -> List[str]:
+    def _generate_evaluation_steps(self, multimodal: bool) -> List[str]:
         if self.evaluation_steps:
             return self.evaluation_steps
 
@@ -179,25 +193,20 @@
             self.evaluation_params
         )
         prompt = ArenaGEvalTemplate.generate_evaluation_steps(
-            criteria=self.criteria, parameters=g_eval_params_str
+            criteria=self.criteria,
+            parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+        return generate_with_schema_and_extract(
+            self,
+            prompt,
+            Steps,
+            extract_schema=lambda s: s.steps,
+            extract_json=lambda data: data["steps"],
        )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt)
-            self.evaluation_cost += cost
-            data = trimAndLoadJson(res, self)
-            return data["steps"]
-        else:
-            try:
-                res: Steps = self.model.generate(prompt, schema=Steps)
-                return res.steps
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["steps"]
 
     async def _a_compare(
-        self,
-        test_case: ArenaTestCase,
+        self, test_case: ArenaTestCase, multimodal: bool
     ) -> Tuple[str, str, Dict[str, str]]:
         formatted_test_case, dummy_to_real_names = format_arena_test_case(
             self.evaluation_params, test_case
@@ -209,23 +218,27 @@
             evaluation_steps=number_evaluation_steps(self.evaluation_steps),
             test_case_contents=formatted_test_case,
             parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+
+        return await a_generate_with_schema_and_extract(
+            self,
+            prompt,
+            Winner,
+            extract_schema=lambda s: (
+                s.winner,
+                s.reason,
+                dummy_to_real_names,
+            ),
+            extract_json=lambda data: (
+                data["winner"],
+                data["reason"],
+                dummy_to_real_names,
+            ),
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Winner)
-            self.evaluation_cost += cost
-            return res.winner, res.reason, dummy_to_real_names
-        else:
-            try:
-                res: Winner = await self.model.a_generate(prompt, schema=Winner)
-                return res.winner, res.reason, dummy_to_real_names
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["winner"], data["reason"], dummy_to_real_names
 
     def _compare(
-        self,
-        test_case: ArenaTestCase,
+        self, test_case: ArenaTestCase, multimodal: bool
     ) -> Tuple[str, str, Dict[str, str]]:
         formatted_test_case, dummy_to_real_names = format_arena_test_case(
             self.evaluation_params, test_case
@@ -237,19 +250,23 @@
             evaluation_steps=number_evaluation_steps(self.evaluation_steps),
             test_case_contents=formatted_test_case,
             parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+        return generate_with_schema_and_extract(
+            self,
+            prompt,
+            Winner,
+            extract_schema=lambda s: (
+                s.winner,
+                s.reason,
+                dummy_to_real_names,
+            ),
+            extract_json=lambda data: (
+                data["winner"],
+                data["reason"],
+                dummy_to_real_names,
+            ),
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Winner)
-            self.evaluation_cost += cost
-            return res.winner, res.reason, dummy_to_real_names
-        else:
-            try:
-                res: Winner = self.model.generate(prompt, schema=Winner)
-                return res.winner, res.reason, dummy_to_real_names
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["winner"], data["reason"], dummy_to_real_names
 
     async def _a_generate_rewritten_reason(
         self,
@@ -260,22 +277,14 @@
             reason=reason,
             dummy_to_real_names=dummy_to_real_names,
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=RewrittenReason
-            )
-            self.evaluation_cost += cost
-            return res.rewritten_reason
-        else:
-            try:
-                res: RewrittenReason = await self.model.a_generate(
-                    prompt, schema=RewrittenReason
-                )
-                return res.rewritten_reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["rewritten_reason"]
+
+        return await a_generate_with_schema_and_extract(
+            self,
+            prompt,
+            RewrittenReason,
+            extract_schema=lambda s: s.rewritten_reason,
+            extract_json=lambda data: data["rewritten_reason"],
+        )
 
     def _generate_rewritten_reason(
         self,
@@ -286,20 +295,13 @@
             reason=reason,
             dummy_to_real_names=dummy_to_real_names,
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=RewrittenReason)
-            self.evaluation_cost += cost
-            return res.rewritten_reason
-        else:
-            try:
-                res: RewrittenReason = self.model.generate(
-                    prompt, schema=RewrittenReason
-                )
-                return res.rewritten_reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["rewritten_reason"]
+        return generate_with_schema_and_extract(
+            self,
+            prompt,
+            RewrittenReason,
+            extract_schema=lambda s: s.rewritten_reason,
+            extract_json=lambda data: data["rewritten_reason"],
+        )
 
     def is_successful(self) -> bool:
         if self.error is not None:
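
The same refactor repeats across most metric files in this release: each metric's inline `if self.using_native_model: ... else: try/except TypeError` block is replaced by a call to the shared `generate_with_schema_and_extract` / `a_generate_with_schema_and_extract` helpers imported from `deepeval.metrics.utils`. The helpers themselves are not part of this excerpt (deepeval/metrics/utils.py changed by +145 -90), and the removed native-model branches were not uniform (some passed `schema=`, some parsed raw JSON), so the sketch below is only an inference from the call sites and the removed branches; the actual implementation may differ.

```python
# Hypothetical sketch, inferred from the call sites above; the real helper
# lives in deepeval/metrics/utils.py and is not shown in this diff.
from typing import Any, Callable, Type

from pydantic import BaseModel

from deepeval.metrics.utils import trimAndLoadJson  # tolerant JSON parser used by the removed branches


def generate_with_schema_and_extract(
    metric,
    prompt: str,
    schema_cls: Type[BaseModel],
    extract_schema: Callable[[Any], Any],
    extract_json: Callable[[dict], Any],
) -> Any:
    if metric.using_native_model:
        # Native models return (result, cost); accumulate cost on the metric.
        res, cost = metric.model.generate(prompt, schema=schema_cls)
        metric.evaluation_cost += cost
        return extract_schema(res)
    try:
        # Custom models that accept `schema=` return a parsed pydantic object.
        res = metric.model.generate(prompt, schema=schema_cls)
        return extract_schema(res)
    except TypeError:
        # Models without schema support: fall back to raw text plus JSON parsing.
        res = metric.model.generate(prompt)
        data = trimAndLoadJson(res, metric)
        return extract_json(data)
```

The async variant would presumably mirror this with `await metric.model.a_generate(...)`.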

deepeval/metrics/arena_g_eval/template.py

@@ -3,11 +3,23 @@ import textwrap
 
 
 class ArenaGEvalTemplate:
+    multimodal_rules = """
+    --- MULTIMODAL INPUT RULES ---
+    - Treat image content as factual evidence.
+    - Only reference visual details that are explicitly and clearly visible.
+    - Do not infer or guess objects, text, or details not visibly present.
+    - If an image is unclear or ambiguous, mark uncertainty explicitly.
+    """
+
     @staticmethod
-    def generate_evaluation_steps(parameters: str, criteria: str):
+    def generate_evaluation_steps(
+        parameters: str, criteria: str, multimodal: Optional[bool]
+    ):
         return textwrap.dedent(
             f"""Given an evaluation criteria which outlines how you should choose the winner out of all contestants based on the {parameters}, generate 3-4 concise evaluation steps based on the criteria below. You MUST make it clear how to evaluate {parameters} in relation to one another.
 
+            {ArenaGEvalTemplate.multimodal_rules if multimodal else ""}
+
             Evaluation Criteria:
             {criteria}
 
@@ -28,6 +40,7 @@ class ArenaGEvalTemplate:
         evaluation_steps: str,
         test_case_contents: List[str],
         parameters: str,
+        multimodal: Optional[bool],
     ):
         reasoning_expectation = (
             "Be specific and grounded in the evaluation steps."
@@ -36,6 +49,9 @@ class ArenaGEvalTemplate:
         return textwrap.dedent(
             f"""
             You are a judge. Given the following evaluation steps, select the single contestant that best aligns with the evaluation steps.
+
+            {ArenaGEvalTemplate.multimodal_rules if multimodal else ""}
+
             Return a JSON object with three fields:
 
             - `"winner"`: the contestant that is best aligned with the evaluation steps.

deepeval/metrics/argument_correctness/argument_correctness.py

@@ -3,9 +3,10 @@ from typing import Optional, List, Type, Union
 from deepeval.utils import get_or_create_event_loop, prettify_list
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.test_case import (
     LLMTestCase,
@@ -18,7 +19,11 @@ from deepeval.metrics.argument_correctness.template import (
     ArgumentCorrectnessTemplate,
 )
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.argument_correctness.schema import *
+from deepeval.metrics.argument_correctness.schema import (
+    ArgumentCorrectnessVerdict,
+    Verdicts,
+    ArgumentCorrectnessScoreReason,
+)
 from deepeval.metrics.api import metric_data_manager
 
 
@@ -57,7 +62,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -81,11 +94,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
             else:
                 self.verdicts: List[ArgumentCorrectnessVerdict] = (
                     self._generate_verdicts(
-                        test_case.input, test_case.tools_called
+                        test_case.input,
+                        test_case.tools_called,
+                        test_case.multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(test_case.input)
+                self.reason = self._generate_reason(
+                    test_case.input, test_case.multimodal
+                )
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -108,7 +125,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -124,11 +149,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
             else:
                 self.verdicts: List[ArgumentCorrectnessVerdict] = (
                     await self._a_generate_verdicts(
-                        test_case.input, test_case.tools_called
+                        test_case.input,
+                        test_case.tools_called,
+                        test_case.multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = await self._a_generate_reason(test_case.input)
+                self.reason = await self._a_generate_reason(
+                    test_case.input, test_case.multimodal
+                )
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -143,7 +172,7 @@ class ArgumentCorrectnessMetric(BaseMetric):
                 )
             return self.score
 
-    async def _a_generate_reason(self, input: str) -> str:
+    async def _a_generate_reason(self, input: str, multimodal: bool) -> str:
         if self.include_reason is False:
             return None
 
@@ -156,27 +185,18 @@ class ArgumentCorrectnessMetric(BaseMetric):
            incorrect_tool_calls_reasons=incorrect_tool_calls_reasons,
            input=input,
            score=format(self.score, ".2f"),
+            multimodal=multimodal,
+        )
+
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ArgumentCorrectnessScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=ArgumentCorrectnessScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ArgumentCorrectnessScoreReason = (
-                    await self.model.a_generate(
-                        prompt=prompt, schema=ArgumentCorrectnessScoreReason
-                    )
-                )
-                return res.reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
 
-    def _generate_reason(self, input: str) -> str:
+    def _generate_reason(self, input: str, multimodal: bool) -> str:
         if self.include_reason is False:
             return None
 
@@ -189,76 +209,50 @@ class ArgumentCorrectnessMetric(BaseMetric):
            incorrect_tool_calls_reasons=incorrect_tool_calls_reasons,
            input=input,
            score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
-        if self.using_native_model:
-            res, cost = self.model.generate(
-                prompt, schema=ArgumentCorrectnessScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ArgumentCorrectnessScoreReason = self.model.generate(
-                    prompt=prompt, schema=ArgumentCorrectnessScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ArgumentCorrectnessScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
     async def _a_generate_verdicts(
-        self,
-        input: str,
-        tools_called: List[ToolCall],
+        self, input: str, tools_called: List[ToolCall], multimodal: bool
     ) -> List[ArgumentCorrectnessVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input,
-            tools_called=tools_called,
+            input=input, tools_called=tools_called, multimodal=multimodal
+        )
+
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ArgumentCorrectnessVerdict(**item) for item in data["verdicts"]
+            ],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            return [item for item in res.verdicts]
-        else:
-            try:
-                res: Verdicts = await self.model.a_generate(
-                    prompt, schema=Verdicts
-                )
-                return [item for item in res.verdicts]
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return [
-                    ArgumentCorrectnessVerdict(**item)
-                    for item in data["verdicts"]
-                ]
 
     def _generate_verdicts(
-        self,
-        input: str,
-        tools_called: List[ToolCall],
+        self, input: str, tools_called: List[ToolCall], multimodal: bool
     ) -> List[ArgumentCorrectnessVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input,
-            tools_called=tools_called,
+            input=input, tools_called=tools_called, multimodal=multimodal
+        )
+
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ArgumentCorrectnessVerdict(**item) for item in data["verdicts"]
+            ],
        )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            return [item for item in res.verdicts]
-        else:
-            try:
-                res: Verdicts = self.model.generate(prompt, schema=Verdicts)
-                return [item for item in res.verdicts]
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return [
-                    ArgumentCorrectnessVerdict(**item)
-                    for item in data["verdicts"]
-                ]
 
     def _calculate_score(self):
         number_of_verdicts = len(self.verdicts)
@@ -279,7 +273,7 @@ class ArgumentCorrectnessMetric(BaseMetric):
         else:
             try:
                 self.success = self.score >= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success
 

deepeval/metrics/argument_correctness/template.py

@@ -4,8 +4,18 @@ import textwrap
 
 
 class ArgumentCorrectnessTemplate:
+    multimodal_rules = """
+    --- MULTIMODAL INPUT RULES ---
+    - Treat image content as factual evidence.
+    - Only reference visual details that are explicitly and clearly visible.
+    - Do not infer or guess objects, text, or details not visibly present.
+    - If an image is unclear or ambiguous, mark uncertainty explicitly.
+    """
+
     @staticmethod
-    def generate_verdicts(input: str, tools_called: List[ToolCall]):
+    def generate_verdicts(
+        input: str, tools_called: List[ToolCall], multimodal: bool = False
+    ):
 
         stringified_tools_called = repr(tools_called)
 
@@ -19,6 +29,8 @@ class ArgumentCorrectnessTemplate:
             Provide a 'reason' ONLY if the answer is 'no'.
             If there is no input parameter, answer 'no' for the verdict and provide the reason as "No input parameter provided".
 
+            {ArgumentCorrectnessTemplate.multimodal_rules if multimodal else ""}
+
             **
             IMPORTANT: Please make sure to only return in valid and parseable JSON format, with the 'verdicts' key mapping to a list of JSON objects. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
             Example input:
@@ -92,12 +104,17 @@ class ArgumentCorrectnessTemplate:
 
     @staticmethod
     def generate_reason(
-        incorrect_tool_calls_reasons: List[str], input: str, score: float
+        incorrect_tool_calls_reasons: List[str],
+        input: str,
+        score: float,
+        multimodal: bool = False,
     ):
         return textwrap.dedent(
             f"""Given the argument correctness score, the list of reasons of incorrect tool calls, and the input, provide a CONCISE reason for the score. Explain why it is not higher, but also why it is at its current score. You can mention tool calls or input, but do not mention an output or a response.
             If there is nothing incorrect, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
 
+            {ArgumentCorrectnessTemplate.multimodal_rules if multimodal else ""}
+
             **
             IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.