deepeval 3.7.5__py3-none-any.whl → 3.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. deepeval/_version.py +1 -1
  2. deepeval/cli/main.py +2022 -759
  3. deepeval/cli/utils.py +208 -36
  4. deepeval/config/dotenv_handler.py +19 -0
  5. deepeval/config/settings.py +675 -245
  6. deepeval/config/utils.py +9 -1
  7. deepeval/dataset/api.py +23 -1
  8. deepeval/dataset/golden.py +106 -21
  9. deepeval/evaluate/evaluate.py +0 -3
  10. deepeval/evaluate/execute.py +162 -315
  11. deepeval/evaluate/utils.py +6 -30
  12. deepeval/key_handler.py +124 -51
  13. deepeval/metrics/__init__.py +0 -4
  14. deepeval/metrics/answer_relevancy/answer_relevancy.py +89 -132
  15. deepeval/metrics/answer_relevancy/template.py +102 -179
  16. deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
  17. deepeval/metrics/arena_g_eval/template.py +17 -1
  18. deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
  19. deepeval/metrics/argument_correctness/template.py +19 -2
  20. deepeval/metrics/base_metric.py +19 -41
  21. deepeval/metrics/bias/bias.py +102 -108
  22. deepeval/metrics/bias/template.py +14 -2
  23. deepeval/metrics/contextual_precision/contextual_precision.py +56 -92
  24. deepeval/metrics/contextual_recall/contextual_recall.py +58 -85
  25. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +53 -83
  26. deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
  27. deepeval/metrics/conversation_completeness/template.py +23 -3
  28. deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
  29. deepeval/metrics/conversational_dag/nodes.py +66 -123
  30. deepeval/metrics/conversational_dag/templates.py +16 -0
  31. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
  32. deepeval/metrics/dag/dag.py +10 -0
  33. deepeval/metrics/dag/nodes.py +63 -126
  34. deepeval/metrics/dag/templates.py +14 -0
  35. deepeval/metrics/exact_match/exact_match.py +9 -1
  36. deepeval/metrics/faithfulness/faithfulness.py +82 -136
  37. deepeval/metrics/g_eval/g_eval.py +93 -79
  38. deepeval/metrics/g_eval/template.py +18 -1
  39. deepeval/metrics/g_eval/utils.py +7 -6
  40. deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
  41. deepeval/metrics/goal_accuracy/template.py +21 -3
  42. deepeval/metrics/hallucination/hallucination.py +60 -75
  43. deepeval/metrics/hallucination/template.py +13 -0
  44. deepeval/metrics/indicator.py +11 -10
  45. deepeval/metrics/json_correctness/json_correctness.py +40 -38
  46. deepeval/metrics/json_correctness/template.py +10 -0
  47. deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
  48. deepeval/metrics/knowledge_retention/schema.py +9 -3
  49. deepeval/metrics/knowledge_retention/template.py +12 -0
  50. deepeval/metrics/mcp/mcp_task_completion.py +72 -43
  51. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +93 -75
  52. deepeval/metrics/mcp/schema.py +4 -0
  53. deepeval/metrics/mcp/template.py +59 -0
  54. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
  55. deepeval/metrics/mcp_use_metric/template.py +12 -0
  56. deepeval/metrics/misuse/misuse.py +77 -97
  57. deepeval/metrics/misuse/template.py +15 -0
  58. deepeval/metrics/multimodal_metrics/__init__.py +0 -1
  59. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +37 -38
  60. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +55 -76
  61. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +37 -38
  62. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +37 -38
  63. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +57 -76
  64. deepeval/metrics/non_advice/non_advice.py +79 -105
  65. deepeval/metrics/non_advice/template.py +12 -0
  66. deepeval/metrics/pattern_match/pattern_match.py +12 -4
  67. deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
  68. deepeval/metrics/pii_leakage/template.py +14 -0
  69. deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
  70. deepeval/metrics/plan_adherence/template.py +11 -0
  71. deepeval/metrics/plan_quality/plan_quality.py +63 -87
  72. deepeval/metrics/plan_quality/template.py +9 -0
  73. deepeval/metrics/prompt_alignment/prompt_alignment.py +78 -86
  74. deepeval/metrics/prompt_alignment/template.py +12 -0
  75. deepeval/metrics/role_adherence/role_adherence.py +48 -71
  76. deepeval/metrics/role_adherence/template.py +14 -0
  77. deepeval/metrics/role_violation/role_violation.py +75 -108
  78. deepeval/metrics/role_violation/template.py +12 -0
  79. deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
  80. deepeval/metrics/step_efficiency/template.py +11 -0
  81. deepeval/metrics/summarization/summarization.py +115 -183
  82. deepeval/metrics/summarization/template.py +19 -0
  83. deepeval/metrics/task_completion/task_completion.py +67 -73
  84. deepeval/metrics/tool_correctness/tool_correctness.py +43 -42
  85. deepeval/metrics/tool_use/schema.py +4 -0
  86. deepeval/metrics/tool_use/template.py +16 -2
  87. deepeval/metrics/tool_use/tool_use.py +72 -94
  88. deepeval/metrics/topic_adherence/schema.py +4 -0
  89. deepeval/metrics/topic_adherence/template.py +21 -1
  90. deepeval/metrics/topic_adherence/topic_adherence.py +68 -81
  91. deepeval/metrics/toxicity/template.py +13 -0
  92. deepeval/metrics/toxicity/toxicity.py +80 -99
  93. deepeval/metrics/turn_contextual_precision/schema.py +3 -3
  94. deepeval/metrics/turn_contextual_precision/template.py +9 -2
  95. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +154 -154
  96. deepeval/metrics/turn_contextual_recall/schema.py +3 -3
  97. deepeval/metrics/turn_contextual_recall/template.py +8 -1
  98. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +148 -143
  99. deepeval/metrics/turn_contextual_relevancy/schema.py +2 -2
  100. deepeval/metrics/turn_contextual_relevancy/template.py +8 -1
  101. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +154 -157
  102. deepeval/metrics/turn_faithfulness/schema.py +1 -1
  103. deepeval/metrics/turn_faithfulness/template.py +8 -1
  104. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +180 -203
  105. deepeval/metrics/turn_relevancy/template.py +14 -0
  106. deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
  107. deepeval/metrics/utils.py +161 -91
  108. deepeval/models/__init__.py +2 -0
  109. deepeval/models/base_model.py +44 -6
  110. deepeval/models/embedding_models/azure_embedding_model.py +34 -12
  111. deepeval/models/embedding_models/local_embedding_model.py +22 -7
  112. deepeval/models/embedding_models/ollama_embedding_model.py +17 -6
  113. deepeval/models/embedding_models/openai_embedding_model.py +3 -2
  114. deepeval/models/llms/__init__.py +2 -0
  115. deepeval/models/llms/amazon_bedrock_model.py +229 -73
  116. deepeval/models/llms/anthropic_model.py +143 -48
  117. deepeval/models/llms/azure_model.py +169 -95
  118. deepeval/models/llms/constants.py +2032 -0
  119. deepeval/models/llms/deepseek_model.py +82 -35
  120. deepeval/models/llms/gemini_model.py +126 -67
  121. deepeval/models/llms/grok_model.py +128 -65
  122. deepeval/models/llms/kimi_model.py +129 -87
  123. deepeval/models/llms/litellm_model.py +94 -18
  124. deepeval/models/llms/local_model.py +115 -16
  125. deepeval/models/llms/ollama_model.py +97 -76
  126. deepeval/models/llms/openai_model.py +169 -311
  127. deepeval/models/llms/portkey_model.py +58 -16
  128. deepeval/models/llms/utils.py +5 -2
  129. deepeval/models/retry_policy.py +10 -5
  130. deepeval/models/utils.py +56 -4
  131. deepeval/simulator/conversation_simulator.py +49 -2
  132. deepeval/simulator/template.py +16 -1
  133. deepeval/synthesizer/synthesizer.py +19 -17
  134. deepeval/test_case/api.py +24 -45
  135. deepeval/test_case/arena_test_case.py +7 -2
  136. deepeval/test_case/conversational_test_case.py +55 -6
  137. deepeval/test_case/llm_test_case.py +60 -6
  138. deepeval/test_run/api.py +3 -0
  139. deepeval/test_run/test_run.py +6 -1
  140. deepeval/utils.py +26 -0
  141. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/METADATA +3 -3
  142. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/RECORD +145 -148
  143. deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
  144. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
  145. deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
  146. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -133
  147. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
  148. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/LICENSE.md +0 -0
  149. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/WHEEL +0 -0
  150. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/entry_points.txt +0 -0
deepeval/metrics/argument_correctness/argument_correctness.py

@@ -3,9 +3,10 @@ from typing import Optional, List, Type, Union
 from deepeval.utils import get_or_create_event_loop, prettify_list
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.test_case import (
     LLMTestCase,
@@ -18,7 +19,11 @@ from deepeval.metrics.argument_correctness.template import (
     ArgumentCorrectnessTemplate,
 )
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.argument_correctness.schema import *
+from deepeval.metrics.argument_correctness.schema import (
+    ArgumentCorrectnessVerdict,
+    Verdicts,
+    ArgumentCorrectnessScoreReason,
+)
 from deepeval.metrics.api import metric_data_manager
 
 
@@ -57,7 +62,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -81,11 +94,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
             else:
                 self.verdicts: List[ArgumentCorrectnessVerdict] = (
                     self._generate_verdicts(
-                        test_case.input, test_case.tools_called
+                        test_case.input,
+                        test_case.tools_called,
+                        test_case.multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(test_case.input)
+                self.reason = self._generate_reason(
+                    test_case.input, test_case.multimodal
+                )
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -108,7 +125,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -124,11 +149,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
             else:
                 self.verdicts: List[ArgumentCorrectnessVerdict] = (
                     await self._a_generate_verdicts(
-                        test_case.input, test_case.tools_called
+                        test_case.input,
+                        test_case.tools_called,
+                        test_case.multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = await self._a_generate_reason(test_case.input)
+                self.reason = await self._a_generate_reason(
+                    test_case.input, test_case.multimodal
+                )
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -143,7 +172,7 @@ class ArgumentCorrectnessMetric(BaseMetric):
                 )
             return self.score
 
-    async def _a_generate_reason(self, input: str) -> str:
+    async def _a_generate_reason(self, input: str, multimodal: bool) -> str:
         if self.include_reason is False:
             return None
 
@@ -156,27 +185,18 @@ class ArgumentCorrectnessMetric(BaseMetric):
             incorrect_tool_calls_reasons=incorrect_tool_calls_reasons,
             input=input,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
+        )
+
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ArgumentCorrectnessScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=ArgumentCorrectnessScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ArgumentCorrectnessScoreReason = (
-                    await self.model.a_generate(
-                        prompt=prompt, schema=ArgumentCorrectnessScoreReason
-                    )
-                )
-                return res.reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
 
-    def _generate_reason(self, input: str) -> str:
+    def _generate_reason(self, input: str, multimodal: bool) -> str:
         if self.include_reason is False:
             return None
 
@@ -189,76 +209,50 @@ class ArgumentCorrectnessMetric(BaseMetric):
             incorrect_tool_calls_reasons=incorrect_tool_calls_reasons,
             input=input,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
-        if self.using_native_model:
-            res, cost = self.model.generate(
-                prompt, schema=ArgumentCorrectnessScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ArgumentCorrectnessScoreReason = self.model.generate(
-                    prompt=prompt, schema=ArgumentCorrectnessScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ArgumentCorrectnessScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
     async def _a_generate_verdicts(
-        self,
-        input: str,
-        tools_called: List[ToolCall],
+        self, input: str, tools_called: List[ToolCall], multimodal: bool
     ) -> List[ArgumentCorrectnessVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input,
-            tools_called=tools_called,
+            input=input, tools_called=tools_called, multimodal=multimodal
+        )
+
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ArgumentCorrectnessVerdict(**item) for item in data["verdicts"]
+            ],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            return [item for item in res.verdicts]
-        else:
-            try:
-                res: Verdicts = await self.model.a_generate(
-                    prompt, schema=Verdicts
-                )
-                return [item for item in res.verdicts]
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return [
-                    ArgumentCorrectnessVerdict(**item)
-                    for item in data["verdicts"]
-                ]
 
     def _generate_verdicts(
-        self,
-        input: str,
-        tools_called: List[ToolCall],
+        self, input: str, tools_called: List[ToolCall], multimodal: bool
     ) -> List[ArgumentCorrectnessVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input,
-            tools_called=tools_called,
+            input=input, tools_called=tools_called, multimodal=multimodal
+        )
+
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ArgumentCorrectnessVerdict(**item) for item in data["verdicts"]
+            ],
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            return [item for item in res.verdicts]
-        else:
-            try:
-                res: Verdicts = self.model.generate(prompt, schema=Verdicts)
-                return [item for item in res.verdicts]
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return [
-                    ArgumentCorrectnessVerdict(**item)
-                    for item in data["verdicts"]
-                ]
 
     def _calculate_score(self):
         number_of_verdicts = len(self.verdicts)
@@ -279,7 +273,7 @@ class ArgumentCorrectnessMetric(BaseMetric):
         else:
             try:
                 self.success = self.score >= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success
 
deepeval/metrics/argument_correctness/template.py

@@ -4,8 +4,18 @@ import textwrap
 
 
 class ArgumentCorrectnessTemplate:
+    multimodal_rules = """
+    --- MULTIMODAL INPUT RULES ---
+    - Treat image content as factual evidence.
+    - Only reference visual details that are explicitly and clearly visible.
+    - Do not infer or guess objects, text, or details not visibly present.
+    - If an image is unclear or ambiguous, mark uncertainty explicitly.
+    """
+
     @staticmethod
-    def generate_verdicts(input: str, tools_called: List[ToolCall]):
+    def generate_verdicts(
+        input: str, tools_called: List[ToolCall], multimodal: bool = False
+    ):
 
         stringified_tools_called = repr(tools_called)
 
@@ -19,6 +29,8 @@ class ArgumentCorrectnessTemplate:
             Provide a 'reason' ONLY if the answer is 'no'.
             If there is no input parameter, answer 'no' for the verdict and provide the reason as "No input parameter provided".
 
+            {ArgumentCorrectnessTemplate.multimodal_rules if multimodal else ""}
+
             **
             IMPORTANT: Please make sure to only return in valid and parseable JSON format, with the 'verdicts' key mapping to a list of JSON objects. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
             Example input:
@@ -92,12 +104,17 @@ class ArgumentCorrectnessTemplate:
 
     @staticmethod
     def generate_reason(
-        incorrect_tool_calls_reasons: List[str], input: str, score: float
+        incorrect_tool_calls_reasons: List[str],
+        input: str,
+        score: float,
+        multimodal: bool = False,
     ):
         return textwrap.dedent(
             f"""Given the argument correctness score, the list of reasons of incorrect tool calls, and the input, provide a CONCISE reason for the score. Explain why it is not higher, but also why it is at its current score. You can mention tool calls or input, but do not mention an output or a response.
             If there is nothing incorrect, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
 
+            {ArgumentCorrectnessTemplate.multimodal_rules if multimodal else ""}
+
             **
             IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
 
deepeval/metrics/base_metric.py

@@ -48,6 +48,12 @@ class BaseMetric:
     def __name__(self):
         return "Base Metric"
 
+    def _accrue_cost(self, cost: float) -> None:
+        if self.evaluation_cost is not None and cost is not None:
+            self.evaluation_cost += cost
+        else:
+            self.evaluation_cost = None
+
 
 
 class BaseConversationalMetric:
@@ -64,6 +70,8 @@ class BaseConversationalMetric:
     evaluation_cost: Optional[float] = None
     verbose_logs: Optional[str] = None
     skipped = False
+    model: Optional[DeepEvalBaseLLM] = None
+    using_native_model: Optional[bool] = None
 
     @abstractmethod
     def measure(
@@ -87,47 +95,11 @@ class BaseConversationalMetric:
     def __name__(self):
         return "Base Conversational Metric"
 
-
-class BaseMultimodalMetric:
-    score: Optional[float] = None
-    score_breakdown: Dict = None
-    reason: Optional[str] = None
-    success: Optional[bool] = None
-    evaluation_model: Optional[str] = None
-    strict_mode: bool = False
-    async_mode: bool = True
-    verbose_mode: bool = True
-    include_reason: bool = False
-    error: Optional[str] = None
-    evaluation_cost: Optional[float] = None
-    verbose_logs: Optional[str] = None
-    skipped = False
-
-    @property
-    def threshold(self) -> float:
-        return self._threshold
-
-    @threshold.setter
-    def threshold(self, value: float):
-        self._threshold = value
-
-    @abstractmethod
-    def measure(self, test_case: LLMTestCase, *args, **kwargs) -> float:
-        raise NotImplementedError
-
-    @abstractmethod
-    async def a_measure(self, test_case: LLMTestCase, *args, **kwargs) -> float:
-        raise NotImplementedError(
-            f"Async execution for {self.__class__.__name__} not supported yet. Please set 'async_mode' to 'False'."
-        )
-
-    @abstractmethod
-    def is_successful(self) -> bool:
-        raise NotImplementedError
-
-    @property
-    def __name__(self):
-        return "Base Multimodal Metric"
+    def _accrue_cost(self, cost: float) -> None:
+        if self.evaluation_cost is not None and cost is not None:
+            self.evaluation_cost += cost
+        else:
+            self.evaluation_cost = None
 
 
 class BaseArenaMetric:
@@ -159,3 +131,9 @@ class BaseArenaMetric:
     @property
     def __name__(self):
         return "Base Arena Metric"
+
+    def _accrue_cost(self, cost: float) -> None:
+        if self.evaluation_cost is not None and cost is not None:
+            self.evaluation_cost += cost
+        else:
+            self.evaluation_cost = None
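The `_accrue_cost` helper added identically to `BaseMetric`, `BaseConversationalMetric`, and `BaseArenaMetric` centralizes the `self.evaluation_cost += cost` bookkeeping: the total keeps accumulating only while both the running total and the incoming cost are not `None`, and a single missing cost switches the total to `None` for the rest of the run. A quick illustration of that behavior (stand-in class, not deepeval code):

```python
# Stand-in with the same _accrue_cost body as the diff above.
class _CostTracker:
    evaluation_cost = 0.0

    def _accrue_cost(self, cost):
        if self.evaluation_cost is not None and cost is not None:
            self.evaluation_cost += cost
        else:
            self.evaluation_cost = None


tracker = _CostTracker()
tracker._accrue_cost(0.002)
tracker._accrue_cost(0.003)
assert abs(tracker.evaluation_cost - 0.005) < 1e-12

# One call without a reported cost disables the running total for good.
tracker._accrue_cost(None)
tracker._accrue_cost(0.001)
assert tracker.evaluation_cost is None
```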
deepeval/metrics/bias/bias.py

@@ -11,12 +11,18 @@ from deepeval.models import DeepEvalBaseLLM
 from deepeval.utils import get_or_create_event_loop, prettify_list
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.metrics.bias.template import BiasTemplate
-from deepeval.metrics.bias.schema import *
+from deepeval.metrics.bias.schema import (
+    Opinions,
+    BiasVerdict,
+    Verdicts,
+    BiasScoreReason,
+)
 
 
 class BiasMetric(BaseMetric):
@@ -51,7 +57,16 @@ class BiasMetric(BaseMetric):
         _in_component: bool = False,
         _log_metric_to_confident: bool = True,
     ) -> float:
-        check_llm_test_case_params(test_case, self._required_params, self)
+
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -69,9 +84,11 @@ class BiasMetric(BaseMetric):
                 )
             else:
                 self.opinions: List[str] = self._generate_opinions(
-                    test_case.actual_output
+                    test_case.actual_output, test_case.multimodal
+                )
+                self.verdicts: List[BiasVerdict] = self._generate_verdicts(
+                    test_case.multimodal
                 )
-                self.verdicts: List[BiasVerdict] = self._generate_verdicts()
                 self.score = self._calculate_score()
                 self.reason = self._generate_reason()
                 self.success = self.score <= self.threshold
@@ -96,7 +113,16 @@ class BiasMetric(BaseMetric):
         _in_component: bool = False,
         _log_metric_to_confident: bool = True,
     ) -> float:
-        check_llm_test_case_params(test_case, self._required_params, self)
+
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -106,9 +132,11 @@ class BiasMetric(BaseMetric):
             _in_component=_in_component,
         ):
             self.opinions: List[str] = await self._a_generate_opinions(
-                test_case.actual_output
+                test_case.actual_output, test_case.multimodal
+            )
+            self.verdicts: List[BiasVerdict] = await self._a_generate_verdicts(
+                test_case.multimodal
             )
-            self.verdicts: List[BiasVerdict] = await self._a_generate_verdicts()
             self.score = self._calculate_score()
             self.reason = await self._a_generate_reason()
             self.success = self.score <= self.threshold
@@ -127,7 +155,9 @@ class BiasMetric(BaseMetric):
             )
             return self.score
 
-    async def _a_generate_reason(self) -> str:
+    async def _a_generate_reason(
+        self,
+    ) -> str:
         if self.include_reason is False:
             return None
 
@@ -141,22 +171,13 @@ class BiasMetric(BaseMetric):
             score=format(self.score, ".2f"),
         )
 
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=BiasScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: BiasScoreReason = await self.model.a_generate(
-                    prompt, schema=BiasScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=BiasScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
     def _generate_reason(self) -> str:
         if self.include_reason is False:
@@ -172,106 +193,79 @@ class BiasMetric(BaseMetric):
             score=format(self.score, ".2f"),
         )
 
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=BiasScoreReason)
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: BiasScoreReason = self.model.generate(
-                    prompt, schema=BiasScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=BiasScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
-    async def _a_generate_verdicts(self) -> List[BiasVerdict]:
+    async def _a_generate_verdicts(self, multimodal: bool) -> List[BiasVerdict]:
         if len(self.opinions) == 0:
             return []
 
-        verdicts: List[BiasVerdict] = []
         prompt = self.evaluation_template.generate_verdicts(
-            opinions=self.opinions
+            opinions=self.opinions, multimodal=multimodal
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            verdicts = [item for item in res.verdicts]
-            return verdicts
-        else:
-            try:
-                res: Verdicts = await self.model.a_generate(
-                    prompt, schema=Verdicts
-                )
-                verdicts = [item for item in res.verdicts]
-                return verdicts
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                verdicts = [BiasVerdict(**item) for item in data["verdicts"]]
-                return verdicts
 
-    def _generate_verdicts(self) -> List[BiasVerdict]:
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                BiasVerdict(**item) for item in data["verdicts"]
+            ],
+        )
+
+    def _generate_verdicts(self, multimodal: bool) -> List[BiasVerdict]:
         if len(self.opinions) == 0:
             return []
 
-        verdicts: List[BiasVerdict] = []
         prompt = self.evaluation_template.generate_verdicts(
-            opinions=self.opinions
+            opinions=self.opinions, multimodal=multimodal
        )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            verdicts = [item for item in res.verdicts]
-            return verdicts
-        else:
-            try:
-                res: Verdicts = self.model.generate(prompt, schema=Verdicts)
-                verdicts = [item for item in res.verdicts]
-                return verdicts
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                verdicts = [BiasVerdict(**item) for item in data["verdicts"]]
-                return verdicts
 
-    async def _a_generate_opinions(self, actual_output: str) -> List[str]:
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                BiasVerdict(**item) for item in data["verdicts"]
+            ],
+        )
+
+    async def _a_generate_opinions(
+        self, actual_output: str, multimodal: bool
+    ) -> List[str]:
         prompt = self.evaluation_template.generate_opinions(
-            actual_output=actual_output
+            actual_output=actual_output, multimodal=multimodal
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Opinions)
-            self.evaluation_cost += cost
-            return res.opinions
-        else:
-            try:
-                res: Opinions = await self.model.a_generate(
-                    prompt, schema=Opinions
-                )
-                return res.opinions
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["opinions"]
 
-    def _generate_opinions(self, actual_output: str) -> List[str]:
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Opinions,
+            extract_schema=lambda r: r.opinions,
+            extract_json=lambda data: data["opinions"],
+        )
+
+    def _generate_opinions(
+        self, actual_output: str, multimodal: bool
+    ) -> List[str]:
         prompt = self.evaluation_template.generate_opinions(
-            actual_output=actual_output
+            actual_output=actual_output, multimodal=multimodal
+        )
+
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Opinions,
+            extract_schema=lambda r: r.opinions,
+            extract_json=lambda data: data["opinions"],
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Opinions)
-            self.evaluation_cost += cost
-            return res.opinions
-        else:
-            try:
-                res: Opinions = self.model.generate(prompt, schema=Opinions)
-                return res.opinions
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["opinions"]
 
     def _calculate_score(self) -> float:
         number_of_verdicts = len(self.verdicts)
@@ -292,7 +286,7 @@ class BiasMetric(BaseMetric):
         else:
             try:
                 self.success = self.score <= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success
 
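From the caller's perspective the refactor above changes nothing: the `multimodal` flag is read off the test case inside `measure`/`a_measure` rather than passed by the user, so existing `BiasMetric` usage keeps working. A minimal usage sketch (standard deepeval API; the judge model is whatever your environment is already configured with):

```python
from deepeval.metrics import BiasMetric
from deepeval.test_case import LLMTestCase

# A text-only test case; the metric inspects test_case.multimodal internally
# to decide whether the multimodal prompt rules should be injected.
test_case = LLMTestCase(
    input="What do you think about autonomous vehicles?",
    actual_output=(
        "Autonomous vehicles can reduce certain kinds of accidents, but they "
        "also raise liability and labor questions that are still unresolved."
    ),
)

metric = BiasMetric(threshold=0.5)
metric.measure(test_case)
print(metric.score, metric.reason)
```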