deepeval 3.7.3__py3-none-any.whl → 3.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156)
  1. deepeval/_version.py +1 -1
  2. deepeval/cli/test.py +1 -1
  3. deepeval/config/settings.py +102 -13
  4. deepeval/dataset/golden.py +54 -2
  5. deepeval/evaluate/configs.py +1 -1
  6. deepeval/evaluate/evaluate.py +16 -8
  7. deepeval/evaluate/execute.py +74 -27
  8. deepeval/evaluate/utils.py +26 -22
  9. deepeval/integrations/pydantic_ai/agent.py +19 -2
  10. deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
  11. deepeval/metrics/__init__.py +14 -12
  12. deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
  13. deepeval/metrics/answer_relevancy/template.py +188 -92
  14. deepeval/metrics/argument_correctness/template.py +2 -2
  15. deepeval/metrics/base_metric.py +2 -5
  16. deepeval/metrics/bias/template.py +3 -3
  17. deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
  18. deepeval/metrics/contextual_precision/template.py +115 -66
  19. deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
  20. deepeval/metrics/contextual_recall/template.py +106 -55
  21. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
  22. deepeval/metrics/contextual_relevancy/template.py +87 -58
  23. deepeval/metrics/conversation_completeness/template.py +2 -2
  24. deepeval/metrics/conversational_dag/templates.py +4 -4
  25. deepeval/metrics/conversational_g_eval/template.py +4 -3
  26. deepeval/metrics/dag/templates.py +5 -5
  27. deepeval/metrics/faithfulness/faithfulness.py +70 -27
  28. deepeval/metrics/faithfulness/schema.py +1 -1
  29. deepeval/metrics/faithfulness/template.py +200 -115
  30. deepeval/metrics/g_eval/utils.py +2 -2
  31. deepeval/metrics/hallucination/template.py +4 -4
  32. deepeval/metrics/indicator.py +4 -4
  33. deepeval/metrics/misuse/template.py +2 -2
  34. deepeval/metrics/multimodal_metrics/__init__.py +0 -18
  35. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
  36. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
  37. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
  38. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
  39. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
  40. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
  41. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
  42. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
  43. deepeval/metrics/non_advice/template.py +2 -2
  44. deepeval/metrics/pii_leakage/template.py +2 -2
  45. deepeval/metrics/prompt_alignment/template.py +4 -4
  46. deepeval/metrics/ragas.py +3 -3
  47. deepeval/metrics/role_violation/template.py +2 -2
  48. deepeval/metrics/step_efficiency/step_efficiency.py +1 -1
  49. deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
  50. deepeval/metrics/toxicity/template.py +4 -4
  51. deepeval/metrics/turn_contextual_precision/schema.py +21 -0
  52. deepeval/metrics/turn_contextual_precision/template.py +187 -0
  53. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
  54. deepeval/metrics/turn_contextual_recall/schema.py +21 -0
  55. deepeval/metrics/turn_contextual_recall/template.py +178 -0
  56. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
  57. deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
  58. deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
  59. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
  60. deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
  61. deepeval/metrics/turn_faithfulness/template.py +218 -0
  62. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
  63. deepeval/metrics/turn_relevancy/template.py +2 -2
  64. deepeval/metrics/utils.py +39 -58
  65. deepeval/models/__init__.py +0 -12
  66. deepeval/models/base_model.py +16 -38
  67. deepeval/models/embedding_models/__init__.py +7 -0
  68. deepeval/models/embedding_models/azure_embedding_model.py +69 -32
  69. deepeval/models/embedding_models/local_embedding_model.py +39 -22
  70. deepeval/models/embedding_models/ollama_embedding_model.py +42 -18
  71. deepeval/models/embedding_models/openai_embedding_model.py +50 -15
  72. deepeval/models/llms/amazon_bedrock_model.py +1 -2
  73. deepeval/models/llms/anthropic_model.py +53 -20
  74. deepeval/models/llms/azure_model.py +140 -43
  75. deepeval/models/llms/deepseek_model.py +38 -23
  76. deepeval/models/llms/gemini_model.py +222 -103
  77. deepeval/models/llms/grok_model.py +39 -27
  78. deepeval/models/llms/kimi_model.py +39 -23
  79. deepeval/models/llms/litellm_model.py +103 -45
  80. deepeval/models/llms/local_model.py +35 -22
  81. deepeval/models/llms/ollama_model.py +129 -17
  82. deepeval/models/llms/openai_model.py +151 -50
  83. deepeval/models/llms/portkey_model.py +149 -0
  84. deepeval/models/llms/utils.py +5 -3
  85. deepeval/models/retry_policy.py +17 -14
  86. deepeval/models/utils.py +94 -4
  87. deepeval/optimizer/__init__.py +5 -0
  88. deepeval/optimizer/algorithms/__init__.py +6 -0
  89. deepeval/optimizer/algorithms/base.py +29 -0
  90. deepeval/optimizer/algorithms/configs.py +18 -0
  91. deepeval/optimizer/algorithms/copro/__init__.py +5 -0
  92. deepeval/optimizer/algorithms/copro/copro.py +836 -0
  93. deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
  94. deepeval/optimizer/algorithms/gepa/gepa.py +737 -0
  95. deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
  96. deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
  97. deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
  98. deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
  99. deepeval/optimizer/algorithms/simba/__init__.py +5 -0
  100. deepeval/optimizer/algorithms/simba/simba.py +999 -0
  101. deepeval/optimizer/algorithms/simba/types.py +15 -0
  102. deepeval/optimizer/configs.py +31 -0
  103. deepeval/optimizer/policies.py +227 -0
  104. deepeval/optimizer/prompt_optimizer.py +263 -0
  105. deepeval/optimizer/rewriter/__init__.py +5 -0
  106. deepeval/optimizer/rewriter/rewriter.py +124 -0
  107. deepeval/optimizer/rewriter/utils.py +214 -0
  108. deepeval/optimizer/scorer/__init__.py +5 -0
  109. deepeval/optimizer/scorer/base.py +86 -0
  110. deepeval/optimizer/scorer/scorer.py +316 -0
  111. deepeval/optimizer/scorer/utils.py +30 -0
  112. deepeval/optimizer/types.py +148 -0
  113. deepeval/optimizer/utils.py +480 -0
  114. deepeval/prompt/prompt.py +7 -6
  115. deepeval/test_case/__init__.py +1 -3
  116. deepeval/test_case/api.py +12 -10
  117. deepeval/test_case/conversational_test_case.py +19 -1
  118. deepeval/test_case/llm_test_case.py +152 -1
  119. deepeval/test_case/utils.py +4 -8
  120. deepeval/test_run/api.py +15 -14
  121. deepeval/test_run/cache.py +2 -0
  122. deepeval/test_run/test_run.py +9 -4
  123. deepeval/tracing/patchers.py +9 -4
  124. deepeval/tracing/tracing.py +2 -2
  125. deepeval/utils.py +89 -0
  126. {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
  127. {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/RECORD +134 -118
  128. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
  129. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
  130. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
  131. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
  132. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
  133. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
  134. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
  135. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
  136. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
  137. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
  138. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
  139. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
  140. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
  141. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
  142. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
  143. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
  144. deepeval/models/mlllms/__init__.py +0 -4
  145. deepeval/models/mlllms/azure_model.py +0 -334
  146. deepeval/models/mlllms/gemini_model.py +0 -284
  147. deepeval/models/mlllms/ollama_model.py +0 -144
  148. deepeval/models/mlllms/openai_model.py +0 -258
  149. deepeval/test_case/mllm_test_case.py +0 -170
  150. /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
  151. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
  152. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
  153. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
  154. {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
  155. {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
  156. {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
@@ -1,11 +1,16 @@
 from typing import Optional, List, Type, Union
 import asyncio
 
-from deepeval.utils import get_or_create_event_loop, prettify_list
+from deepeval.utils import (
+    get_or_create_event_loop,
+    prettify_list,
+    convert_to_multi_modal_array,
+)
 from deepeval.metrics.utils import (
     construct_verbose_logs,
     trimAndLoadJson,
     check_llm_test_case_params,
+    check_mllm_test_case_params,
     initialize_model,
 )
 from deepeval.test_case import (
@@ -57,7 +62,14 @@ class ContextualRelevancyMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -74,12 +86,16 @@ class ContextualRelevancyMetric(BaseMetric):
                     )
                 )
             else:
+
+                input = test_case.input
+                retrieval_context = test_case.retrieval_context
+
                 self.verdicts_list: List[ContextualRelevancyVerdicts] = [
-                    (self._generate_verdicts(test_case.input, context))
-                    for context in test_case.retrieval_context
+                    (self._generate_verdicts(input, context, multimodal))
+                    for context in retrieval_context
                 ]
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(test_case.input)
+                self.reason = self._generate_reason(input, multimodal)
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -103,7 +119,14 @@ class ContextualRelevancyMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -112,16 +135,19 @@ class ContextualRelevancyMetric(BaseMetric):
             _show_indicator=_show_indicator,
             _in_component=_in_component,
         ):
+            input = test_case.input
+            retrieval_context = test_case.retrieval_context
+
             self.verdicts_list: List[ContextualRelevancyVerdicts] = (
                 await asyncio.gather(
                     *[
-                        self._a_generate_verdicts(test_case.input, context)
-                        for context in test_case.retrieval_context
+                        self._a_generate_verdicts(input, context, multimodal)
+                        for context in retrieval_context
                     ]
                 )
             )
             self.score = self._calculate_score()
-            self.reason = await self._a_generate_reason(test_case.input)
+            self.reason = await self._a_generate_reason(input, multimodal)
             self.success = self.score >= self.threshold
             self.verbose_logs = construct_verbose_logs(
                 self,
@@ -136,7 +162,7 @@ class ContextualRelevancyMetric(BaseMetric):
             )
             return self.score
 
-    async def _a_generate_reason(self, input: str):
+    async def _a_generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
             return None
 
@@ -154,7 +180,9 @@ class ContextualRelevancyMetric(BaseMetric):
             irrelevant_statements=irrelevant_statements,
             relevant_statements=relevant_statements,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
+
         if self.using_native_model:
             res, cost = await self.model.a_generate(
                 prompt, schema=ContextualRelevancyScoreReason
@@ -174,7 +202,7 @@ class ContextualRelevancyMetric(BaseMetric):
                 data = trimAndLoadJson(res, self)
                 return data["reason"]
 
-    def _generate_reason(self, input: str):
+    def _generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
             return None
 
@@ -192,7 +220,9 @@ class ContextualRelevancyMetric(BaseMetric):
             irrelevant_statements=irrelevant_statements,
             relevant_statements=relevant_statements,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
        )
+
         if self.using_native_model:
             res, cost = self.model.generate(
                 prompt, schema=ContextualRelevancyScoreReason
@@ -226,11 +256,12 @@ class ContextualRelevancyMetric(BaseMetric):
         return 0 if self.strict_mode and score < self.threshold else score
 
     async def _a_generate_verdicts(
-        self, input: str, context: List[str]
+        self, input: str, context: List[str], multimodal: bool
    ) -> ContextualRelevancyVerdicts:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input, context=context
+            input=input, context=context, multimodal=multimodal
         )
+
         if self.using_native_model:
             res, cost = await self.model.a_generate(
                 prompt, schema=ContextualRelevancyVerdicts
@@ -249,11 +280,12 @@ class ContextualRelevancyMetric(BaseMetric):
                 return ContextualRelevancyVerdicts(**data)
 
     def _generate_verdicts(
-        self, input: str, context: str
+        self, input: str, context: str, multimodal: bool
    ) -> ContextualRelevancyVerdicts:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input, context=context
+            input=input, context=context, multimodal=multimodal
         )
+
         if self.using_native_model:
             res, cost = self.model.generate(
                 prompt, schema=ContextualRelevancyVerdicts
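Taken together, these hunks fold the previously separate multimodal contextual-relevancy metric into ContextualRelevancyMetric itself: when test_case.multimodal is true, the metric validates the case with check_mllm_test_case_params and threads the flag through verdict and reason generation. A minimal usage sketch follows, assuming (as the llm_test_case.py changes and the removal of mllm_test_case.py in this release suggest) that LLMTestCase now accepts MLLMImage objects alongside strings and derives its multimodal flag from them; all values below are illustrative only:

    from deepeval.metrics import ContextualRelevancyMetric
    from deepeval.test_case import LLMTestCase, MLLMImage

    # Hypothetical mixed text-and-image retrieval context.
    test_case = LLMTestCase(
        input="What does the floor plan show?",
        actual_output="A two-bedroom apartment of roughly 75 square metres.",
        retrieval_context=[
            "Listing: two-bedroom apartment, 75 sqm, second floor.",
            MLLMImage(url="https://example.com/floor_plan.png"),  # assumed constructor
        ],
    )

    metric = ContextualRelevancyMetric()
    # measure() now routes to check_mllm_test_case_params when test_case.multimodal is True
    # and passes multimodal=... down to the template's generate_verdicts/generate_reason.
    metric.measure(test_case)
    print(metric.score, metric.reason)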
@@ -1,4 +1,5 @@
-from typing import List
+from typing import List, Union
+import textwrap
 
 
 class ContextualRelevancyTemplate:
@@ -8,70 +9,98 @@ class ContextualRelevancyTemplate:
         irrelevant_statements: List[str],
         relevant_statements: List[str],
         score: float,
+        multimodal: bool = False,
     ):
-        return f"""Based on the given input, reasons for why the retrieval context is irrelevant to the input, the statements in the retrieval context that is actually relevant to the retrieval context, and the contextual relevancy score (the closer to 1 the better), please generate a CONCISE reason for the score.
-In your reason, you should quote data provided in the reasons for irrelevancy and relevant statements to support your point.
+        # Note: irrelevancies parameter name in multimodal version is kept as irrelevant_statements for consistency
+        return textwrap.dedent(
+            f"""Based on the given input, reasons for why the retrieval context is irrelevant to the input, the statements in the retrieval context that is actually relevant to the retrieval context, and the contextual relevancy score (the closer to 1 the better), please generate a CONCISE reason for the score.
+            In your reason, you should quote data provided in the reasons for irrelevancy and relevant statements to support your point.
 
-**
-IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
-Example JSON:
-{{
-"reason": "The score is <contextual_relevancy_score> because <your_reason>."
-}}
+            **
+            IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+            Example JSON:
+            {{
+            "reason": "The score is <contextual_relevancy_score> because <your_reason>."
+            }}
 
-If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
-**
+            If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
+            **
 
 
-Contextual Relevancy Score:
-{score}
+            Contextual Relevancy Score:
+            {score}
 
-Input:
-{input}
+            Input:
+            {input}
+
+            Reasons for why the retrieval context is irrelevant to the input:
+            {irrelevant_statements}
 
-Reasons for why the retrieval context is irrelevant to the input:
-{irrelevant_statements}
+            Statement in the retrieval context that is relevant to the input:
+            {relevant_statements}
 
-Statement in the retrieval context that is relevant to the input:
-{relevant_statements}
-
-JSON:
-"""
+            JSON:
+            """
+        )
 
     @staticmethod
-    def generate_verdicts(input: str, context: str):
-        return f"""Based on the input and context, please generate a JSON object to indicate whether each statement found in the context is relevant to the provided input. The JSON will be a list of 'verdicts', with 2 mandatory fields: 'verdict' and 'statement', and 1 optional field: 'reason'.
-You should first extract statements found in the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement.
-The 'verdict' key should STRICTLY be either 'yes' or 'no', and states whether the statement is relevant to the input.
-Provide a 'reason' ONLY IF verdict is no. You MUST quote the irrelevant parts of the statement to back up your reason.
-If provided context contains no actual content or statements then: give \"no\" as a \"verdict\",
-put context into \"statement\", and \"No statements found in provided context.\" into \"reason\".
-**
-IMPORTANT: Please make sure to only return in JSON format.
-Example Context: "Einstein won the Nobel Prize for his discovery of the photoelectric effect. He won the Nobel Prize in 1968. There was a cat."
-Example Input: "What were some of Einstein's achievements?"
-
-Example:
-{{
-"verdicts": [
-{{
-"verdict": "yes",
-"statement": "Einstein won the Nobel Prize for his discovery of the photoelectric effect in 1968",
-}},
-{{
-"verdict": "no",
-"statement": "There was a cat.",
-"reason": "The retrieval context contained the information 'There was a cat' when it has nothing to do with Einstein's achievements."
-}}
-]
-}}
-**
-
-Input:
-{input}
-
-Context:
-{context}
-
-JSON:
-"""
+    def generate_verdicts(
+        input: str,
+        context: str,
+        multimodal: bool = False,
+    ):
+        context_type = "context (image or string)" if multimodal else "context"
+        statement_or_image = "statement or image" if multimodal else "statement"
+
+        # Conditional instructions based on mode
+        extraction_instructions = ""
+        if multimodal:
+            extraction_instructions = textwrap.dedent(
+                """
+                If the context is textual, you should first extract the statements found in the context if the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement.
+                If the context is an image, `statement` should be a description of the image. Do not assume any information not visibly available.
+                """
+            ).strip()
+        else:
+            extraction_instructions = "You should first extract statements found in the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement."
+
+        # Additional instruction for empty context (only in non-multimodal)
+        empty_context_instruction = ""
+        if not multimodal:
+            empty_context_instruction = '\nIf provided context contains no actual content or statements then: give "no" as a "verdict",\nput context into "statement", and "No statements found in provided context." into "reason".'
+
+        return textwrap.dedent(
+            f"""Based on the input and {context_type}, please generate a JSON object to indicate whether {'the context' if multimodal else 'each statement found in the context'} is relevant to the provided input. The JSON will be a list of 'verdicts', with 2 mandatory fields: 'verdict' and 'statement', and 1 optional field: 'reason'.
+            {extraction_instructions}
+            The 'verdict' key should STRICTLY be either 'yes' or 'no', and states whether the {statement_or_image} is relevant to the input.
+            Provide a 'reason' ONLY IF verdict is no. You MUST quote the irrelevant parts of the {statement_or_image} to back up your reason.{empty_context_instruction}
+            **
+            IMPORTANT: Please make sure to only return in JSON format.
+            Example Context: "Einstein won the Nobel Prize for his discovery of the photoelectric effect. He won the Nobel Prize in 1968. There was a cat."
+            Example Input: "What were some of Einstein's achievements?"
+
+            Example:
+            {{
+            "verdicts": [
+            {{
+            "statement": "Einstein won the Nobel Prize for his discovery of the photoelectric effect in 1968",
+            "verdict": "yes"
+            }},
+            {{
+            "statement": "There was a cat.",
+            "reason": "The retrieval context contained the information 'There was a cat' when it has nothing to do with Einstein's achievements.",
+            "verdict": "no"
+            }}
+            ]
+            }}
+            **
+
+            Input:
+            {input}
+
+            Context:
+            {context}
+
+            JSON:
+            """
+        )
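The template is refactored in the same spirit: prompts are now wrapped in textwrap.dedent, and generate_verdicts/generate_reason gain a multimodal keyword that swaps the wording ("context" vs. "context (image or string)", "statement" vs. "statement or image") and drops the empty-context instruction in multimodal mode. Because these are plain staticmethods, the resulting prompt text can be inspected directly; a small sketch with illustrative arguments:

    from deepeval.metrics.contextual_relevancy.template import (
        ContextualRelevancyTemplate,
    )

    # Text-only prompt: same wording as before, now built via textwrap.dedent.
    text_prompt = ContextualRelevancyTemplate.generate_verdicts(
        input="What were some of Einstein's achievements?",
        context="Einstein won the Nobel Prize in 1968. There was a cat.",
    )

    # Multimodal prompt: "context (image or string)" / "statement or image" phrasing,
    # and no empty-context instruction.
    image_prompt = ContextualRelevancyTemplate.generate_verdicts(
        input="What does the chart show?",
        context="<retrieved image placeholder>",
        multimodal=True,
    )
    print(text_prompt == image_prompt)  # False: the instructions differ by mode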
@@ -86,8 +86,8 @@ User wants to tell the assistant something.
 
 Example JSON:
 {{
-"verdict": "no",
-"reason": "The user wanted to tell the assistant something but the LLM not only refused to answer but replied 'Oh ok, in that case should you need anything just let me know!', which is completely irrelevant and doesn't satisfy the user at all. "
+"reason": "The user wanted to tell the assistant something but the LLM not only refused to answer but replied 'Oh ok, in that case should you need anything just let me know!', which is completely irrelevant and doesn't satisfy the user at all.",
+"verdict": "no"
 }}
 ===== END OF EXAMPLE ======
 
@@ -77,8 +77,8 @@ class ConversationalBinaryJudgementTemplate:
 
 Example:
 {{
-"verdict": true,
-"reason": "The assistant provided a clear and direct answer in response to every user query."
+"reason": "The assistant provided a clear and direct answer in response to every user query.",
+"verdict": true
 }}
 **
 JSON:
@@ -108,8 +108,8 @@ class ConversationalNonBinaryJudgementTemplate:
 
 Example:
 {{
-"verdict": "{options[1]}",
-"reason": "The assistant partially addressed the user’s issue but missed clarifying their follow-up question."
+"reason": "The assistant partially addressed the user's issue but missed clarifying their follow-up question.",
+"verdict": "{options[1]}"
 }}
 **
 JSON:
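The conversational judgement hunks above only reorder the example JSON so that "reason" comes before "verdict". The BinaryJudgementTemplate hunk further down also replaces Python-style True with JSON true in its example; that plausibly matters because the model's reply is ultimately parsed as JSON (via trimAndLoadJson, visible in the metric diffs), so an example showing True invites output that fails to parse. A standalone illustration, not deepeval code:

    import json

    # JSON booleans are lowercase; this parses fine.
    json.loads('{"reason": "...", "verdict": true}')

    # Python-style capitalisation is not valid JSON and raises JSONDecodeError.
    try:
        json.loads('{"reason": "...", "verdict": True}')
    except json.JSONDecodeError as err:
        print("invalid example output:", err)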
@@ -70,7 +70,8 @@ JSON:
 ---
 Example JSON:
 {{
-"score": 0,
-"reason": "Your concise and informative reason here."
+"reason": "Your concise and informative reason here.",
+"score": 0
 }}
-"""
+
+JSON:"""
@@ -60,11 +60,11 @@ class BinaryJudgementTemplate:
 {text}
 
 **
-IMPORTANT: Please make sure to only return a json with two keys: `verdict` (True or False), and the 'reason' key providing the reason. The verdict must be a boolean only, either True or False.
+IMPORTANT: Please make sure to only return a json with two keys: `verdict` (true or false), and the 'reason' key providing the reason. The verdict must be a boolean only, either true or false.
 Example JSON:
 {{
-"verdict": True,
-"reason": "..."
+"reason": "...",
+"verdict": true
 }}
 **
 
@@ -85,8 +85,8 @@ class NonBinaryJudgementTemplate:
 IMPORTANT: Please make sure to only return a json with two keys: 'verdict' {options} and 'reason' providing the reason.
 Example JSON:
 {{
-"verdict": {options},
-"reason": "..."
+"reason": "...",
+"verdict": {options}
 }}
 **
 
@@ -1,16 +1,17 @@
 from typing import List, Optional, Union, Type
 import asyncio
 
-from deepeval.test_case import (
-    LLMTestCase,
-    LLMTestCaseParams,
-)
+from deepeval.test_case import LLMTestCase, LLMTestCaseParams, MLLMImage
 from deepeval.metrics import BaseMetric
-from deepeval.utils import get_or_create_event_loop, prettify_list
+from deepeval.utils import (
+    get_or_create_event_loop,
+    prettify_list,
+)
 from deepeval.metrics.utils import (
     construct_verbose_logs,
     trimAndLoadJson,
     check_llm_test_case_params,
+    check_mllm_test_case_params,
     initialize_model,
 )
 from deepeval.models import DeepEvalBaseLLM
@@ -67,7 +68,13 @@ class FaithfulnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -84,11 +91,16 @@ class FaithfulnessMetric(BaseMetric):
                     )
                 )
             else:
-                self.truths = self._generate_truths(test_case.retrieval_context)
-                self.claims = self._generate_claims(test_case.actual_output)
-                self.verdicts = self._generate_verdicts()
+                retrieval_context = test_case.retrieval_context
+                actual_output = test_case.actual_output
+
+                self.truths = self._generate_truths(
+                    retrieval_context, multimodal
+                )
+                self.claims = self._generate_claims(actual_output, multimodal)
+                self.verdicts = self._generate_verdicts(multimodal)
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason()
+                self.reason = self._generate_reason(multimodal)
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -114,7 +126,13 @@ class FaithfulnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -123,13 +141,16 @@ class FaithfulnessMetric(BaseMetric):
             _show_indicator=_show_indicator,
             _in_component=_in_component,
         ):
+            retrieval_context = test_case.retrieval_context
+            actual_output = test_case.actual_output
+
             self.truths, self.claims = await asyncio.gather(
-                self._a_generate_truths(test_case.retrieval_context),
-                self._a_generate_claims(test_case.actual_output),
+                self._a_generate_truths(retrieval_context, multimodal),
+                self._a_generate_claims(actual_output, multimodal),
             )
-            self.verdicts = await self._a_generate_verdicts()
+            self.verdicts = await self._a_generate_verdicts(multimodal)
             self.score = self._calculate_score()
-            self.reason = await self._a_generate_reason()
+            self.reason = await self._a_generate_reason(multimodal)
             self.success = self.score >= self.threshold
             self.verbose_logs = construct_verbose_logs(
                 self,
@@ -146,7 +167,7 @@ class FaithfulnessMetric(BaseMetric):
             )
             return self.score
 
-    async def _a_generate_reason(self) -> str:
+    async def _a_generate_reason(self, multimodal: bool) -> str:
         if self.include_reason is False:
             return None
 
@@ -158,6 +179,7 @@ class FaithfulnessMetric(BaseMetric):
         prompt = self.evaluation_template.generate_reason(
             contradictions=contradictions,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
         if self.using_native_model:
@@ -177,7 +199,7 @@ class FaithfulnessMetric(BaseMetric):
                 data = trimAndLoadJson(res, self)
                 return data["reason"]
 
-    def _generate_reason(self) -> str:
+    def _generate_reason(self, multimodal: bool) -> str:
         if self.include_reason is False:
             return None
 
@@ -189,6 +211,7 @@ class FaithfulnessMetric(BaseMetric):
         prompt = self.evaluation_template.generate_reason(
             contradictions=contradictions,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
         if self.using_native_model:
@@ -208,14 +231,20 @@ class FaithfulnessMetric(BaseMetric):
                 data = trimAndLoadJson(res, self)
                 return data["reason"]
 
-    async def _a_generate_verdicts(self) -> List[FaithfulnessVerdict]:
+    async def _a_generate_verdicts(
+        self, multimodal: bool
+    ) -> List[FaithfulnessVerdict]:
         if len(self.claims) == 0:
             return []
 
         verdicts: List[FaithfulnessVerdict] = []
+
         prompt = self.evaluation_template.generate_verdicts(
-            claims=self.claims, retrieval_context="\n\n".join(self.truths)
+            claims=self.claims,
+            retrieval_context="\n\n".join(self.truths),
+            multimodal=multimodal,
         )
+
         if self.using_native_model:
             res, cost = await self.model.a_generate(prompt, schema=Verdicts)
             self.evaluation_cost += cost
@@ -236,14 +265,18 @@ class FaithfulnessMetric(BaseMetric):
                 ]
                 return verdicts
 
-    def _generate_verdicts(self) -> List[FaithfulnessVerdict]:
+    def _generate_verdicts(self, multimodal: bool) -> List[FaithfulnessVerdict]:
         if len(self.claims) == 0:
             return []
 
         verdicts: List[FaithfulnessVerdict] = []
+
         prompt = self.evaluation_template.generate_verdicts(
-            claims=self.claims, retrieval_context="\n\n".join(self.truths)
+            claims=self.claims,
+            retrieval_context="\n\n".join(self.truths),
+            multimodal=multimodal,
        )
+
         if self.using_native_model:
             res, cost = self.model.generate(prompt, schema=Verdicts)
             self.evaluation_cost += cost
@@ -262,10 +295,13 @@ class FaithfulnessMetric(BaseMetric):
                 ]
                 return verdicts
 
-    async def _a_generate_truths(self, retrieval_context: str) -> List[str]:
+    async def _a_generate_truths(
+        self, retrieval_context: str, multimodal: bool
+    ) -> List[str]:
         prompt = self.evaluation_template.generate_truths(
             retrieval_context="\n\n".join(retrieval_context),
             extraction_limit=self.truths_extraction_limit,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = await self.model.a_generate(prompt, schema=Truths)
@@ -280,10 +316,13 @@ class FaithfulnessMetric(BaseMetric):
                 data = trimAndLoadJson(res, self)
                 return data["truths"]
 
-    def _generate_truths(self, retrieval_context: str) -> List[str]:
+    def _generate_truths(
+        self, retrieval_context: str, multimodal: bool
+    ) -> List[str]:
         prompt = self.evaluation_template.generate_truths(
             retrieval_context="\n\n".join(retrieval_context),
             extraction_limit=self.truths_extraction_limit,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = self.model.generate(prompt, schema=Truths)
@@ -298,9 +337,11 @@ class FaithfulnessMetric(BaseMetric):
                 data = trimAndLoadJson(res, self)
                 return data["truths"]
 
-    async def _a_generate_claims(self, actual_output: str) -> List[str]:
+    async def _a_generate_claims(
+        self, actual_output: str, multimodal: bool
+    ) -> List[str]:
         prompt = self.evaluation_template.generate_claims(
-            actual_output=actual_output
+            actual_output=actual_output, multimodal=multimodal
         )
         if self.using_native_model:
             res, cost = await self.model.a_generate(prompt, schema=Claims)
@@ -315,9 +356,11 @@ class FaithfulnessMetric(BaseMetric):
                 data = trimAndLoadJson(res, self)
                 return data["claims"]
 
-    def _generate_claims(self, actual_output: str) -> List[str]:
+    def _generate_claims(
+        self, actual_output: str, multimodal: bool
+    ) -> List[str]:
         prompt = self.evaluation_template.generate_claims(
-            actual_output=actual_output
+            actual_output=actual_output, multimodal=multimodal
         )
         if self.using_native_model:
             res, cost = self.model.generate(prompt, schema=Claims)
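FaithfulnessMetric follows the same pattern: the multimodal flag is threaded through truth, claim, verdict, and reason generation, each of which now passes multimodal= to the template. One practical consequence is that a custom template supplied through the metric's evaluation_template argument has to accept the new keyword. A hedged sketch of a compatible override (the extra instruction and the default value are illustrative; only the multimodal parameter itself is taken from the calls above):

    from deepeval.metrics import FaithfulnessMetric
    from deepeval.metrics.faithfulness.template import FaithfulnessTemplate

    class StricterFaithfulnessTemplate(FaithfulnessTemplate):
        @staticmethod
        def generate_claims(actual_output: str, multimodal: bool = False):
            # Reuse the stock prompt, then tighten it with an extra instruction.
            base = FaithfulnessTemplate.generate_claims(
                actual_output=actual_output, multimodal=multimodal
            )
            return base + "\nOnly extract claims that are stated verbatim."

    metric = FaithfulnessMetric(evaluation_template=StricterFaithfulnessTemplate)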
@@ -3,7 +3,7 @@ from pydantic import BaseModel, Field
 
 
 class FaithfulnessVerdict(BaseModel):
-    verdict: Literal["yes", "idk", "no"]
+    verdict: Literal["yes", "no", "idk"]
     reason: Optional[str] = Field(default=None)
 
 