deepeval 3.7.4__py3-none-any.whl → 3.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. deepeval/_version.py +1 -1
  2. deepeval/dataset/golden.py +54 -2
  3. deepeval/evaluate/evaluate.py +16 -8
  4. deepeval/evaluate/execute.py +70 -26
  5. deepeval/evaluate/utils.py +26 -22
  6. deepeval/integrations/pydantic_ai/agent.py +19 -2
  7. deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
  8. deepeval/metrics/__init__.py +14 -12
  9. deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
  10. deepeval/metrics/answer_relevancy/template.py +188 -92
  11. deepeval/metrics/base_metric.py +2 -5
  12. deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
  13. deepeval/metrics/contextual_precision/template.py +115 -66
  14. deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
  15. deepeval/metrics/contextual_recall/template.py +106 -55
  16. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
  17. deepeval/metrics/contextual_relevancy/template.py +87 -58
  18. deepeval/metrics/dag/templates.py +2 -2
  19. deepeval/metrics/faithfulness/faithfulness.py +70 -27
  20. deepeval/metrics/faithfulness/schema.py +1 -1
  21. deepeval/metrics/faithfulness/template.py +200 -115
  22. deepeval/metrics/g_eval/utils.py +2 -2
  23. deepeval/metrics/indicator.py +4 -4
  24. deepeval/metrics/multimodal_metrics/__init__.py +0 -18
  25. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
  26. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
  27. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
  28. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
  29. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
  30. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
  31. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
  32. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
  33. deepeval/metrics/ragas.py +3 -3
  34. deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
  35. deepeval/metrics/turn_contextual_precision/schema.py +21 -0
  36. deepeval/metrics/turn_contextual_precision/template.py +187 -0
  37. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
  38. deepeval/metrics/turn_contextual_recall/schema.py +21 -0
  39. deepeval/metrics/turn_contextual_recall/template.py +178 -0
  40. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
  41. deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
  42. deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
  43. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
  44. deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
  45. deepeval/metrics/turn_faithfulness/template.py +218 -0
  46. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
  47. deepeval/metrics/utils.py +39 -58
  48. deepeval/models/__init__.py +0 -12
  49. deepeval/models/base_model.py +16 -38
  50. deepeval/models/embedding_models/__init__.py +7 -0
  51. deepeval/models/embedding_models/azure_embedding_model.py +52 -28
  52. deepeval/models/embedding_models/local_embedding_model.py +18 -14
  53. deepeval/models/embedding_models/ollama_embedding_model.py +38 -16
  54. deepeval/models/embedding_models/openai_embedding_model.py +40 -21
  55. deepeval/models/llms/amazon_bedrock_model.py +1 -2
  56. deepeval/models/llms/anthropic_model.py +44 -23
  57. deepeval/models/llms/azure_model.py +121 -36
  58. deepeval/models/llms/deepseek_model.py +18 -13
  59. deepeval/models/llms/gemini_model.py +129 -43
  60. deepeval/models/llms/grok_model.py +18 -13
  61. deepeval/models/llms/kimi_model.py +18 -13
  62. deepeval/models/llms/litellm_model.py +42 -22
  63. deepeval/models/llms/local_model.py +12 -7
  64. deepeval/models/llms/ollama_model.py +114 -12
  65. deepeval/models/llms/openai_model.py +137 -41
  66. deepeval/models/llms/portkey_model.py +24 -7
  67. deepeval/models/llms/utils.py +5 -3
  68. deepeval/models/retry_policy.py +17 -14
  69. deepeval/models/utils.py +46 -1
  70. deepeval/optimizer/__init__.py +5 -0
  71. deepeval/optimizer/algorithms/__init__.py +6 -0
  72. deepeval/optimizer/algorithms/base.py +29 -0
  73. deepeval/optimizer/algorithms/configs.py +18 -0
  74. deepeval/optimizer/algorithms/copro/__init__.py +5 -0
  75. deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
  76. deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
  77. deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
  78. deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
  79. deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
  80. deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
  81. deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
  82. deepeval/optimizer/algorithms/simba/__init__.py +5 -0
  83. deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
  84. deepeval/{optimization → optimizer}/configs.py +5 -8
  85. deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
  86. deepeval/optimizer/prompt_optimizer.py +263 -0
  87. deepeval/optimizer/rewriter/__init__.py +5 -0
  88. deepeval/optimizer/rewriter/rewriter.py +124 -0
  89. deepeval/optimizer/rewriter/utils.py +214 -0
  90. deepeval/optimizer/scorer/__init__.py +5 -0
  91. deepeval/optimizer/scorer/base.py +86 -0
  92. deepeval/optimizer/scorer/scorer.py +316 -0
  93. deepeval/optimizer/scorer/utils.py +30 -0
  94. deepeval/optimizer/types.py +148 -0
  95. deepeval/{optimization → optimizer}/utils.py +47 -165
  96. deepeval/prompt/prompt.py +5 -9
  97. deepeval/test_case/__init__.py +1 -3
  98. deepeval/test_case/api.py +12 -10
  99. deepeval/test_case/conversational_test_case.py +19 -1
  100. deepeval/test_case/llm_test_case.py +152 -1
  101. deepeval/test_case/utils.py +4 -8
  102. deepeval/test_run/api.py +15 -14
  103. deepeval/test_run/test_run.py +3 -3
  104. deepeval/tracing/patchers.py +9 -4
  105. deepeval/tracing/tracing.py +2 -2
  106. deepeval/utils.py +65 -0
  107. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
  108. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/RECORD +116 -125
  109. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
  110. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
  111. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
  112. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
  113. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
  114. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
  115. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
  116. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
  117. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
  118. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
  119. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
  120. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
  121. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
  122. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
  123. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
  124. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
  125. deepeval/models/mlllms/__init__.py +0 -4
  126. deepeval/models/mlllms/azure_model.py +0 -343
  127. deepeval/models/mlllms/gemini_model.py +0 -313
  128. deepeval/models/mlllms/ollama_model.py +0 -175
  129. deepeval/models/mlllms/openai_model.py +0 -309
  130. deepeval/optimization/__init__.py +0 -13
  131. deepeval/optimization/adapters/__init__.py +0 -2
  132. deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
  133. deepeval/optimization/aggregates.py +0 -14
  134. deepeval/optimization/copro/configs.py +0 -31
  135. deepeval/optimization/gepa/__init__.py +0 -7
  136. deepeval/optimization/gepa/configs.py +0 -115
  137. deepeval/optimization/miprov2/configs.py +0 -134
  138. deepeval/optimization/miprov2/loop.py +0 -785
  139. deepeval/optimization/mutations/__init__.py +0 -0
  140. deepeval/optimization/mutations/prompt_rewriter.py +0 -458
  141. deepeval/optimization/policies/__init__.py +0 -16
  142. deepeval/optimization/policies/tie_breaker.py +0 -67
  143. deepeval/optimization/prompt_optimizer.py +0 -462
  144. deepeval/optimization/simba/__init__.py +0 -0
  145. deepeval/optimization/simba/configs.py +0 -33
  146. deepeval/optimization/types.py +0 -361
  147. deepeval/test_case/mllm_test_case.py +0 -170
  148. /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
  149. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
  150. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
  151. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
  152. /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
  153. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
  154. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
  155. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
@@ -1,10 +1,14 @@
 from typing import Optional, List, Type, Union

-from deepeval.utils import get_or_create_event_loop, prettify_list
+from deepeval.utils import (
+    get_or_create_event_loop,
+    prettify_list,
+)
 from deepeval.metrics.utils import (
     construct_verbose_logs,
     trimAndLoadJson,
     check_llm_test_case_params,
+    check_mllm_test_case_params,
     initialize_model,
 )
 from deepeval.test_case import (
@@ -56,7 +60,15 @@ class ContextualPrecisionMetric(BaseMetric):
         _in_component: bool = False,
         _log_metric_to_confident: bool = True,
     ) -> float:
-        check_llm_test_case_params(test_case, self._required_params, self)
+
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -73,15 +85,20 @@ class ContextualPrecisionMetric(BaseMetric):
                     )
                 )
             else:
+                input = test_case.input
+                expected_output = test_case.expected_output
+                retrieval_context = test_case.retrieval_context
+
                 self.verdicts: List[cpschema.ContextualPrecisionVerdict] = (
                     self._generate_verdicts(
-                        test_case.input,
-                        test_case.expected_output,
-                        test_case.retrieval_context,
+                        input,
+                        expected_output,
+                        retrieval_context,
+                        multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(test_case.input)
+                self.reason = self._generate_reason(input, multimodal)
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -104,7 +121,14 @@ class ContextualPrecisionMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:

-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -113,15 +137,17 @@ class ContextualPrecisionMetric(BaseMetric):
             _show_indicator=_show_indicator,
             _in_component=_in_component,
         ):
+            input = test_case.input
+            expected_output = test_case.expected_output
+            retrieval_context = test_case.retrieval_context
+
             self.verdicts: List[cpschema.ContextualPrecisionVerdict] = (
                 await self._a_generate_verdicts(
-                    test_case.input,
-                    test_case.expected_output,
-                    test_case.retrieval_context,
+                    input, expected_output, retrieval_context, multimodal
                 )
             )
             self.score = self._calculate_score()
-            self.reason = await self._a_generate_reason(test_case.input)
+            self.reason = await self._a_generate_reason(input, multimodal)
             self.success = self.score >= self.threshold
             self.verbose_logs = construct_verbose_logs(
                 self,
@@ -136,7 +162,7 @@ class ContextualPrecisionMetric(BaseMetric):
         )
         return self.score

-    async def _a_generate_reason(self, input: str):
+    async def _a_generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
             return None

@@ -148,6 +174,7 @@ class ContextualPrecisionMetric(BaseMetric):
             input=input,
             verdicts=retrieval_contexts_verdicts,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )

         if self.using_native_model:
@@ -169,7 +196,7 @@ class ContextualPrecisionMetric(BaseMetric):
             data = trimAndLoadJson(res, self)
             return data["reason"]

-    def _generate_reason(self, input: str):
+    def _generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
             return None

@@ -181,6 +208,7 @@ class ContextualPrecisionMetric(BaseMetric):
             input=input,
             verdicts=retrieval_contexts_verdicts,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )

         if self.using_native_model:
@@ -203,12 +231,17 @@ class ContextualPrecisionMetric(BaseMetric):
             return data["reason"]

     async def _a_generate_verdicts(
-        self, input: str, expected_output: str, retrieval_context: List[str]
+        self,
+        input: str,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[cpschema.ContextualPrecisionVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
             input=input,
             expected_output=expected_output,
             retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = await self.model.a_generate(
@@ -234,12 +267,17 @@ class ContextualPrecisionMetric(BaseMetric):
         return verdicts

     def _generate_verdicts(
-        self, input: str, expected_output: str, retrieval_context: List[str]
+        self,
+        input: str,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[cpschema.ContextualPrecisionVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
             input=input,
             expected_output=expected_output,
             retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = self.model.generate(prompt, schema=cpschema.Verdicts)
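
The hunks above thread a multimodal flag through ContextualPrecisionMetric: measure() and a_measure() now read test_case.multimodal, validate with check_mllm_test_case_params instead of check_llm_test_case_params, and forward the flag to the verdict and reason templates. A minimal usage sketch of that new branch, assuming 3.7.5 lets LLMTestCase.retrieval_context mix plain strings with MLLMImage entries and that a multimodal-capable judge model is configured; the example values are illustrative:

from deepeval.metrics import ContextualPrecisionMetric
from deepeval.test_case import LLMTestCase, MLLMImage

metric = ContextualPrecisionMetric(threshold=0.7, include_reason=True)

test_case = LLMTestCase(
    input="What does the amber dashboard light mean?",
    actual_output="It signals low tire pressure.",
    expected_output="The light indicates low tire pressure.",
    retrieval_context=[
        "Manual section 4.2: the TPMS light signals low tire pressure.",
        MLLMImage(url="https://example.com/dashboard.png"),  # assumption: images allowed here in 3.7.5
    ],
)

# Per the diff, measure() checks test_case.multimodal and routes validation
# through check_mllm_test_case_params before generating verdicts.
metric.measure(test_case)
print(metric.score, metric.reason)
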
@@ -1,84 +1,133 @@
-from typing import List, Dict
+from typing import List, Dict, Union
+import textwrap
+from deepeval.test_case import MLLMImage
+from deepeval.utils import convert_to_multi_modal_array


 class ContextualPrecisionTemplate:
     @staticmethod
     def generate_verdicts(
-        input: str, expected_output: str, retrieval_context: List[str]
+        input: str,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool = False,
     ):
         document_count_str = f" ({len(retrieval_context)} document{'s' if len(retrieval_context) > 1 else ''})"
-        return f"""Given the input, expected output, and retrieval context, please generate a list of JSON objects to determine whether each node in the retrieval context was remotely useful in arriving at the expected output.
-
-**
-IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON. These JSON only contain the `verdict` key that outputs only 'yes' or 'no', and a `reason` key to justify the verdict. In your reason, you should aim to quote parts of the context.
-Example Retrieval Context: ["Einstein won the Nobel Prize for his discovery of the photoelectric effect", "He won the Nobel Prize in 1968.", "There was a cat."]
-Example Input: "Who won the Nobel Prize in 1968 and for what?"
-Example Expected Output: "Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect."
-
-Example:
-{{
-    "verdicts": [
-        {{
-            "reason": "It clearly addresses the question by stating that 'Einstein won the Nobel Prize for his discovery of the photoelectric effect.'",
-            "verdict": "yes"
-        }},
-        {{
-            "reason": "The text verifies that the prize was indeed won in 1968.",
-            "verdict": "yes"
-        }},
-        {{
-            "reason": "'There was a cat' is not at all relevant to the topic of winning a Nobel Prize.",
-            "verdict": "no"
-        }}
-    ]
-}}
-Since you are going to generate a verdict for each context, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to that of the contexts.
-**
-
-Input:
-{input}
-
-Expected output:
-{expected_output}
-
-Retrieval Context{document_count_str}:
-{retrieval_context}
-
-JSON:
-"""
+
+        # For multimodal, we need to annotate the retrieval context with node IDs
+        context_to_display = (
+            ContextualPrecisionTemplate.id_retrieval_context(retrieval_context)
+            if multimodal
+            else retrieval_context
+        )
+
+        multimodal_note = (
+            " (which can be text or an image)" if multimodal else ""
+        )
+
+        prompt_template = textwrap.dedent(
+            f"""Given the input, expected output, and retrieval context, please generate a list of JSON objects to determine whether each node in the retrieval context was remotely useful in arriving at the expected output.
+
+            **
+            IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON. These JSON only contain the `verdict` key that outputs only 'yes' or 'no', and a `reason` key to justify the verdict. In your reason, you should aim to quote parts of the context {multimodal_note}.
+            Example Retrieval Context: ["Einstein won the Nobel Prize for his discovery of the photoelectric effect", "He won the Nobel Prize in 1968.", "There was a cat."]
+            Example Input: "Who won the Nobel Prize in 1968 and for what?"
+            Example Expected Output: "Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect."
+
+            Example:
+            {{
+                "verdicts": [
+                    {{
+                        "reason": "It clearly addresses the question by stating that 'Einstein won the Nobel Prize for his discovery of the photoelectric effect.'",
+                        "verdict": "yes"
+                    }},
+                    {{
+                        "reason": "The text verifies that the prize was indeed won in 1968.",
+                        "verdict": "yes"
+                    }},
+                    {{
+                        "reason": "'There was a cat' is not at all relevant to the topic of winning a Nobel Prize.",
+                        "verdict": "no"
+                    }}
+                ]
+            }}
+            Since you are going to generate a verdict for each context, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to that of the contexts.
+            **
+
+            Input:
+            {input}
+
+            Expected output:
+            {expected_output}
+
+            Retrieval Context {document_count_str}:
+            {context_to_display}
+
+            JSON:
+            """
+        )
+
+        return prompt_template

     @staticmethod
     def generate_reason(
-        input: str, score: float, verdicts: List[Dict[str, str]]
+        input: str,
+        score: float,
+        verdicts: List[Dict[str, str]],
+        multimodal: bool = False,
     ):
-        # given the input and retrieval context for this input, where the verdict is whether ... and the node is the ..., give a reason for the score
-        return f"""Given the input, retrieval contexts, and contextual precision score, provide a CONCISE summary for the score. Explain why it is not higher, but also why it is at its current score.
-The retrieval contexts is a list of JSON with three keys: `verdict`, `reason` (reason for the verdict) and `node`. `verdict` will be either 'yes' or 'no', which represents whether the corresponding 'node' in the retrieval context is relevant to the input.
-Contextual precision represents if the relevant nodes are ranked higher than irrelevant nodes. Also note that retrieval contexts is given IN THE ORDER OF THEIR RANKINGS.
+        return textwrap.dedent(
+            f"""Given the input, retrieval contexts, and contextual precision score, provide a CONCISE {'summarize' if multimodal else 'summary'} for the score. Explain why it is not higher, but also why it is at its current score.
+            The retrieval contexts is a list of JSON with three keys: `verdict`, `reason` (reason for the verdict) and `node`. `verdict` will be either 'yes' or 'no', which represents whether the corresponding 'node' in the retrieval context is relevant to the input.
+            Contextual precision represents if the relevant nodes are ranked higher than irrelevant nodes. Also note that retrieval contexts is given IN THE ORDER OF THEIR RANKINGS.
+
+            **
+            IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+            Example JSON:
+            {{
+                "reason": "The score is <contextual_precision_score> because <your_reason>."
+            }}

-**
-IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
-Example JSON:
-{{
-    "reason": "The score is <contextual_precision_score> because <your_reason>."
-}}

+            DO NOT mention 'verdict' in your reason, but instead phrase it as irrelevant nodes. The term 'verdict' {'are' if multimodal else 'is'} just here for you to understand the broader scope of things.
+            Also DO NOT mention there are `reason` fields in the retrieval contexts you are presented with, instead just use the information in the `reason` field.
+            In your reason, you MUST USE the `reason`, QUOTES in the 'reason', and the node RANK (starting from 1, eg. first node) to explain why the 'no' verdicts should be ranked lower than the 'yes' verdicts.
+            When addressing nodes, make it explicit that {'it is' if multimodal else 'they are'} nodes in {'retrieval context' if multimodal else 'retrieval contexts'}.
+            If the score is 1, keep it short and say something positive with an upbeat tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
+            **

-DO NOT mention 'verdict' in your reason, but instead phrase it as irrelevant nodes. The term 'verdict' is just here for you to understand the broader scope of things.
-Also DO NOT mention there are `reason` fields in the retrieval contexts you are presented with, instead just use the information in the `reason` field.
-In your reason, you MUST USE the `reason`, QUOTES in the 'reason', and the node RANK (starting from 1, eg. first node) to explain why the 'no' verdicts should be ranked lower than the 'yes' verdicts.
-When addressing nodes, make it explicit that they are nodes in retrieval contexts.
-If the score is 1, keep it short and say something positive with an upbeat tone (but don't overdo it, otherwise it gets annoying).
-**
+            Contextual Precision Score:
+            {score}

-Contextual Precision Score:
-{score}
+            Input:
+            {input}

-Input:
-{input}
+            Retrieval Contexts:
+            {verdicts}
+
+            JSON:
+            """
+        )
+
+    @staticmethod
+    def id_retrieval_context(
+        retrieval_context: List[str],
+    ) -> List[str]:
+        """
+        Annotates retrieval context with node IDs for multimodal processing.

-Retrieval Contexts:
-{verdicts}
+        Args:
+            retrieval_context: List of contexts (can be strings or MLLMImages)

-JSON:
-"""
+        Returns:
+            Annotated list with "Node X:" prefixes
+        """
+        annotated_retrieval_context = []
+        retrieval_context = convert_to_multi_modal_array(retrieval_context)
+        for i, context in enumerate(retrieval_context):
+            if isinstance(context, str):
+                annotated_retrieval_context.append(f"Node {i + 1}: {context}")
+            elif isinstance(context, MLLMImage):
+                annotated_retrieval_context.append(f"Node {i + 1}:")
+                annotated_retrieval_context.append(context)
+        return annotated_retrieval_context
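
The template rewrite above moves the prompts into textwrap.dedent blocks, adds a multimodal flag to generate_verdicts and generate_reason, and introduces id_retrieval_context, which labels each context node so the judge model can refer to text or image nodes by rank. A small sketch of what that annotation produces, using hypothetical example values:

from deepeval.metrics.contextual_precision.template import (
    ContextualPrecisionTemplate,
)
from deepeval.test_case import MLLMImage

retrieval_context = [
    "Einstein won the Nobel Prize for the photoelectric effect.",
    MLLMImage(url="https://example.com/nobel_certificate.png"),
]

annotated = ContextualPrecisionTemplate.id_retrieval_context(retrieval_context)
# Per the helper above: a text node becomes "Node 1: <text>", while an image
# node is emitted as a bare "Node 2:" label followed by the MLLMImage object.
for node in annotated:
    print(node)
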
@@ -1,10 +1,15 @@
 from typing import Optional, List, Type, Union

-from deepeval.utils import get_or_create_event_loop, prettify_list
+from deepeval.utils import (
+    get_or_create_event_loop,
+    prettify_list,
+    convert_to_multi_modal_array,
+)
 from deepeval.metrics.utils import (
     construct_verbose_logs,
     trimAndLoadJson,
     check_llm_test_case_params,
+    check_mllm_test_case_params,
     initialize_model,
 )
 from deepeval.test_case import (
@@ -55,7 +60,14 @@ class ContextualRecallMetric(BaseMetric):
         _in_component: bool = False,
         _log_metric_to_confident: bool = True,
     ) -> float:
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -72,13 +84,16 @@ class ContextualRecallMetric(BaseMetric):
                     )
                 )
             else:
+                expected_output = test_case.expected_output
+                retrieval_context = test_case.retrieval_context
+
                 self.verdicts: List[ContextualRecallVerdict] = (
                     self._generate_verdicts(
-                        test_case.expected_output, test_case.retrieval_context
+                        expected_output, retrieval_context, multimodal
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(test_case.expected_output)
+                self.reason = self._generate_reason(expected_output, multimodal)
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -101,7 +116,14 @@ class ContextualRecallMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:

-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -110,14 +132,17 @@ class ContextualRecallMetric(BaseMetric):
             _show_indicator=_show_indicator,
             _in_component=_in_component,
         ):
+            expected_output = test_case.expected_output
+            retrieval_context = test_case.retrieval_context
+
             self.verdicts: List[ContextualRecallVerdict] = (
                 await self._a_generate_verdicts(
-                    test_case.expected_output, test_case.retrieval_context
+                    expected_output, retrieval_context, multimodal
                 )
             )
             self.score = self._calculate_score()
             self.reason = await self._a_generate_reason(
-                test_case.expected_output
+                expected_output, multimodal
             )
             self.success = self.score >= self.threshold
             self.verbose_logs = construct_verbose_logs(
@@ -133,7 +158,7 @@ class ContextualRecallMetric(BaseMetric):
         )
         return self.score

-    async def _a_generate_reason(self, expected_output: str):
+    async def _a_generate_reason(self, expected_output: str, multimodal: bool):
         if self.include_reason is False:
             return None

@@ -150,6 +175,7 @@ class ContextualRecallMetric(BaseMetric):
             supportive_reasons=supportive_reasons,
             unsupportive_reasons=unsupportive_reasons,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )

         if self.using_native_model:
@@ -169,7 +195,7 @@ class ContextualRecallMetric(BaseMetric):
             data = trimAndLoadJson(res, self)
             return data["reason"]

-    def _generate_reason(self, expected_output: str):
+    def _generate_reason(self, expected_output: str, multimodal: bool):
         if self.include_reason is False:
             return None

@@ -186,6 +212,7 @@ class ContextualRecallMetric(BaseMetric):
             supportive_reasons=supportive_reasons,
             unsupportive_reasons=unsupportive_reasons,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )

         if self.using_native_model:
@@ -219,10 +246,15 @@ class ContextualRecallMetric(BaseMetric):
         return 0 if self.strict_mode and score < self.threshold else score

     async def _a_generate_verdicts(
-        self, expected_output: str, retrieval_context: List[str]
+        self,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[ContextualRecallVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            expected_output=expected_output, retrieval_context=retrieval_context
+            expected_output=expected_output,
+            retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = await self.model.a_generate(prompt, schema=Verdicts)
@@ -245,10 +277,15 @@ class ContextualRecallMetric(BaseMetric):
         return verdicts

     def _generate_verdicts(
-        self, expected_output: str, retrieval_context: List[str]
+        self,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[ContextualRecallVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            expected_output=expected_output, retrieval_context=retrieval_context
+            expected_output=expected_output,
+            retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = self.model.generate(prompt, schema=Verdicts)
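
ContextualRecallMetric receives the same treatment: expected_output and retrieval_context are captured once per run, and the multimodal flag is forwarded to _generate_verdicts, _generate_reason, and the template. A hedged sketch running both updated retrieval metrics over one test case via deepeval's evaluate() entry point; the mixed text/image retrieval_context is an assumption about how 3.7.5's LLMTestCase carries multimodal content:

from deepeval import evaluate
from deepeval.metrics import ContextualPrecisionMetric, ContextualRecallMetric
from deepeval.test_case import LLMTestCase, MLLMImage

test_case = LLMTestCase(
    input="Summarize the quarterly revenue chart.",
    actual_output="Revenue grew every quarter and peaked in Q4.",
    expected_output="Revenue rose each quarter, peaking in Q4.",
    retrieval_context=[
        MLLMImage(url="https://example.com/q4_revenue_chart.png"),  # assumed allowed in 3.7.5
        "Q4 revenue was the highest of the year.",
    ],
)

# Both metrics branch on test_case.multimodal internally, so nothing extra is
# needed beyond a judge model that accepts images.
evaluate(
    test_cases=[test_case],
    metrics=[ContextualPrecisionMetric(), ContextualRecallMetric()],
)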