deepeval-3.7.4-py3-none-any.whl → deepeval-3.7.6-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (224)
  1. deepeval/_version.py +1 -1
  2. deepeval/config/settings.py +35 -1
  3. deepeval/dataset/api.py +23 -1
  4. deepeval/dataset/golden.py +139 -2
  5. deepeval/evaluate/evaluate.py +16 -11
  6. deepeval/evaluate/execute.py +13 -181
  7. deepeval/evaluate/utils.py +6 -26
  8. deepeval/integrations/pydantic_ai/agent.py +19 -2
  9. deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
  10. deepeval/key_handler.py +3 -0
  11. deepeval/metrics/__init__.py +14 -16
  12. deepeval/metrics/answer_relevancy/answer_relevancy.py +118 -116
  13. deepeval/metrics/answer_relevancy/template.py +22 -3
  14. deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
  15. deepeval/metrics/arena_g_eval/template.py +17 -1
  16. deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
  17. deepeval/metrics/argument_correctness/template.py +19 -2
  18. deepeval/metrics/base_metric.py +13 -44
  19. deepeval/metrics/bias/bias.py +102 -108
  20. deepeval/metrics/bias/template.py +14 -2
  21. deepeval/metrics/contextual_precision/contextual_precision.py +96 -94
  22. deepeval/metrics/contextual_precision/template.py +115 -66
  23. deepeval/metrics/contextual_recall/contextual_recall.py +94 -84
  24. deepeval/metrics/contextual_recall/template.py +106 -55
  25. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +86 -84
  26. deepeval/metrics/contextual_relevancy/template.py +87 -58
  27. deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
  28. deepeval/metrics/conversation_completeness/template.py +23 -3
  29. deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
  30. deepeval/metrics/conversational_dag/nodes.py +66 -123
  31. deepeval/metrics/conversational_dag/templates.py +16 -0
  32. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
  33. deepeval/metrics/dag/dag.py +10 -0
  34. deepeval/metrics/dag/nodes.py +63 -126
  35. deepeval/metrics/dag/templates.py +16 -2
  36. deepeval/metrics/exact_match/exact_match.py +9 -1
  37. deepeval/metrics/faithfulness/faithfulness.py +138 -149
  38. deepeval/metrics/faithfulness/schema.py +1 -1
  39. deepeval/metrics/faithfulness/template.py +200 -115
  40. deepeval/metrics/g_eval/g_eval.py +87 -78
  41. deepeval/metrics/g_eval/template.py +18 -1
  42. deepeval/metrics/g_eval/utils.py +7 -6
  43. deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
  44. deepeval/metrics/goal_accuracy/template.py +21 -3
  45. deepeval/metrics/hallucination/hallucination.py +60 -75
  46. deepeval/metrics/hallucination/template.py +13 -0
  47. deepeval/metrics/indicator.py +7 -10
  48. deepeval/metrics/json_correctness/json_correctness.py +40 -38
  49. deepeval/metrics/json_correctness/template.py +10 -0
  50. deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
  51. deepeval/metrics/knowledge_retention/schema.py +9 -3
  52. deepeval/metrics/knowledge_retention/template.py +12 -0
  53. deepeval/metrics/mcp/mcp_task_completion.py +68 -38
  54. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
  55. deepeval/metrics/mcp/template.py +52 -0
  56. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
  57. deepeval/metrics/mcp_use_metric/template.py +12 -0
  58. deepeval/metrics/misuse/misuse.py +77 -97
  59. deepeval/metrics/misuse/template.py +15 -0
  60. deepeval/metrics/multimodal_metrics/__init__.py +0 -19
  61. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +59 -53
  62. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +79 -95
  63. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +59 -53
  64. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +59 -53
  65. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +111 -109
  66. deepeval/metrics/non_advice/non_advice.py +79 -105
  67. deepeval/metrics/non_advice/template.py +12 -0
  68. deepeval/metrics/pattern_match/pattern_match.py +12 -4
  69. deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
  70. deepeval/metrics/pii_leakage/template.py +14 -0
  71. deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
  72. deepeval/metrics/plan_adherence/template.py +11 -0
  73. deepeval/metrics/plan_quality/plan_quality.py +63 -87
  74. deepeval/metrics/plan_quality/template.py +9 -0
  75. deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
  76. deepeval/metrics/prompt_alignment/template.py +12 -0
  77. deepeval/metrics/ragas.py +3 -3
  78. deepeval/metrics/role_adherence/role_adherence.py +48 -71
  79. deepeval/metrics/role_adherence/template.py +14 -0
  80. deepeval/metrics/role_violation/role_violation.py +75 -108
  81. deepeval/metrics/role_violation/template.py +12 -0
  82. deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
  83. deepeval/metrics/step_efficiency/template.py +11 -0
  84. deepeval/metrics/summarization/summarization.py +115 -183
  85. deepeval/metrics/summarization/template.py +19 -0
  86. deepeval/metrics/task_completion/task_completion.py +67 -73
  87. deepeval/metrics/tool_correctness/tool_correctness.py +45 -44
  88. deepeval/metrics/tool_use/tool_use.py +42 -66
  89. deepeval/metrics/topic_adherence/template.py +13 -0
  90. deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
  91. deepeval/metrics/toxicity/template.py +13 -0
  92. deepeval/metrics/toxicity/toxicity.py +80 -99
  93. deepeval/metrics/turn_contextual_precision/schema.py +21 -0
  94. deepeval/metrics/turn_contextual_precision/template.py +187 -0
  95. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +592 -0
  96. deepeval/metrics/turn_contextual_recall/schema.py +21 -0
  97. deepeval/metrics/turn_contextual_recall/template.py +178 -0
  98. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +563 -0
  99. deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
  100. deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
  101. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +576 -0
  102. deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
  103. deepeval/metrics/turn_faithfulness/template.py +218 -0
  104. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +627 -0
  105. deepeval/metrics/turn_relevancy/template.py +14 -0
  106. deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
  107. deepeval/metrics/utils.py +158 -122
  108. deepeval/models/__init__.py +0 -12
  109. deepeval/models/base_model.py +49 -33
  110. deepeval/models/embedding_models/__init__.py +7 -0
  111. deepeval/models/embedding_models/azure_embedding_model.py +79 -33
  112. deepeval/models/embedding_models/local_embedding_model.py +39 -20
  113. deepeval/models/embedding_models/ollama_embedding_model.py +52 -19
  114. deepeval/models/embedding_models/openai_embedding_model.py +42 -22
  115. deepeval/models/llms/amazon_bedrock_model.py +226 -72
  116. deepeval/models/llms/anthropic_model.py +178 -63
  117. deepeval/models/llms/azure_model.py +218 -60
  118. deepeval/models/llms/constants.py +2032 -0
  119. deepeval/models/llms/deepseek_model.py +95 -40
  120. deepeval/models/llms/gemini_model.py +209 -64
  121. deepeval/models/llms/grok_model.py +139 -68
  122. deepeval/models/llms/kimi_model.py +140 -90
  123. deepeval/models/llms/litellm_model.py +131 -37
  124. deepeval/models/llms/local_model.py +125 -21
  125. deepeval/models/llms/ollama_model.py +147 -24
  126. deepeval/models/llms/openai_model.py +222 -269
  127. deepeval/models/llms/portkey_model.py +81 -22
  128. deepeval/models/llms/utils.py +8 -3
  129. deepeval/models/retry_policy.py +17 -14
  130. deepeval/models/utils.py +106 -5
  131. deepeval/optimizer/__init__.py +5 -0
  132. deepeval/optimizer/algorithms/__init__.py +6 -0
  133. deepeval/optimizer/algorithms/base.py +29 -0
  134. deepeval/optimizer/algorithms/configs.py +18 -0
  135. deepeval/optimizer/algorithms/copro/__init__.py +5 -0
  136. deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
  137. deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
  138. deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
  139. deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
  140. deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
  141. deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
  142. deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
  143. deepeval/optimizer/algorithms/simba/__init__.py +5 -0
  144. deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
  145. deepeval/{optimization → optimizer}/configs.py +5 -8
  146. deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
  147. deepeval/optimizer/prompt_optimizer.py +263 -0
  148. deepeval/optimizer/rewriter/__init__.py +5 -0
  149. deepeval/optimizer/rewriter/rewriter.py +124 -0
  150. deepeval/optimizer/rewriter/utils.py +214 -0
  151. deepeval/optimizer/scorer/__init__.py +5 -0
  152. deepeval/optimizer/scorer/base.py +86 -0
  153. deepeval/optimizer/scorer/scorer.py +316 -0
  154. deepeval/optimizer/scorer/utils.py +30 -0
  155. deepeval/optimizer/types.py +148 -0
  156. deepeval/{optimization → optimizer}/utils.py +47 -165
  157. deepeval/prompt/prompt.py +5 -9
  158. deepeval/simulator/conversation_simulator.py +43 -0
  159. deepeval/simulator/template.py +13 -0
  160. deepeval/test_case/__init__.py +1 -3
  161. deepeval/test_case/api.py +26 -45
  162. deepeval/test_case/arena_test_case.py +7 -2
  163. deepeval/test_case/conversational_test_case.py +68 -1
  164. deepeval/test_case/llm_test_case.py +206 -1
  165. deepeval/test_case/utils.py +4 -8
  166. deepeval/test_run/api.py +18 -14
  167. deepeval/test_run/test_run.py +3 -3
  168. deepeval/tracing/patchers.py +9 -4
  169. deepeval/tracing/tracing.py +2 -2
  170. deepeval/utils.py +65 -0
  171. {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -4
  172. {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/RECORD +180 -193
  173. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
  174. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
  175. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
  176. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
  177. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
  178. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
  179. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
  180. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
  181. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
  182. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
  183. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
  184. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
  185. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
  186. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
  187. deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
  188. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
  189. deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
  190. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -148
  191. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
  192. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
  193. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
  194. deepeval/models/mlllms/__init__.py +0 -4
  195. deepeval/models/mlllms/azure_model.py +0 -343
  196. deepeval/models/mlllms/gemini_model.py +0 -313
  197. deepeval/models/mlllms/ollama_model.py +0 -175
  198. deepeval/models/mlllms/openai_model.py +0 -309
  199. deepeval/optimization/__init__.py +0 -13
  200. deepeval/optimization/adapters/__init__.py +0 -2
  201. deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
  202. deepeval/optimization/aggregates.py +0 -14
  203. deepeval/optimization/copro/configs.py +0 -31
  204. deepeval/optimization/gepa/__init__.py +0 -7
  205. deepeval/optimization/gepa/configs.py +0 -115
  206. deepeval/optimization/miprov2/configs.py +0 -134
  207. deepeval/optimization/miprov2/loop.py +0 -785
  208. deepeval/optimization/mutations/__init__.py +0 -0
  209. deepeval/optimization/mutations/prompt_rewriter.py +0 -458
  210. deepeval/optimization/policies/__init__.py +0 -16
  211. deepeval/optimization/policies/tie_breaker.py +0 -67
  212. deepeval/optimization/prompt_optimizer.py +0 -462
  213. deepeval/optimization/simba/__init__.py +0 -0
  214. deepeval/optimization/simba/configs.py +0 -33
  215. deepeval/optimization/types.py +0 -361
  216. deepeval/test_case/mllm_test_case.py +0 -170
  217. /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
  218. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
  219. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
  220. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
  221. /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
  222. {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
  223. {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
  224. {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0
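The moves above imply several import-path changes: prompt optimization now lives under deepeval/optimizer/ instead of deepeval/optimization/, the standalone multimodal metrics and MLLM model wrappers are removed, and four new turn-level RAG metric packages (turn_contextual_precision, turn_contextual_recall, turn_contextual_relevancy, turn_faithfulness) are added. A rough import sketch inferred only from these paths; the exported class names below are assumptions, not confirmed by this diff:

# Hypothetical import sketch based only on the file paths listed above; the
# class names are assumptions and may differ from the actual 3.7.6 API.
from deepeval.optimizer.prompt_optimizer import PromptOptimizer  # moved from deepeval/optimization/prompt_optimizer.py
from deepeval.metrics.turn_faithfulness.turn_faithfulness import (
    TurnFaithfulnessMetric,  # assumed class name for the new turn-level metric module
)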
deepeval/metrics/contextual_precision/template.py

@@ -1,84 +1,133 @@
- from typing import List, Dict
+ from typing import List, Dict, Union
+ import textwrap
+ from deepeval.test_case import MLLMImage
+ from deepeval.utils import convert_to_multi_modal_array


  class ContextualPrecisionTemplate:
  @staticmethod
  def generate_verdicts(
- input: str, expected_output: str, retrieval_context: List[str]
+ input: str,
+ expected_output: str,
+ retrieval_context: List[str],
+ multimodal: bool = False,
  ):
  document_count_str = f" ({len(retrieval_context)} document{'s' if len(retrieval_context) > 1 else ''})"
- return f"""Given the input, expected output, and retrieval context, please generate a list of JSON objects to determine whether each node in the retrieval context was remotely useful in arriving at the expected output.
-
- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON. These JSON only contain the `verdict` key that outputs only 'yes' or 'no', and a `reason` key to justify the verdict. In your reason, you should aim to quote parts of the context.
- Example Retrieval Context: ["Einstein won the Nobel Prize for his discovery of the photoelectric effect", "He won the Nobel Prize in 1968.", "There was a cat."]
- Example Input: "Who won the Nobel Prize in 1968 and for what?"
- Example Expected Output: "Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect."
-
- Example:
- {{
- "verdicts": [
- {{
- "reason": "It clearly addresses the question by stating that 'Einstein won the Nobel Prize for his discovery of the photoelectric effect.'",
- "verdict": "yes"
- }},
- {{
- "reason": "The text verifies that the prize was indeed won in 1968.",
- "verdict": "yes"
- }},
- {{
- "reason": "'There was a cat' is not at all relevant to the topic of winning a Nobel Prize.",
- "verdict": "no"
- }}
- ]
- }}
- Since you are going to generate a verdict for each context, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to that of the contexts.
- **
-
- Input:
- {input}
-
- Expected output:
- {expected_output}
-
- Retrieval Context{document_count_str}:
- {retrieval_context}
-
- JSON:
- """
+
+ # For multimodal, we need to annotate the retrieval context with node IDs
+ context_to_display = (
+ ContextualPrecisionTemplate.id_retrieval_context(retrieval_context)
+ if multimodal
+ else retrieval_context
+ )
+
+ multimodal_note = (
+ " (which can be text or an image)" if multimodal else ""
+ )
+
+ prompt_template = textwrap.dedent(
+ f"""Given the input, expected output, and retrieval context, please generate a list of JSON objects to determine whether each node in the retrieval context was remotely useful in arriving at the expected output.
+
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON. These JSON only contain the `verdict` key that outputs only 'yes' or 'no', and a `reason` key to justify the verdict. In your reason, you should aim to quote parts of the context {multimodal_note}.
+ Example Retrieval Context: ["Einstein won the Nobel Prize for his discovery of the photoelectric effect", "He won the Nobel Prize in 1968.", "There was a cat."]
+ Example Input: "Who won the Nobel Prize in 1968 and for what?"
+ Example Expected Output: "Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect."
+
+ Example:
+ {{
+ "verdicts": [
+ {{
+ "reason": "It clearly addresses the question by stating that 'Einstein won the Nobel Prize for his discovery of the photoelectric effect.'",
+ "verdict": "yes"
+ }},
+ {{
+ "reason": "The text verifies that the prize was indeed won in 1968.",
+ "verdict": "yes"
+ }},
+ {{
+ "reason": "'There was a cat' is not at all relevant to the topic of winning a Nobel Prize.",
+ "verdict": "no"
+ }}
+ ]
+ }}
+ Since you are going to generate a verdict for each context, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to that of the contexts.
+ **
+
+ Input:
+ {input}
+
+ Expected output:
+ {expected_output}
+
+ Retrieval Context {document_count_str}:
+ {context_to_display}
+
+ JSON:
+ """
+ )
+
+ return prompt_template

  @staticmethod
  def generate_reason(
- input: str, score: float, verdicts: List[Dict[str, str]]
+ input: str,
+ score: float,
+ verdicts: List[Dict[str, str]],
+ multimodal: bool = False,
  ):
- # given the input and retrieval context for this input, where the verdict is whether ... and the node is the ..., give a reason for the score
- return f"""Given the input, retrieval contexts, and contextual precision score, provide a CONCISE summary for the score. Explain why it is not higher, but also why it is at its current score.
- The retrieval contexts is a list of JSON with three keys: `verdict`, `reason` (reason for the verdict) and `node`. `verdict` will be either 'yes' or 'no', which represents whether the corresponding 'node' in the retrieval context is relevant to the input.
- Contextual precision represents if the relevant nodes are ranked higher than irrelevant nodes. Also note that retrieval contexts is given IN THE ORDER OF THEIR RANKINGS.
+ return textwrap.dedent(
+ f"""Given the input, retrieval contexts, and contextual precision score, provide a CONCISE {'summarize' if multimodal else 'summary'} for the score. Explain why it is not higher, but also why it is at its current score.
+ The retrieval contexts is a list of JSON with three keys: `verdict`, `reason` (reason for the verdict) and `node`. `verdict` will be either 'yes' or 'no', which represents whether the corresponding 'node' in the retrieval context is relevant to the input.
+ Contextual precision represents if the relevant nodes are ranked higher than irrelevant nodes. Also note that retrieval contexts is given IN THE ORDER OF THEIR RANKINGS.
+
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+ Example JSON:
+ {{
+ "reason": "The score is <contextual_precision_score> because <your_reason>."
+ }}

- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
- Example JSON:
- {{
- "reason": "The score is <contextual_precision_score> because <your_reason>."
- }}

+ DO NOT mention 'verdict' in your reason, but instead phrase it as irrelevant nodes. The term 'verdict' {'are' if multimodal else 'is'} just here for you to understand the broader scope of things.
+ Also DO NOT mention there are `reason` fields in the retrieval contexts you are presented with, instead just use the information in the `reason` field.
+ In your reason, you MUST USE the `reason`, QUOTES in the 'reason', and the node RANK (starting from 1, eg. first node) to explain why the 'no' verdicts should be ranked lower than the 'yes' verdicts.
+ When addressing nodes, make it explicit that {'it is' if multimodal else 'they are'} nodes in {'retrieval context' if multimodal else 'retrieval contexts'}.
+ If the score is 1, keep it short and say something positive with an upbeat tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
+ **

- DO NOT mention 'verdict' in your reason, but instead phrase it as irrelevant nodes. The term 'verdict' is just here for you to understand the broader scope of things.
- Also DO NOT mention there are `reason` fields in the retrieval contexts you are presented with, instead just use the information in the `reason` field.
- In your reason, you MUST USE the `reason`, QUOTES in the 'reason', and the node RANK (starting from 1, eg. first node) to explain why the 'no' verdicts should be ranked lower than the 'yes' verdicts.
- When addressing nodes, make it explicit that they are nodes in retrieval contexts.
- If the score is 1, keep it short and say something positive with an upbeat tone (but don't overdo it, otherwise it gets annoying).
- **
+ Contextual Precision Score:
+ {score}

- Contextual Precision Score:
- {score}
+ Input:
+ {input}

- Input:
- {input}
+ Retrieval Contexts:
+ {verdicts}
+
+ JSON:
+ """
+ )
+
+ @staticmethod
+ def id_retrieval_context(
+ retrieval_context: List[str],
+ ) -> List[str]:
+ """
+ Annotates retrieval context with node IDs for multimodal processing.

- Retrieval Contexts:
- {verdicts}
+ Args:
+ retrieval_context: List of contexts (can be strings or MLLMImages)

- JSON:
- """
+ Returns:
+ Annotated list with "Node X:" prefixes
+ """
+ annotated_retrieval_context = []
+ retrieval_context = convert_to_multi_modal_array(retrieval_context)
+ for i, context in enumerate(retrieval_context):
+ if isinstance(context, str):
+ annotated_retrieval_context.append(f"Node {i + 1}: {context}")
+ elif isinstance(context, MLLMImage):
+ annotated_retrieval_context.append(f"Node {i + 1}:")
+ annotated_retrieval_context.append(context)
+ return annotated_retrieval_context
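The template above now accepts a multimodal flag; when it is True, the retrieval context is first run through id_retrieval_context so every text or image node carries a "Node N:" label in the prompt. A minimal usage sketch based on the signatures in this hunk; the MLLMImage constructor argument is an assumption:

# Sketch of the new multimodal path in ContextualPrecisionTemplate.generate_verdicts,
# based on the signatures shown in the diff above. MLLMImage(url=...) is assumed.
from deepeval.metrics.contextual_precision.template import ContextualPrecisionTemplate
from deepeval.test_case import MLLMImage

retrieval_context = [
    "Einstein won the Nobel Prize for his discovery of the photoelectric effect",
    MLLMImage(url="https://example.com/nobel-press-photo.png"),  # hypothetical image node
]

prompt = ContextualPrecisionTemplate.generate_verdicts(
    input="Who won the Nobel Prize in 1968 and for what?",
    expected_output="Einstein won the Nobel Prize in 1968 for the photoelectric effect.",
    retrieval_context=retrieval_context,
    multimodal=True,  # annotates the context with "Node 1:", "Node 2:", ... before prompting
)
# With multimodal=False (the default) the context is passed through unchanged,
# preserving the 3.7.4 behaviour for plain-text retrieval contexts.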
deepeval/metrics/contextual_recall/contextual_recall.py

@@ -1,11 +1,15 @@
  from typing import Optional, List, Type, Union

- from deepeval.utils import get_or_create_event_loop, prettify_list
+ from deepeval.utils import (
+ get_or_create_event_loop,
+ prettify_list,
+ )
  from deepeval.metrics.utils import (
  construct_verbose_logs,
- trimAndLoadJson,
  check_llm_test_case_params,
  initialize_model,
+ a_generate_with_schema_and_extract,
+ generate_with_schema_and_extract,
  )
  from deepeval.test_case import (
  LLMTestCase,
@@ -15,7 +19,11 @@ from deepeval.metrics import BaseMetric
  from deepeval.models import DeepEvalBaseLLM
  from deepeval.metrics.contextual_recall.template import ContextualRecallTemplate
  from deepeval.metrics.indicator import metric_progress_indicator
- from deepeval.metrics.contextual_recall.schema import *
+ from deepeval.metrics.contextual_recall.schema import (
+ ContextualRecallVerdict,
+ Verdicts,
+ ContextualRecallScoreReason,
+ )
  from deepeval.metrics.api import metric_data_manager


@@ -55,7 +63,17 @@ class ContextualRecallMetric(BaseMetric):
  _in_component: bool = False,
  _log_metric_to_confident: bool = True,
  ) -> float:
- check_llm_test_case_params(test_case, self._required_params, self)
+ multimodal = test_case.multimodal
+
+ check_llm_test_case_params(
+ test_case,
+ self._required_params,
+ None,
+ None,
+ self,
+ self.model,
+ test_case.multimodal,
+ )

  self.evaluation_cost = 0 if self.using_native_model else None
  with metric_progress_indicator(
@@ -72,13 +90,16 @@ class ContextualRecallMetric(BaseMetric):
  )
  )
  else:
+ expected_output = test_case.expected_output
+ retrieval_context = test_case.retrieval_context
+
  self.verdicts: List[ContextualRecallVerdict] = (
  self._generate_verdicts(
- test_case.expected_output, test_case.retrieval_context
+ expected_output, retrieval_context, multimodal
  )
  )
  self.score = self._calculate_score()
- self.reason = self._generate_reason(test_case.expected_output)
+ self.reason = self._generate_reason(expected_output, multimodal)
  self.success = self.score >= self.threshold
  self.verbose_logs = construct_verbose_logs(
  self,
@@ -101,7 +122,17 @@ class ContextualRecallMetric(BaseMetric):
  _log_metric_to_confident: bool = True,
  ) -> float:

- check_llm_test_case_params(test_case, self._required_params, self)
+ multimodal = test_case.multimodal
+
+ check_llm_test_case_params(
+ test_case,
+ self._required_params,
+ None,
+ None,
+ self,
+ self.model,
+ test_case.multimodal,
+ )

  self.evaluation_cost = 0 if self.using_native_model else None
  with metric_progress_indicator(
@@ -110,14 +141,17 @@ class ContextualRecallMetric(BaseMetric):
  _show_indicator=_show_indicator,
  _in_component=_in_component,
  ):
+ expected_output = test_case.expected_output
+ retrieval_context = test_case.retrieval_context
+
  self.verdicts: List[ContextualRecallVerdict] = (
  await self._a_generate_verdicts(
- test_case.expected_output, test_case.retrieval_context
+ expected_output, retrieval_context, multimodal
  )
  )
  self.score = self._calculate_score()
  self.reason = await self._a_generate_reason(
- test_case.expected_output
+ expected_output, multimodal
  )
  self.success = self.score >= self.threshold
  self.verbose_logs = construct_verbose_logs(
@@ -133,7 +167,7 @@ class ContextualRecallMetric(BaseMetric):
  )
  return self.score

- async def _a_generate_reason(self, expected_output: str):
+ async def _a_generate_reason(self, expected_output: str, multimodal: bool):
  if self.include_reason is False:
  return None

@@ -150,26 +184,18 @@ class ContextualRecallMetric(BaseMetric):
  supportive_reasons=supportive_reasons,
  unsupportive_reasons=unsupportive_reasons,
  score=format(self.score, ".2f"),
+ multimodal=multimodal,
  )

- if self.using_native_model:
- res, cost = await self.model.a_generate(
- prompt, schema=ContextualRecallScoreReason
- )
- self.evaluation_cost += cost
- return res.reason
- else:
- try:
- res: ContextualRecallScoreReason = await self.model.a_generate(
- prompt, schema=ContextualRecallScoreReason
- )
- return res.reason
- except TypeError:
- res = await self.model.a_generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["reason"]
+ return await a_generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=ContextualRecallScoreReason,
+ extract_schema=lambda score_reason: score_reason.reason,
+ extract_json=lambda data: data["reason"],
+ )

- def _generate_reason(self, expected_output: str):
+ def _generate_reason(self, expected_output: str, multimodal: bool):
  if self.include_reason is False:
  return None

@@ -186,24 +212,16 @@ class ContextualRecallMetric(BaseMetric):
  supportive_reasons=supportive_reasons,
  unsupportive_reasons=unsupportive_reasons,
  score=format(self.score, ".2f"),
+ multimodal=multimodal,
  )

- if self.using_native_model:
- res, cost = self.model.generate(
- prompt, schema=ContextualRecallScoreReason
- )
- self.evaluation_cost += cost
- return res.reason
- else:
- try:
- res: ContextualRecallScoreReason = self.model.generate(
- prompt, schema=ContextualRecallScoreReason
- )
- return res.reason
- except TypeError:
- res = self.model.generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["reason"]
+ return generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=ContextualRecallScoreReason,
+ extract_schema=lambda score_reason: score_reason.reason,
+ extract_json=lambda data: data["reason"],
+ )

  def _calculate_score(self):
  number_of_verdicts = len(self.verdicts)
@@ -219,54 +237,46 @@ class ContextualRecallMetric(BaseMetric):
  return 0 if self.strict_mode and score < self.threshold else score

  async def _a_generate_verdicts(
- self, expected_output: str, retrieval_context: List[str]
+ self,
+ expected_output: str,
+ retrieval_context: List[str],
+ multimodal: bool,
  ) -> List[ContextualRecallVerdict]:
  prompt = self.evaluation_template.generate_verdicts(
- expected_output=expected_output, retrieval_context=retrieval_context
+ expected_output=expected_output,
+ retrieval_context=retrieval_context,
+ multimodal=multimodal,
+ )
+ return await a_generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=Verdicts,
+ extract_schema=lambda r: list(r.verdicts),
+ extract_json=lambda data: [
+ ContextualRecallVerdict(**item) for item in data["verdicts"]
+ ],
  )
- if self.using_native_model:
- res, cost = await self.model.a_generate(prompt, schema=Verdicts)
- self.evaluation_cost += cost
- verdicts = [item for item in res.verdicts]
- return verdicts
- else:
- try:
- res: Verdicts = await self.model.a_generate(
- prompt, schema=Verdicts
- )
- verdicts: Verdicts = [item for item in res.verdicts]
- return verdicts
- except TypeError:
- res = await self.model.a_generate(prompt)
- data = trimAndLoadJson(res, self)
- verdicts = [
- ContextualRecallVerdict(**item) for item in data["verdicts"]
- ]
- return verdicts

  def _generate_verdicts(
- self, expected_output: str, retrieval_context: List[str]
+ self,
+ expected_output: str,
+ retrieval_context: List[str],
+ multimodal: bool,
  ) -> List[ContextualRecallVerdict]:
  prompt = self.evaluation_template.generate_verdicts(
- expected_output=expected_output, retrieval_context=retrieval_context
+ expected_output=expected_output,
+ retrieval_context=retrieval_context,
+ multimodal=multimodal,
+ )
+ return generate_with_schema_and_extract(
+ metric=self,
+ prompt=prompt,
+ schema_cls=Verdicts,
+ extract_schema=lambda r: list(r.verdicts),
+ extract_json=lambda data: [
+ ContextualRecallVerdict(**item) for item in data["verdicts"]
+ ],
  )
- if self.using_native_model:
- res, cost = self.model.generate(prompt, schema=Verdicts)
- self.evaluation_cost += cost
- verdicts = [item for item in res.verdicts]
- return verdicts
- else:
- try:
- res: Verdicts = self.model.generate(prompt, schema=Verdicts)
- verdicts: Verdicts = [item for item in res.verdicts]
- return verdicts
- except TypeError:
- res = self.model.generate(prompt)
- data = trimAndLoadJson(res, self)
- verdicts = [
- ContextualRecallVerdict(**item) for item in data["verdicts"]
- ]
- return verdicts

  def is_successful(self) -> bool:
  if self.error is not None:
@@ -274,7 +284,7 @@ class ContextualRecallMetric(BaseMetric):
  else:
  try:
  self.success = self.score >= self.threshold
- except:
+ except TypeError:
  self.success = False
  return self.success
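The repeated native-model/schema-fallback branches removed above are replaced by two shared helpers, generate_with_schema_and_extract and a_generate_with_schema_and_extract, imported from deepeval.metrics.utils. Their real implementation is not part of this excerpt; the sketch below only mirrors the branching logic of the code they replace, so treat it as an approximation rather than the actual helper:

# Approximate shape of generate_with_schema_and_extract, reconstructed from the
# removed branches above; the real helper lives in deepeval/metrics/utils.py.
from deepeval.metrics.utils import trimAndLoadJson  # assumed still available for the raw-JSON fallback

def generate_with_schema_and_extract(
    metric, prompt, schema_cls, extract_schema, extract_json
):
    if metric.using_native_model:
        # Native models support structured output and report token cost.
        res, cost = metric.model.generate(prompt, schema=schema_cls)
        metric.evaluation_cost += cost
        return extract_schema(res)
    try:
        # Custom models that accept a schema return the parsed object directly.
        res = metric.model.generate(prompt, schema=schema_cls)
        return extract_schema(res)
    except TypeError:
        # Schema-unaware custom models: parse the raw completion as JSON instead.
        res = metric.model.generate(prompt)
        data = trimAndLoadJson(res, metric)
        return extract_json(data)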
 
deepeval/metrics/contextual_recall/template.py

@@ -1,4 +1,7 @@
- from typing import List
+ from typing import List, Union
+ import textwrap
+ from deepeval.test_case import MLLMImage
+ from deepeval.utils import convert_to_multi_modal_array


  class ContextualRecallTemplate:
@@ -8,68 +11,116 @@ class ContextualRecallTemplate:
  supportive_reasons: str,
  unsupportive_reasons: str,
  score: float,
+ multimodal: bool = False,
  ):
- return f"""
- Given the original expected output, a list of supportive reasons, and a list of unsupportive reasons (which are deduced directly from the 'expected output'), and a contextual recall score (closer to 1 the better), summarize a CONCISE reason for the score.
- A supportive reason is the reason why a certain sentence in the original expected output can be attributed to the node in the retrieval context.
- An unsupportive reason is the reason why a certain sentence in the original expected output cannot be attributed to anything in the retrieval context.
- In your reason, you should relate supportive/unsupportive reasons to the sentence number in expected output, and include info regarding the node number in retrieval context to support your final reason. The first mention of "node(s)" should specify "node(s) in retrieval context".
+ content_type = "sentence or image" if multimodal else "sentence"

- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
- Example JSON:
- {{
- "reason": "The score is <contextual_recall_score> because <your_reason>."
- }}
+ return textwrap.dedent(
+ f"""Given the original expected output, a list of supportive reasons, and a list of unsupportive reasons ({'which is' if multimodal else 'which are'} deduced directly from the {'"expected output"' if multimodal else 'original expected output'}), and a contextual recall score (closer to 1 the better), summarize a CONCISE reason for the score.
+ A supportive reason is the reason why a certain {content_type} in the original expected output can be attributed to the node in the retrieval context.
+ An unsupportive reason is the reason why a certain {content_type} in the original expected output cannot be attributed to anything in the retrieval context.
+ In your reason, you should {'related' if multimodal else 'relate'} supportive/unsupportive reasons to the {content_type} number in expected output, and {'info' if multimodal else 'include info'} regarding the node number in retrieval context to support your final reason. The first mention of "node(s)" should specify "node(s) in retrieval context{')' if multimodal else ''}.

- DO NOT mention 'supportive reasons' and 'unsupportive reasons' in your reason, these terms are just here for you to understand the broader scope of things.
- If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it, otherwise it gets annoying).
- **
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+ Example JSON:
+ {{
+ "reason": "The score is <contextual_recall_score> because <your_reason>."
+ }}

- Contextual Recall Score:
- {score}
+ DO NOT mention 'supportive reasons' and 'unsupportive reasons' in your reason, these terms are just here for you to understand the broader scope of things.
+ If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
+ **

- Expected Output:
- {expected_output}
+ Contextual Recall Score:
+ {score}

- Supportive Reasons:
- {supportive_reasons}
+ Expected Output:
+ {expected_output}

- Unsupportive Reasons:
- {unsupportive_reasons}
+ Supportive Reasons:
+ {supportive_reasons}

- JSON:
- """
+ Unsupportive Reasons:
+ {unsupportive_reasons}

- @staticmethod
- def generate_verdicts(expected_output: str, retrieval_context: List[str]):
- return f"""
- For EACH sentence in the given expected output below, determine whether the sentence can be attributed to the nodes of retrieval contexts. Please generate a list of JSON with two keys: `verdict` and `reason`.
- The `verdict` key should STRICTLY be either a 'yes' or 'no'. Answer 'yes' if the sentence can be attributed to any parts of the retrieval context, else answer 'no'.
- The `reason` key should provide a reason why to the verdict. In the reason, you should aim to include the node(s) count in the retrieval context (eg., 1st node, and 2nd node in the retrieval context) that is attributed to said sentence. You should also aim to quote the specific part of the retrieval context to justify your verdict, but keep it extremely concise and cut short the quote with an ellipsis if possible.
-
-
- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects, each with two keys: `verdict` and `reason`.
-
- {{
- "verdicts": [
- {{
- "reason": "...",
- "verdict": "yes"
- }},
- ...
- ]
- }}
-
- Since you are going to generate a verdict for each sentence, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of sentences in `expected output`.
- **
+ JSON:
+ """
+ )

- Expected Output:
- {expected_output}
-
- Retrieval Context:
- {retrieval_context}
+ @staticmethod
+ def generate_verdicts(
+ expected_output: str,
+ retrieval_context: List[str],
+ multimodal: bool = False,
+ ):
+ content_type = "sentence and image" if multimodal else "sentence"
+ content_type_plural = (
+ "sentences and images" if multimodal else "sentences"
+ )
+ content_or = "sentence or image" if multimodal else "sentence"
+
+ # For multimodal, we need to annotate the retrieval context with node IDs
+ context_to_display = (
+ ContextualRecallTemplate.id_retrieval_context(retrieval_context)
+ if multimodal
+ else retrieval_context
+ )
+
+ node_instruction = ""
+ if multimodal:
+ node_instruction = " A node is either a string or image, but not both (so do not group images and texts in the same nodes)."
+
+ return textwrap.dedent(
+ f"""For EACH {content_type} in the given expected output below, determine whether the {content_or} can be attributed to the nodes of retrieval contexts. Please generate a list of JSON with two keys: `verdict` and `reason`.
+ The `verdict` key should STRICTLY be either a 'yes' or 'no'. Answer 'yes' if the {content_or} can be attributed to any parts of the retrieval context, else answer 'no'.
+ The `reason` key should provide a reason why to the verdict. In the reason, you should aim to include the node(s) count in the retrieval context (eg., 1st node, and 2nd node in the retrieval context) that is attributed to said {content_or}.{node_instruction} You should also aim to quote the specific part of the retrieval context to justify your verdict, but keep it extremely concise and cut short the quote with an ellipsis if possible.
+
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects, each with two keys: `verdict` and `reason`.
+
+ {{
+ "verdicts": [
+ {{
+ "reason": "...",
+ "verdict": "yes"
+ }},
+ ...
+ ]
+ }}
+
+ Since you are going to generate a verdict for each sentence, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of {content_type_plural} in {'the' if multimodal else '`expected output`'}{' `expected output`' if multimodal else ''}.
+ **
+
+ Expected Output:
+ {expected_output}
+
+ Retrieval Context:
+ {context_to_display}
+
+ JSON:
+ """
+ )

- JSON:
- """
+ @staticmethod
+ def id_retrieval_context(
+ retrieval_context: List[str],
+ ) -> List[str]:
+ """
+ Annotates retrieval context with node IDs for multimodal processing.
+
+ Args:
+ retrieval_context: List of contexts (can be strings or MLLMImages)
+
+ Returns:
+ Annotated list with "Node X:" prefixes
+ """
+ annotated_retrieval_context = []
+ retrieval_context = convert_to_multi_modal_array(retrieval_context)
+ for i, context in enumerate(retrieval_context):
+ if isinstance(context, str):
+ annotated_retrieval_context.append(f"Node {i + 1}: {context}")
+ elif isinstance(context, MLLMImage):
+ annotated_retrieval_context.append(f"Node {i + 1}:")
+ annotated_retrieval_context.append(context)
+ return annotated_retrieval_context
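For reference, the id_retrieval_context helper added to both templates produces node-labelled output of the following shape; the MLLMImage constructor argument is an assumption:

# Illustration of the "Node N:" annotation shown above; MLLMImage(url=...) is assumed.
from deepeval.metrics.contextual_recall.template import ContextualRecallTemplate
from deepeval.test_case import MLLMImage

annotated = ContextualRecallTemplate.id_retrieval_context(
    [
        "Einstein won the Nobel Prize in 1968.",
        MLLMImage(url="https://example.com/nobel-diagram.png"),  # hypothetical image node
    ]
)
# Text nodes get an inline prefix, image nodes get a standalone marker followed
# by the image object itself:
# ["Node 1: Einstein won the Nobel Prize in 1968.", "Node 2:", <MLLMImage ...>]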