deepeval 3.7.4__py3-none-any.whl → 3.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. deepeval/_version.py +1 -1
  2. deepeval/dataset/golden.py +54 -2
  3. deepeval/evaluate/evaluate.py +16 -8
  4. deepeval/evaluate/execute.py +70 -26
  5. deepeval/evaluate/utils.py +26 -22
  6. deepeval/integrations/pydantic_ai/agent.py +19 -2
  7. deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
  8. deepeval/metrics/__init__.py +14 -12
  9. deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
  10. deepeval/metrics/answer_relevancy/template.py +188 -92
  11. deepeval/metrics/base_metric.py +2 -5
  12. deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
  13. deepeval/metrics/contextual_precision/template.py +115 -66
  14. deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
  15. deepeval/metrics/contextual_recall/template.py +106 -55
  16. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
  17. deepeval/metrics/contextual_relevancy/template.py +87 -58
  18. deepeval/metrics/dag/templates.py +2 -2
  19. deepeval/metrics/faithfulness/faithfulness.py +70 -27
  20. deepeval/metrics/faithfulness/schema.py +1 -1
  21. deepeval/metrics/faithfulness/template.py +200 -115
  22. deepeval/metrics/g_eval/utils.py +2 -2
  23. deepeval/metrics/indicator.py +4 -4
  24. deepeval/metrics/multimodal_metrics/__init__.py +0 -18
  25. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
  26. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
  27. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
  28. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
  29. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
  30. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
  31. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
  32. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
  33. deepeval/metrics/ragas.py +3 -3
  34. deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
  35. deepeval/metrics/turn_contextual_precision/schema.py +21 -0
  36. deepeval/metrics/turn_contextual_precision/template.py +187 -0
  37. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
  38. deepeval/metrics/turn_contextual_recall/schema.py +21 -0
  39. deepeval/metrics/turn_contextual_recall/template.py +178 -0
  40. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
  41. deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
  42. deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
  43. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
  44. deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
  45. deepeval/metrics/turn_faithfulness/template.py +218 -0
  46. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
  47. deepeval/metrics/utils.py +39 -58
  48. deepeval/models/__init__.py +0 -12
  49. deepeval/models/base_model.py +16 -38
  50. deepeval/models/embedding_models/__init__.py +7 -0
  51. deepeval/models/embedding_models/azure_embedding_model.py +52 -28
  52. deepeval/models/embedding_models/local_embedding_model.py +18 -14
  53. deepeval/models/embedding_models/ollama_embedding_model.py +38 -16
  54. deepeval/models/embedding_models/openai_embedding_model.py +40 -21
  55. deepeval/models/llms/amazon_bedrock_model.py +1 -2
  56. deepeval/models/llms/anthropic_model.py +44 -23
  57. deepeval/models/llms/azure_model.py +121 -36
  58. deepeval/models/llms/deepseek_model.py +18 -13
  59. deepeval/models/llms/gemini_model.py +129 -43
  60. deepeval/models/llms/grok_model.py +18 -13
  61. deepeval/models/llms/kimi_model.py +18 -13
  62. deepeval/models/llms/litellm_model.py +42 -22
  63. deepeval/models/llms/local_model.py +12 -7
  64. deepeval/models/llms/ollama_model.py +114 -12
  65. deepeval/models/llms/openai_model.py +137 -41
  66. deepeval/models/llms/portkey_model.py +24 -7
  67. deepeval/models/llms/utils.py +5 -3
  68. deepeval/models/retry_policy.py +17 -14
  69. deepeval/models/utils.py +46 -1
  70. deepeval/optimizer/__init__.py +5 -0
  71. deepeval/optimizer/algorithms/__init__.py +6 -0
  72. deepeval/optimizer/algorithms/base.py +29 -0
  73. deepeval/optimizer/algorithms/configs.py +18 -0
  74. deepeval/optimizer/algorithms/copro/__init__.py +5 -0
  75. deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
  76. deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
  77. deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
  78. deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
  79. deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
  80. deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
  81. deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
  82. deepeval/optimizer/algorithms/simba/__init__.py +5 -0
  83. deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
  84. deepeval/{optimization → optimizer}/configs.py +5 -8
  85. deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
  86. deepeval/optimizer/prompt_optimizer.py +263 -0
  87. deepeval/optimizer/rewriter/__init__.py +5 -0
  88. deepeval/optimizer/rewriter/rewriter.py +124 -0
  89. deepeval/optimizer/rewriter/utils.py +214 -0
  90. deepeval/optimizer/scorer/__init__.py +5 -0
  91. deepeval/optimizer/scorer/base.py +86 -0
  92. deepeval/optimizer/scorer/scorer.py +316 -0
  93. deepeval/optimizer/scorer/utils.py +30 -0
  94. deepeval/optimizer/types.py +148 -0
  95. deepeval/{optimization → optimizer}/utils.py +47 -165
  96. deepeval/prompt/prompt.py +5 -9
  97. deepeval/test_case/__init__.py +1 -3
  98. deepeval/test_case/api.py +12 -10
  99. deepeval/test_case/conversational_test_case.py +19 -1
  100. deepeval/test_case/llm_test_case.py +152 -1
  101. deepeval/test_case/utils.py +4 -8
  102. deepeval/test_run/api.py +15 -14
  103. deepeval/test_run/test_run.py +3 -3
  104. deepeval/tracing/patchers.py +9 -4
  105. deepeval/tracing/tracing.py +2 -2
  106. deepeval/utils.py +65 -0
  107. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
  108. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/RECORD +116 -125
  109. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
  110. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
  111. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
  112. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
  113. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
  114. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
  115. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
  116. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
  117. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
  118. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
  119. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
  120. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
  121. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
  122. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
  123. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
  124. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
  125. deepeval/models/mlllms/__init__.py +0 -4
  126. deepeval/models/mlllms/azure_model.py +0 -343
  127. deepeval/models/mlllms/gemini_model.py +0 -313
  128. deepeval/models/mlllms/ollama_model.py +0 -175
  129. deepeval/models/mlllms/openai_model.py +0 -309
  130. deepeval/optimization/__init__.py +0 -13
  131. deepeval/optimization/adapters/__init__.py +0 -2
  132. deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
  133. deepeval/optimization/aggregates.py +0 -14
  134. deepeval/optimization/copro/configs.py +0 -31
  135. deepeval/optimization/gepa/__init__.py +0 -7
  136. deepeval/optimization/gepa/configs.py +0 -115
  137. deepeval/optimization/miprov2/configs.py +0 -134
  138. deepeval/optimization/miprov2/loop.py +0 -785
  139. deepeval/optimization/mutations/__init__.py +0 -0
  140. deepeval/optimization/mutations/prompt_rewriter.py +0 -458
  141. deepeval/optimization/policies/__init__.py +0 -16
  142. deepeval/optimization/policies/tie_breaker.py +0 -67
  143. deepeval/optimization/prompt_optimizer.py +0 -462
  144. deepeval/optimization/simba/__init__.py +0 -0
  145. deepeval/optimization/simba/configs.py +0 -33
  146. deepeval/optimization/types.py +0 -361
  147. deepeval/test_case/mllm_test_case.py +0 -170
  148. /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
  149. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
  150. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
  151. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
  152. /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
  153. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
  154. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
  155. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
deepeval/metrics/contextual_recall/template.py
@@ -1,4 +1,7 @@
- from typing import List
+ from typing import List, Union
+ import textwrap
+ from deepeval.test_case import MLLMImage
+ from deepeval.utils import convert_to_multi_modal_array
 
 
  class ContextualRecallTemplate:
@@ -8,68 +11,116 @@ class ContextualRecallTemplate:
  supportive_reasons: str,
  unsupportive_reasons: str,
  score: float,
+ multimodal: bool = False,
  ):
- return f"""
- Given the original expected output, a list of supportive reasons, and a list of unsupportive reasons (which are deduced directly from the 'expected output'), and a contextual recall score (closer to 1 the better), summarize a CONCISE reason for the score.
- A supportive reason is the reason why a certain sentence in the original expected output can be attributed to the node in the retrieval context.
- An unsupportive reason is the reason why a certain sentence in the original expected output cannot be attributed to anything in the retrieval context.
- In your reason, you should relate supportive/unsupportive reasons to the sentence number in expected output, and include info regarding the node number in retrieval context to support your final reason. The first mention of "node(s)" should specify "node(s) in retrieval context".
+ content_type = "sentence or image" if multimodal else "sentence"
 
- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
- Example JSON:
- {{
- "reason": "The score is <contextual_recall_score> because <your_reason>."
- }}
+ return textwrap.dedent(
+ f"""Given the original expected output, a list of supportive reasons, and a list of unsupportive reasons ({'which is' if multimodal else 'which are'} deduced directly from the {'"expected output"' if multimodal else 'original expected output'}), and a contextual recall score (closer to 1 the better), summarize a CONCISE reason for the score.
+ A supportive reason is the reason why a certain {content_type} in the original expected output can be attributed to the node in the retrieval context.
+ An unsupportive reason is the reason why a certain {content_type} in the original expected output cannot be attributed to anything in the retrieval context.
+ In your reason, you should {'related' if multimodal else 'relate'} supportive/unsupportive reasons to the {content_type} number in expected output, and {'info' if multimodal else 'include info'} regarding the node number in retrieval context to support your final reason. The first mention of "node(s)" should specify "node(s) in retrieval context{')' if multimodal else ''}.
 
- DO NOT mention 'supportive reasons' and 'unsupportive reasons' in your reason, these terms are just here for you to understand the broader scope of things.
- If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it, otherwise it gets annoying).
- **
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+ Example JSON:
+ {{
+ "reason": "The score is <contextual_recall_score> because <your_reason>."
+ }}
 
- Contextual Recall Score:
- {score}
+ DO NOT mention 'supportive reasons' and 'unsupportive reasons' in your reason, these terms are just here for you to understand the broader scope of things.
+ If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
+ **
 
- Expected Output:
- {expected_output}
+ Contextual Recall Score:
+ {score}
 
- Supportive Reasons:
- {supportive_reasons}
+ Expected Output:
+ {expected_output}
 
- Unsupportive Reasons:
- {unsupportive_reasons}
+ Supportive Reasons:
+ {supportive_reasons}
 
- JSON:
- """
+ Unsupportive Reasons:
+ {unsupportive_reasons}
 
- @staticmethod
- def generate_verdicts(expected_output: str, retrieval_context: List[str]):
- return f"""
- For EACH sentence in the given expected output below, determine whether the sentence can be attributed to the nodes of retrieval contexts. Please generate a list of JSON with two keys: `verdict` and `reason`.
- The `verdict` key should STRICTLY be either a 'yes' or 'no'. Answer 'yes' if the sentence can be attributed to any parts of the retrieval context, else answer 'no'.
- The `reason` key should provide a reason why to the verdict. In the reason, you should aim to include the node(s) count in the retrieval context (eg., 1st node, and 2nd node in the retrieval context) that is attributed to said sentence. You should also aim to quote the specific part of the retrieval context to justify your verdict, but keep it extremely concise and cut short the quote with an ellipsis if possible.
-
-
- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects, each with two keys: `verdict` and `reason`.
-
- {{
- "verdicts": [
- {{
- "reason": "...",
- "verdict": "yes"
- }},
- ...
- ]
- }}
-
- Since you are going to generate a verdict for each sentence, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of sentences in `expected output`.
- **
+ JSON:
+ """
+ )
 
- Expected Output:
- {expected_output}
-
- Retrieval Context:
- {retrieval_context}
+ @staticmethod
+ def generate_verdicts(
+ expected_output: str,
+ retrieval_context: List[str],
+ multimodal: bool = False,
+ ):
+ content_type = "sentence and image" if multimodal else "sentence"
+ content_type_plural = (
+ "sentences and images" if multimodal else "sentences"
+ )
+ content_or = "sentence or image" if multimodal else "sentence"
+
+ # For multimodal, we need to annotate the retrieval context with node IDs
+ context_to_display = (
+ ContextualRecallTemplate.id_retrieval_context(retrieval_context)
+ if multimodal
+ else retrieval_context
+ )
+
+ node_instruction = ""
+ if multimodal:
+ node_instruction = " A node is either a string or image, but not both (so do not group images and texts in the same nodes)."
+
+ return textwrap.dedent(
+ f"""For EACH {content_type} in the given expected output below, determine whether the {content_or} can be attributed to the nodes of retrieval contexts. Please generate a list of JSON with two keys: `verdict` and `reason`.
+ The `verdict` key should STRICTLY be either a 'yes' or 'no'. Answer 'yes' if the {content_or} can be attributed to any parts of the retrieval context, else answer 'no'.
+ The `reason` key should provide a reason why to the verdict. In the reason, you should aim to include the node(s) count in the retrieval context (eg., 1st node, and 2nd node in the retrieval context) that is attributed to said {content_or}.{node_instruction} You should also aim to quote the specific part of the retrieval context to justify your verdict, but keep it extremely concise and cut short the quote with an ellipsis if possible.
+
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects, each with two keys: `verdict` and `reason`.
+
+ {{
+ "verdicts": [
+ {{
+ "reason": "...",
+ "verdict": "yes"
+ }},
+ ...
+ ]
+ }}
+
+ Since you are going to generate a verdict for each sentence, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of {content_type_plural} in {'the' if multimodal else '`expected output`'}{' `expected output`' if multimodal else ''}.
+ **
+
+ Expected Output:
+ {expected_output}
+
+ Retrieval Context:
+ {context_to_display}
+
+ JSON:
+ """
+ )
 
- JSON:
- """
+ @staticmethod
+ def id_retrieval_context(
+ retrieval_context: List[str],
+ ) -> List[str]:
+ """
+ Annotates retrieval context with node IDs for multimodal processing.
+
+ Args:
+ retrieval_context: List of contexts (can be strings or MLLMImages)
+
+ Returns:
+ Annotated list with "Node X:" prefixes
+ """
+ annotated_retrieval_context = []
+ retrieval_context = convert_to_multi_modal_array(retrieval_context)
+ for i, context in enumerate(retrieval_context):
+ if isinstance(context, str):
+ annotated_retrieval_context.append(f"Node {i + 1}: {context}")
+ elif isinstance(context, MLLMImage):
+ annotated_retrieval_context.append(f"Node {i + 1}:")
+ annotated_retrieval_context.append(context)
+ return annotated_retrieval_context
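The new id_retrieval_context helper above is what lets the multimodal recall prompt refer to numbered nodes when the retrieval context mixes text and images. A minimal sketch of its behavior, assuming MLLMImage is constructed from a URL as in earlier deepeval releases (the URL below is purely illustrative):

from deepeval.test_case import MLLMImage
from deepeval.metrics.contextual_recall.template import ContextualRecallTemplate

# One text node and one image node; the image URL is illustrative only.
retrieval_context = [
    "Einstein won the Nobel Prize in 1921.",
    MLLMImage(url="https://example.com/einstein.png"),
]

annotated = ContextualRecallTemplate.id_retrieval_context(retrieval_context)
# Per the implementation above, `annotated` should look like:
# ["Node 1: Einstein won the Nobel Prize in 1921.", "Node 2:", <MLLMImage ...>]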
deepeval/metrics/contextual_relevancy/contextual_relevancy.py
@@ -1,11 +1,16 @@
  from typing import Optional, List, Type, Union
  import asyncio
 
- from deepeval.utils import get_or_create_event_loop, prettify_list
+ from deepeval.utils import (
+ get_or_create_event_loop,
+ prettify_list,
+ convert_to_multi_modal_array,
+ )
  from deepeval.metrics.utils import (
  construct_verbose_logs,
  trimAndLoadJson,
  check_llm_test_case_params,
+ check_mllm_test_case_params,
  initialize_model,
  )
  from deepeval.test_case import (
@@ -57,7 +62,14 @@ class ContextualRelevancyMetric(BaseMetric):
  _log_metric_to_confident: bool = True,
  ) -> float:
 
- check_llm_test_case_params(test_case, self._required_params, self)
+ multimodal = test_case.multimodal
+
+ if multimodal:
+ check_mllm_test_case_params(
+ test_case, self._required_params, None, None, self, self.model
+ )
+ else:
+ check_llm_test_case_params(test_case, self._required_params, self)
 
  self.evaluation_cost = 0 if self.using_native_model else None
  with metric_progress_indicator(
@@ -74,12 +86,16 @@ class ContextualRelevancyMetric(BaseMetric):
  )
  )
  else:
+
+ input = test_case.input
+ retrieval_context = test_case.retrieval_context
+
  self.verdicts_list: List[ContextualRelevancyVerdicts] = [
- (self._generate_verdicts(test_case.input, context))
- for context in test_case.retrieval_context
+ (self._generate_verdicts(input, context, multimodal))
+ for context in retrieval_context
  ]
  self.score = self._calculate_score()
- self.reason = self._generate_reason(test_case.input)
+ self.reason = self._generate_reason(input, multimodal)
  self.success = self.score >= self.threshold
  self.verbose_logs = construct_verbose_logs(
  self,
@@ -103,7 +119,14 @@ class ContextualRelevancyMetric(BaseMetric):
  _log_metric_to_confident: bool = True,
  ) -> float:
 
- check_llm_test_case_params(test_case, self._required_params, self)
+ multimodal = test_case.multimodal
+
+ if multimodal:
+ check_mllm_test_case_params(
+ test_case, self._required_params, None, None, self, self.model
+ )
+ else:
+ check_llm_test_case_params(test_case, self._required_params, self)
 
  self.evaluation_cost = 0 if self.using_native_model else None
  with metric_progress_indicator(
@@ -112,16 +135,19 @@ class ContextualRelevancyMetric(BaseMetric):
  _show_indicator=_show_indicator,
  _in_component=_in_component,
  ):
+ input = test_case.input
+ retrieval_context = test_case.retrieval_context
+
  self.verdicts_list: List[ContextualRelevancyVerdicts] = (
  await asyncio.gather(
  *[
- self._a_generate_verdicts(test_case.input, context)
- for context in test_case.retrieval_context
+ self._a_generate_verdicts(input, context, multimodal)
+ for context in retrieval_context
  ]
  )
  )
  self.score = self._calculate_score()
- self.reason = await self._a_generate_reason(test_case.input)
+ self.reason = await self._a_generate_reason(input, multimodal)
  self.success = self.score >= self.threshold
  self.verbose_logs = construct_verbose_logs(
  self,
@@ -136,7 +162,7 @@ class ContextualRelevancyMetric(BaseMetric):
  )
  return self.score
 
- async def _a_generate_reason(self, input: str):
+ async def _a_generate_reason(self, input: str, multimodal: bool):
  if self.include_reason is False:
  return None
 
@@ -154,7 +180,9 @@ class ContextualRelevancyMetric(BaseMetric):
  irrelevant_statements=irrelevant_statements,
  relevant_statements=relevant_statements,
  score=format(self.score, ".2f"),
+ multimodal=multimodal,
  )
+
  if self.using_native_model:
  res, cost = await self.model.a_generate(
  prompt, schema=ContextualRelevancyScoreReason
@@ -174,7 +202,7 @@ class ContextualRelevancyMetric(BaseMetric):
  data = trimAndLoadJson(res, self)
  return data["reason"]
 
- def _generate_reason(self, input: str):
+ def _generate_reason(self, input: str, multimodal: bool):
  if self.include_reason is False:
  return None
 
@@ -192,7 +220,9 @@ class ContextualRelevancyMetric(BaseMetric):
  irrelevant_statements=irrelevant_statements,
  relevant_statements=relevant_statements,
  score=format(self.score, ".2f"),
+ multimodal=multimodal,
  )
+
  if self.using_native_model:
  res, cost = self.model.generate(
  prompt, schema=ContextualRelevancyScoreReason
@@ -226,11 +256,12 @@ class ContextualRelevancyMetric(BaseMetric):
  return 0 if self.strict_mode and score < self.threshold else score
 
  async def _a_generate_verdicts(
- self, input: str, context: List[str]
+ self, input: str, context: List[str], multimodal: bool
  ) -> ContextualRelevancyVerdicts:
  prompt = self.evaluation_template.generate_verdicts(
- input=input, context=context
+ input=input, context=context, multimodal=multimodal
  )
+
  if self.using_native_model:
  res, cost = await self.model.a_generate(
  prompt, schema=ContextualRelevancyVerdicts
@@ -249,11 +280,12 @@ class ContextualRelevancyMetric(BaseMetric):
  return ContextualRelevancyVerdicts(**data)
 
  def _generate_verdicts(
- self, input: str, context: str
+ self, input: str, context: str, multimodal: bool
  ) -> ContextualRelevancyVerdicts:
  prompt = self.evaluation_template.generate_verdicts(
- input=input, context=context
+ input=input, context=context, multimodal=multimodal
  )
+
  if self.using_native_model:
  res, cost = self.model.generate(
  prompt, schema=ContextualRelevancyVerdicts
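The hunks above show that ContextualRelevancyMetric now branches on test_case.multimodal: multimodal cases go through check_mllm_test_case_params while everything else keeps the existing check_llm_test_case_params path, and the flag is threaded into verdict and reason generation. A hedged usage sketch, assuming the 3.7.5 LLMTestCase accepts MLLMImage entries in retrieval_context and exposes the multimodal property relied on above (neither is shown in this hunk):

from deepeval.test_case import LLMTestCase, MLLMImage
from deepeval.metrics import ContextualRelevancyMetric

# Assumption: retrieval_context may mix strings and MLLMImage objects in 3.7.5.
test_case = LLMTestCase(
    input="What did the chart say about Q3 revenue?",
    actual_output="Q3 revenue grew 12% quarter over quarter.",
    retrieval_context=[
        "The earnings summary reports 12% QoQ revenue growth in Q3.",
        MLLMImage(url="https://example.com/q3-chart.png"),  # illustrative URL
    ],
)

metric = ContextualRelevancyMetric()
metric.measure(test_case)  # multimodal branch is taken when test_case.multimodal is True
print(metric.score, metric.reason)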
deepeval/metrics/contextual_relevancy/template.py
@@ -1,4 +1,5 @@
- from typing import List
+ from typing import List, Union
+ import textwrap
 
 
  class ContextualRelevancyTemplate:
@@ -8,70 +9,98 @@ class ContextualRelevancyTemplate:
  irrelevant_statements: List[str],
  relevant_statements: List[str],
  score: float,
+ multimodal: bool = False,
  ):
- return f"""Based on the given input, reasons for why the retrieval context is irrelevant to the input, the statements in the retrieval context that is actually relevant to the retrieval context, and the contextual relevancy score (the closer to 1 the better), please generate a CONCISE reason for the score.
- In your reason, you should quote data provided in the reasons for irrelevancy and relevant statements to support your point.
+ # Note: irrelevancies parameter name in multimodal version is kept as irrelevant_statements for consistency
+ return textwrap.dedent(
+ f"""Based on the given input, reasons for why the retrieval context is irrelevant to the input, the statements in the retrieval context that is actually relevant to the retrieval context, and the contextual relevancy score (the closer to 1 the better), please generate a CONCISE reason for the score.
+ In your reason, you should quote data provided in the reasons for irrelevancy and relevant statements to support your point.
 
- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
- Example JSON:
- {{
- "reason": "The score is <contextual_relevancy_score> because <your_reason>."
- }}
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+ Example JSON:
+ {{
+ "reason": "The score is <contextual_relevancy_score> because <your_reason>."
+ }}
 
- If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
- **
+ If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
+ **
 
 
- Contextual Relevancy Score:
- {score}
+ Contextual Relevancy Score:
+ {score}
 
- Input:
- {input}
+ Input:
+ {input}
+
+ Reasons for why the retrieval context is irrelevant to the input:
+ {irrelevant_statements}
 
- Reasons for why the retrieval context is irrelevant to the input:
- {irrelevant_statements}
+ Statement in the retrieval context that is relevant to the input:
+ {relevant_statements}
 
- Statement in the retrieval context that is relevant to the input:
- {relevant_statements}
-
- JSON:
- """
+ JSON:
+ """
+ )
 
  @staticmethod
- def generate_verdicts(input: str, context: str):
- return f"""Based on the input and context, please generate a JSON object to indicate whether each statement found in the context is relevant to the provided input. The JSON will be a list of 'verdicts', with 2 mandatory fields: 'verdict' and 'statement', and 1 optional field: 'reason'.
- You should first extract statements found in the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement.
- The 'verdict' key should STRICTLY be either 'yes' or 'no', and states whether the statement is relevant to the input.
- Provide a 'reason' ONLY IF verdict is no. You MUST quote the irrelevant parts of the statement to back up your reason.
- If provided context contains no actual content or statements then: give \"no\" as a \"verdict\",
- put context into \"statement\", and \"No statements found in provided context.\" into \"reason\".
- **
- IMPORTANT: Please make sure to only return in JSON format.
- Example Context: "Einstein won the Nobel Prize for his discovery of the photoelectric effect. He won the Nobel Prize in 1968. There was a cat."
- Example Input: "What were some of Einstein's achievements?"
-
- Example:
- {{
- "verdicts": [
- {{
- "statement": "Einstein won the Nobel Prize for his discovery of the photoelectric effect in 1968",
- "verdict": "yes"
- }},
- {{
- "statement": "There was a cat.",
- "reason": "The retrieval context contained the information 'There was a cat' when it has nothing to do with Einstein's achievements.",
- "verdict": "no"
- }}
- ]
- }}
- **
-
- Input:
- {input}
-
- Context:
- {context}
-
- JSON:
- """
+ def generate_verdicts(
+ input: str,
+ context: str,
+ multimodal: bool = False,
+ ):
+ context_type = "context (image or string)" if multimodal else "context"
+ statement_or_image = "statement or image" if multimodal else "statement"
+
+ # Conditional instructions based on mode
+ extraction_instructions = ""
+ if multimodal:
+ extraction_instructions = textwrap.dedent(
+ """
+ If the context is textual, you should first extract the statements found in the context if the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement.
+ If the context is an image, `statement` should be a description of the image. Do not assume any information not visibly available.
+ """
+ ).strip()
+ else:
+ extraction_instructions = "You should first extract statements found in the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement."
+
+ # Additional instruction for empty context (only in non-multimodal)
+ empty_context_instruction = ""
+ if not multimodal:
+ empty_context_instruction = '\nIf provided context contains no actual content or statements then: give "no" as a "verdict",\nput context into "statement", and "No statements found in provided context." into "reason".'
+
+ return textwrap.dedent(
+ f"""Based on the input and {context_type}, please generate a JSON object to indicate whether {'the context' if multimodal else 'each statement found in the context'} is relevant to the provided input. The JSON will be a list of 'verdicts', with 2 mandatory fields: 'verdict' and 'statement', and 1 optional field: 'reason'.
+ {extraction_instructions}
+ The 'verdict' key should STRICTLY be either 'yes' or 'no', and states whether the {statement_or_image} is relevant to the input.
+ Provide a 'reason' ONLY IF verdict is no. You MUST quote the irrelevant parts of the {statement_or_image} to back up your reason.{empty_context_instruction}
+ **
+ IMPORTANT: Please make sure to only return in JSON format.
+ Example Context: "Einstein won the Nobel Prize for his discovery of the photoelectric effect. He won the Nobel Prize in 1968. There was a cat."
+ Example Input: "What were some of Einstein's achievements?"
+
+ Example:
+ {{
+ "verdicts": [
+ {{
+ "statement": "Einstein won the Nobel Prize for his discovery of the photoelectric effect in 1968",
+ "verdict": "yes"
+ }},
+ {{
+ "statement": "There was a cat.",
+ "reason": "The retrieval context contained the information 'There was a cat' when it has nothing to do with Einstein's achievements.",
+ "verdict": "no"
+ }}
+ ]
+ }}
+ **
+
+ Input:
+ {input}
+
+ Context:
+ {context}
+
+ JSON:
+ """
+ )
deepeval/metrics/dag/templates.py
@@ -60,11 +60,11 @@ class BinaryJudgementTemplate:
  {text}
 
  **
- IMPORTANT: Please make sure to only return a json with two keys: `verdict` (True or False), and the 'reason' key providing the reason. The verdict must be a boolean only, either True or False.
+ IMPORTANT: Please make sure to only return a json with two keys: `verdict` (true or false), and the 'reason' key providing the reason. The verdict must be a boolean only, either true or false.
  Example JSON:
  {{
  "reason": "...",
- "verdict": True
+ "verdict": true
  }}
  **
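This two-line change replaces Python-style booleans with valid JSON booleans in the prompt's example output. The distinction matters because a model that copies the old example verbatim produces output that strict JSON parsing rejects; a quick illustration with the standard json module (deepeval's own parsing helper is not shown here):

import json

json.loads('{"reason": "...", "verdict": true}')   # parses: lowercase is valid JSON
try:
    json.loads('{"reason": "...", "verdict": True}')  # Python-style boolean
except json.JSONDecodeError as err:
    print("rejected:", err)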