deepeval 3.7.4__py3-none-any.whl → 3.7.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/dataset/golden.py +54 -2
- deepeval/evaluate/evaluate.py +16 -8
- deepeval/evaluate/execute.py +70 -26
- deepeval/evaluate/utils.py +26 -22
- deepeval/integrations/pydantic_ai/agent.py +19 -2
- deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
- deepeval/metrics/__init__.py +14 -12
- deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
- deepeval/metrics/answer_relevancy/template.py +188 -92
- deepeval/metrics/base_metric.py +2 -5
- deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
- deepeval/metrics/contextual_precision/template.py +115 -66
- deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
- deepeval/metrics/contextual_recall/template.py +106 -55
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
- deepeval/metrics/contextual_relevancy/template.py +87 -58
- deepeval/metrics/dag/templates.py +2 -2
- deepeval/metrics/faithfulness/faithfulness.py +70 -27
- deepeval/metrics/faithfulness/schema.py +1 -1
- deepeval/metrics/faithfulness/template.py +200 -115
- deepeval/metrics/g_eval/utils.py +2 -2
- deepeval/metrics/indicator.py +4 -4
- deepeval/metrics/multimodal_metrics/__init__.py +0 -18
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
- deepeval/metrics/ragas.py +3 -3
- deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
- deepeval/metrics/turn_contextual_precision/schema.py +21 -0
- deepeval/metrics/turn_contextual_precision/template.py +187 -0
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
- deepeval/metrics/turn_contextual_recall/schema.py +21 -0
- deepeval/metrics/turn_contextual_recall/template.py +178 -0
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
- deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
- deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
- deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
- deepeval/metrics/turn_faithfulness/template.py +218 -0
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
- deepeval/metrics/utils.py +39 -58
- deepeval/models/__init__.py +0 -12
- deepeval/models/base_model.py +16 -38
- deepeval/models/embedding_models/__init__.py +7 -0
- deepeval/models/embedding_models/azure_embedding_model.py +52 -28
- deepeval/models/embedding_models/local_embedding_model.py +18 -14
- deepeval/models/embedding_models/ollama_embedding_model.py +38 -16
- deepeval/models/embedding_models/openai_embedding_model.py +40 -21
- deepeval/models/llms/amazon_bedrock_model.py +1 -2
- deepeval/models/llms/anthropic_model.py +44 -23
- deepeval/models/llms/azure_model.py +121 -36
- deepeval/models/llms/deepseek_model.py +18 -13
- deepeval/models/llms/gemini_model.py +129 -43
- deepeval/models/llms/grok_model.py +18 -13
- deepeval/models/llms/kimi_model.py +18 -13
- deepeval/models/llms/litellm_model.py +42 -22
- deepeval/models/llms/local_model.py +12 -7
- deepeval/models/llms/ollama_model.py +114 -12
- deepeval/models/llms/openai_model.py +137 -41
- deepeval/models/llms/portkey_model.py +24 -7
- deepeval/models/llms/utils.py +5 -3
- deepeval/models/retry_policy.py +17 -14
- deepeval/models/utils.py +46 -1
- deepeval/optimizer/__init__.py +5 -0
- deepeval/optimizer/algorithms/__init__.py +6 -0
- deepeval/optimizer/algorithms/base.py +29 -0
- deepeval/optimizer/algorithms/configs.py +18 -0
- deepeval/optimizer/algorithms/copro/__init__.py +5 -0
- deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
- deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
- deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
- deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
- deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
- deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
- deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
- deepeval/optimizer/algorithms/simba/__init__.py +5 -0
- deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
- deepeval/{optimization → optimizer}/configs.py +5 -8
- deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
- deepeval/optimizer/prompt_optimizer.py +263 -0
- deepeval/optimizer/rewriter/__init__.py +5 -0
- deepeval/optimizer/rewriter/rewriter.py +124 -0
- deepeval/optimizer/rewriter/utils.py +214 -0
- deepeval/optimizer/scorer/__init__.py +5 -0
- deepeval/optimizer/scorer/base.py +86 -0
- deepeval/optimizer/scorer/scorer.py +316 -0
- deepeval/optimizer/scorer/utils.py +30 -0
- deepeval/optimizer/types.py +148 -0
- deepeval/{optimization → optimizer}/utils.py +47 -165
- deepeval/prompt/prompt.py +5 -9
- deepeval/test_case/__init__.py +1 -3
- deepeval/test_case/api.py +12 -10
- deepeval/test_case/conversational_test_case.py +19 -1
- deepeval/test_case/llm_test_case.py +152 -1
- deepeval/test_case/utils.py +4 -8
- deepeval/test_run/api.py +15 -14
- deepeval/test_run/test_run.py +3 -3
- deepeval/tracing/patchers.py +9 -4
- deepeval/tracing/tracing.py +2 -2
- deepeval/utils.py +65 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/RECORD +116 -125
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
- deepeval/models/mlllms/__init__.py +0 -4
- deepeval/models/mlllms/azure_model.py +0 -343
- deepeval/models/mlllms/gemini_model.py +0 -313
- deepeval/models/mlllms/ollama_model.py +0 -175
- deepeval/models/mlllms/openai_model.py +0 -309
- deepeval/optimization/__init__.py +0 -13
- deepeval/optimization/adapters/__init__.py +0 -2
- deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
- deepeval/optimization/aggregates.py +0 -14
- deepeval/optimization/copro/configs.py +0 -31
- deepeval/optimization/gepa/__init__.py +0 -7
- deepeval/optimization/gepa/configs.py +0 -115
- deepeval/optimization/miprov2/configs.py +0 -134
- deepeval/optimization/miprov2/loop.py +0 -785
- deepeval/optimization/mutations/__init__.py +0 -0
- deepeval/optimization/mutations/prompt_rewriter.py +0 -458
- deepeval/optimization/policies/__init__.py +0 -16
- deepeval/optimization/policies/tie_breaker.py +0 -67
- deepeval/optimization/prompt_optimizer.py +0 -462
- deepeval/optimization/simba/__init__.py +0 -0
- deepeval/optimization/simba/configs.py +0 -33
- deepeval/optimization/types.py +0 -361
- deepeval/test_case/mllm_test_case.py +0 -170
- /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
- /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0

deepeval/metrics/contextual_precision/contextual_precision.py

```diff
@@ -1,10 +1,14 @@
 from typing import Optional, List, Type, Union
 
-from deepeval.utils import get_or_create_event_loop, prettify_list
+from deepeval.utils import (
+    get_or_create_event_loop,
+    prettify_list,
+)
 from deepeval.metrics.utils import (
     construct_verbose_logs,
     trimAndLoadJson,
     check_llm_test_case_params,
+    check_mllm_test_case_params,
     initialize_model,
 )
 from deepeval.test_case import (
@@ -56,7 +60,15 @@ class ContextualPrecisionMetric(BaseMetric):
         _in_component: bool = False,
         _log_metric_to_confident: bool = True,
     ) -> float:
-        check_llm_test_case_params(test_case, self._required_params, self)
+
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -73,15 +85,20 @@ class ContextualPrecisionMetric(BaseMetric):
                     )
                 )
             else:
+                input = test_case.input
+                expected_output = test_case.expected_output
+                retrieval_context = test_case.retrieval_context
+
                 self.verdicts: List[cpschema.ContextualPrecisionVerdict] = (
                     self._generate_verdicts(
-                        test_case.input,
-                        test_case.expected_output,
-                        test_case.retrieval_context,
+                        input,
+                        expected_output,
+                        retrieval_context,
+                        multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(test_case.input)
+                self.reason = self._generate_reason(input, multimodal)
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -104,7 +121,14 @@ class ContextualPrecisionMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -113,15 +137,17 @@ class ContextualPrecisionMetric(BaseMetric):
             _show_indicator=_show_indicator,
             _in_component=_in_component,
         ):
+            input = test_case.input
+            expected_output = test_case.expected_output
+            retrieval_context = test_case.retrieval_context
+
             self.verdicts: List[cpschema.ContextualPrecisionVerdict] = (
                 await self._a_generate_verdicts(
-                    test_case.input,
-                    test_case.expected_output,
-                    test_case.retrieval_context,
+                    input, expected_output, retrieval_context, multimodal
                 )
             )
             self.score = self._calculate_score()
-            self.reason = await self._a_generate_reason(test_case.input)
+            self.reason = await self._a_generate_reason(input, multimodal)
             self.success = self.score >= self.threshold
             self.verbose_logs = construct_verbose_logs(
                 self,
@@ -136,7 +162,7 @@ class ContextualPrecisionMetric(BaseMetric):
             )
         return self.score
 
-    async def _a_generate_reason(self, input: str):
+    async def _a_generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
             return None
 
@@ -148,6 +174,7 @@ class ContextualPrecisionMetric(BaseMetric):
             input=input,
             verdicts=retrieval_contexts_verdicts,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
         if self.using_native_model:
@@ -169,7 +196,7 @@ class ContextualPrecisionMetric(BaseMetric):
             data = trimAndLoadJson(res, self)
             return data["reason"]
 
-    def _generate_reason(self, input: str):
+    def _generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
             return None
 
@@ -181,6 +208,7 @@ class ContextualPrecisionMetric(BaseMetric):
             input=input,
             verdicts=retrieval_contexts_verdicts,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
         if self.using_native_model:
@@ -203,12 +231,17 @@ class ContextualPrecisionMetric(BaseMetric):
             return data["reason"]
 
     async def _a_generate_verdicts(
-        self, input: str, expected_output: str, retrieval_context: List[str]
+        self,
+        input: str,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[cpschema.ContextualPrecisionVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
             input=input,
             expected_output=expected_output,
             retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = await self.model.a_generate(
@@ -234,12 +267,17 @@ class ContextualPrecisionMetric(BaseMetric):
         return verdicts
 
     def _generate_verdicts(
-        self, input: str, expected_output: str, retrieval_context: List[str]
+        self,
+        input: str,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[cpschema.ContextualPrecisionVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
             input=input,
             expected_output=expected_output,
             retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = self.model.generate(prompt, schema=cpschema.Verdicts)
```
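Taken together, these hunks make `ContextualPrecisionMetric` branch on `test_case.multimodal`: multimodal test cases are validated with `check_mllm_test_case_params`, and the flag is threaded through verdict and reason generation. Below is a minimal, hedged sketch of how the new path might be exercised; how `LLMTestCase` accepts image content and derives its `multimodal` flag is an assumption based on the companion `llm_test_case.py` change (+152 lines), not something this diff spells out, and the URL is a placeholder.

```python
# Sketch only: exercising the new multimodal branch of ContextualPrecisionMetric.measure().
# Assumptions: LLMTestCase (see the +152-line change to llm_test_case.py) accepts
# MLLMImage objects inside retrieval_context and exposes the `multimodal` flag the
# metric reads; the exact constructor behaviour is not shown in this diff.
from deepeval.test_case import LLMTestCase, MLLMImage
from deepeval.metrics import ContextualPrecisionMetric

test_case = LLMTestCase(
    input="Who won the Nobel Prize in 1968 and for what?",
    actual_output="Einstein won it for his discovery of the photoelectric effect.",
    expected_output="Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect.",
    retrieval_context=[
        "Einstein won the Nobel Prize for his discovery of the photoelectric effect",
        MLLMImage(url="https://example.com/nobel-1968.png"),  # hypothetical image node
    ],
)

metric = ContextualPrecisionMetric(threshold=0.7, include_reason=True)
# measure() now reads test_case.multimodal and, when it is True, validates the test
# case with check_mllm_test_case_params() instead of check_llm_test_case_params().
metric.measure(test_case)
print(metric.score, metric.reason)
```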
deepeval/metrics/contextual_precision/template.py

The single hunk (@@ -1,84 +1,133 @@) rewrites this module wholesale, replacing the previous text-only prompt templates with multimodal-aware versions. The removed 3.7.4 prompt text is not preserved in this view, so the new 3.7.5 file is shown in full:

```python
from typing import List, Dict, Union
import textwrap
from deepeval.test_case import MLLMImage
from deepeval.utils import convert_to_multi_modal_array


class ContextualPrecisionTemplate:
    @staticmethod
    def generate_verdicts(
        input: str,
        expected_output: str,
        retrieval_context: List[str],
        multimodal: bool = False,
    ):
        document_count_str = f" ({len(retrieval_context)} document{'s' if len(retrieval_context) > 1 else ''})"

        # For multimodal, we need to annotate the retrieval context with node IDs
        context_to_display = (
            ContextualPrecisionTemplate.id_retrieval_context(retrieval_context)
            if multimodal
            else retrieval_context
        )

        multimodal_note = (
            " (which can be text or an image)" if multimodal else ""
        )

        prompt_template = textwrap.dedent(
            f"""Given the input, expected output, and retrieval context, please generate a list of JSON objects to determine whether each node in the retrieval context was remotely useful in arriving at the expected output.

            **
            IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON. These JSON only contain the `verdict` key that outputs only 'yes' or 'no', and a `reason` key to justify the verdict. In your reason, you should aim to quote parts of the context {multimodal_note}.
            Example Retrieval Context: ["Einstein won the Nobel Prize for his discovery of the photoelectric effect", "He won the Nobel Prize in 1968.", "There was a cat."]
            Example Input: "Who won the Nobel Prize in 1968 and for what?"
            Example Expected Output: "Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect."

            Example:
            {{
                "verdicts": [
                    {{
                        "reason": "It clearly addresses the question by stating that 'Einstein won the Nobel Prize for his discovery of the photoelectric effect.'",
                        "verdict": "yes"
                    }},
                    {{
                        "reason": "The text verifies that the prize was indeed won in 1968.",
                        "verdict": "yes"
                    }},
                    {{
                        "reason": "'There was a cat' is not at all relevant to the topic of winning a Nobel Prize.",
                        "verdict": "no"
                    }}
                ]
            }}
            Since you are going to generate a verdict for each context, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to that of the contexts.
            **

            Input:
            {input}

            Expected output:
            {expected_output}

            Retrieval Context {document_count_str}:
            {context_to_display}

            JSON:
            """
        )

        return prompt_template

    @staticmethod
    def generate_reason(
        input: str,
        score: float,
        verdicts: List[Dict[str, str]],
        multimodal: bool = False,
    ):
        return textwrap.dedent(
            f"""Given the input, retrieval contexts, and contextual precision score, provide a CONCISE {'summarize' if multimodal else 'summary'} for the score. Explain why it is not higher, but also why it is at its current score.
            The retrieval contexts is a list of JSON with three keys: `verdict`, `reason` (reason for the verdict) and `node`. `verdict` will be either 'yes' or 'no', which represents whether the corresponding 'node' in the retrieval context is relevant to the input.
            Contextual precision represents if the relevant nodes are ranked higher than irrelevant nodes. Also note that retrieval contexts is given IN THE ORDER OF THEIR RANKINGS.

            **
            IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
            Example JSON:
            {{
                "reason": "The score is <contextual_precision_score> because <your_reason>."
            }}


            DO NOT mention 'verdict' in your reason, but instead phrase it as irrelevant nodes. The term 'verdict' {'are' if multimodal else 'is'} just here for you to understand the broader scope of things.
            Also DO NOT mention there are `reason` fields in the retrieval contexts you are presented with, instead just use the information in the `reason` field.
            In your reason, you MUST USE the `reason`, QUOTES in the 'reason', and the node RANK (starting from 1, eg. first node) to explain why the 'no' verdicts should be ranked lower than the 'yes' verdicts.
            When addressing nodes, make it explicit that {'it is' if multimodal else 'they are'} nodes in {'retrieval context' if multimodal else 'retrieval contexts'}.
            If the score is 1, keep it short and say something positive with an upbeat tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
            **

            Contextual Precision Score:
            {score}

            Input:
            {input}

            Retrieval Contexts:
            {verdicts}

            JSON:
            """
        )

    @staticmethod
    def id_retrieval_context(
        retrieval_context: List[str],
    ) -> List[str]:
        """
        Annotates retrieval context with node IDs for multimodal processing.

        Args:
            retrieval_context: List of contexts (can be strings or MLLMImages)

        Returns:
            Annotated list with "Node X:" prefixes
        """
        annotated_retrieval_context = []
        retrieval_context = convert_to_multi_modal_array(retrieval_context)
        for i, context in enumerate(retrieval_context):
            if isinstance(context, str):
                annotated_retrieval_context.append(f"Node {i + 1}: {context}")
            elif isinstance(context, MLLMImage):
                annotated_retrieval_context.append(f"Node {i + 1}:")
                annotated_retrieval_context.append(context)
        return annotated_retrieval_context
```
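The new `id_retrieval_context` helper is what lets the prompt interleave text and image nodes. A small usage sketch follows, assuming the static method can be called directly with a mixed list (inside the metric it is only reached from `generate_verdicts` when `multimodal=True`); the image URL is a placeholder.

```python
# Sketch of the new ContextualPrecisionTemplate.id_retrieval_context helper shown above.
# Assumption: it can be called standalone with a list that mixes plain strings and
# MLLMImage objects; inside the metric it only runs when multimodal=True.
from deepeval.test_case import MLLMImage
from deepeval.metrics.contextual_precision.template import ContextualPrecisionTemplate

mixed_context = [
    "Einstein won the Nobel Prize for his discovery of the photoelectric effect",
    MLLMImage(url="https://example.com/nobel-1968.png"),  # hypothetical image node
]

annotated = ContextualPrecisionTemplate.id_retrieval_context(mixed_context)
# Text entries become "Node 1: <text>"; each image becomes a bare "Node 2:" label
# followed by the MLLMImage object itself, so the prompt can reference nodes by ID.
for node in annotated:
    print(node)
```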
deepeval/metrics/contextual_recall/contextual_recall.py

```diff
@@ -1,10 +1,15 @@
 from typing import Optional, List, Type, Union
 
-from deepeval.utils import get_or_create_event_loop, prettify_list
+from deepeval.utils import (
+    get_or_create_event_loop,
+    prettify_list,
+    convert_to_multi_modal_array,
+)
 from deepeval.metrics.utils import (
     construct_verbose_logs,
     trimAndLoadJson,
     check_llm_test_case_params,
+    check_mllm_test_case_params,
     initialize_model,
 )
 from deepeval.test_case import (
@@ -55,7 +60,14 @@ class ContextualRecallMetric(BaseMetric):
         _in_component: bool = False,
         _log_metric_to_confident: bool = True,
     ) -> float:
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -72,13 +84,16 @@ class ContextualRecallMetric(BaseMetric):
                     )
                 )
             else:
+                expected_output = test_case.expected_output
+                retrieval_context = test_case.retrieval_context
+
                 self.verdicts: List[ContextualRecallVerdict] = (
                     self._generate_verdicts(
-                        test_case.expected_output, test_case.retrieval_context
+                        expected_output, retrieval_context, multimodal
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(test_case.expected_output)
+                self.reason = self._generate_reason(expected_output, multimodal)
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,
@@ -101,7 +116,14 @@ class ContextualRecallMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:
 
-        check_llm_test_case_params(test_case, self._required_params, self)
+        multimodal = test_case.multimodal
+
+        if multimodal:
+            check_mllm_test_case_params(
+                test_case, self._required_params, None, None, self, self.model
+            )
+        else:
+            check_llm_test_case_params(test_case, self._required_params, self)
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -110,14 +132,17 @@ class ContextualRecallMetric(BaseMetric):
             _show_indicator=_show_indicator,
             _in_component=_in_component,
         ):
+            expected_output = test_case.expected_output
+            retrieval_context = test_case.retrieval_context
+
             self.verdicts: List[ContextualRecallVerdict] = (
                 await self._a_generate_verdicts(
-                    test_case.expected_output, test_case.retrieval_context
+                    expected_output, retrieval_context, multimodal
                 )
            )
            self.score = self._calculate_score()
            self.reason = await self._a_generate_reason(
-                test_case.expected_output
+                expected_output, multimodal
            )
            self.success = self.score >= self.threshold
            self.verbose_logs = construct_verbose_logs(
@@ -133,7 +158,7 @@ class ContextualRecallMetric(BaseMetric):
             )
         return self.score
 
-    async def _a_generate_reason(self, expected_output: str):
+    async def _a_generate_reason(self, expected_output: str, multimodal: bool):
         if self.include_reason is False:
             return None
 
@@ -150,6 +175,7 @@ class ContextualRecallMetric(BaseMetric):
             supportive_reasons=supportive_reasons,
             unsupportive_reasons=unsupportive_reasons,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
         if self.using_native_model:
@@ -169,7 +195,7 @@ class ContextualRecallMetric(BaseMetric):
             data = trimAndLoadJson(res, self)
             return data["reason"]
 
-    def _generate_reason(self, expected_output: str):
+    def _generate_reason(self, expected_output: str, multimodal: bool):
         if self.include_reason is False:
             return None
 
@@ -186,6 +212,7 @@ class ContextualRecallMetric(BaseMetric):
             supportive_reasons=supportive_reasons,
             unsupportive_reasons=unsupportive_reasons,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )
 
         if self.using_native_model:
@@ -219,10 +246,15 @@ class ContextualRecallMetric(BaseMetric):
         return 0 if self.strict_mode and score < self.threshold else score
 
     async def _a_generate_verdicts(
-        self, expected_output: str, retrieval_context: List[str]
+        self,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[ContextualRecallVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            expected_output=expected_output, retrieval_context=retrieval_context
+            expected_output=expected_output,
+            retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = await self.model.a_generate(prompt, schema=Verdicts)
@@ -245,10 +277,15 @@ class ContextualRecallMetric(BaseMetric):
         return verdicts
 
     def _generate_verdicts(
-        self, expected_output: str, retrieval_context: List[str]
+        self,
+        expected_output: str,
+        retrieval_context: List[str],
+        multimodal: bool,
     ) -> List[ContextualRecallVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            expected_output=expected_output, retrieval_context=retrieval_context
+            expected_output=expected_output,
+            retrieval_context=retrieval_context,
+            multimodal=multimodal,
         )
         if self.using_native_model:
             res, cost = self.model.generate(prompt, schema=Verdicts)
```
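`ContextualRecallMetric` gets the same treatment on both its sync and async paths. A brief sketch of the async path follows, under the same assumptions as the earlier example; this one uses a text-only test case, so the `else` branch (`check_llm_test_case_params`) runs.

```python
# Sketch of the async path touched above: a_measure() applies the same
# multimodal/text branching before generating verdicts and the reason.
# Text-only test case here, so check_llm_test_case_params() is used.
import asyncio

from deepeval.test_case import LLMTestCase
from deepeval.metrics import ContextualRecallMetric


async def main() -> None:
    test_case = LLMTestCase(
        input="Who won the Nobel Prize in 1968 and for what?",
        actual_output="Einstein, for the photoelectric effect.",
        expected_output="Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect.",
        retrieval_context=[
            "Einstein won the Nobel Prize for his discovery of the photoelectric effect",
            "He won the Nobel Prize in 1968.",
        ],
    )
    metric = ContextualRecallMetric(threshold=0.7, include_reason=True)
    await metric.a_measure(test_case)
    print(metric.score, metric.reason)


asyncio.run(main())
```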