deepeval 3.7.4__py3-none-any.whl → 3.7.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/dataset/golden.py +54 -2
- deepeval/evaluate/evaluate.py +16 -8
- deepeval/evaluate/execute.py +70 -26
- deepeval/evaluate/utils.py +26 -22
- deepeval/integrations/pydantic_ai/agent.py +19 -2
- deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
- deepeval/metrics/__init__.py +14 -12
- deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
- deepeval/metrics/answer_relevancy/template.py +188 -92
- deepeval/metrics/base_metric.py +2 -5
- deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
- deepeval/metrics/contextual_precision/template.py +115 -66
- deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
- deepeval/metrics/contextual_recall/template.py +106 -55
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
- deepeval/metrics/contextual_relevancy/template.py +87 -58
- deepeval/metrics/dag/templates.py +2 -2
- deepeval/metrics/faithfulness/faithfulness.py +70 -27
- deepeval/metrics/faithfulness/schema.py +1 -1
- deepeval/metrics/faithfulness/template.py +200 -115
- deepeval/metrics/g_eval/utils.py +2 -2
- deepeval/metrics/indicator.py +4 -4
- deepeval/metrics/multimodal_metrics/__init__.py +0 -18
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
- deepeval/metrics/ragas.py +3 -3
- deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
- deepeval/metrics/turn_contextual_precision/schema.py +21 -0
- deepeval/metrics/turn_contextual_precision/template.py +187 -0
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
- deepeval/metrics/turn_contextual_recall/schema.py +21 -0
- deepeval/metrics/turn_contextual_recall/template.py +178 -0
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
- deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
- deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
- deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
- deepeval/metrics/turn_faithfulness/template.py +218 -0
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
- deepeval/metrics/utils.py +39 -58
- deepeval/models/__init__.py +0 -12
- deepeval/models/base_model.py +16 -38
- deepeval/models/embedding_models/__init__.py +7 -0
- deepeval/models/embedding_models/azure_embedding_model.py +52 -28
- deepeval/models/embedding_models/local_embedding_model.py +18 -14
- deepeval/models/embedding_models/ollama_embedding_model.py +38 -16
- deepeval/models/embedding_models/openai_embedding_model.py +40 -21
- deepeval/models/llms/amazon_bedrock_model.py +1 -2
- deepeval/models/llms/anthropic_model.py +44 -23
- deepeval/models/llms/azure_model.py +121 -36
- deepeval/models/llms/deepseek_model.py +18 -13
- deepeval/models/llms/gemini_model.py +129 -43
- deepeval/models/llms/grok_model.py +18 -13
- deepeval/models/llms/kimi_model.py +18 -13
- deepeval/models/llms/litellm_model.py +42 -22
- deepeval/models/llms/local_model.py +12 -7
- deepeval/models/llms/ollama_model.py +114 -12
- deepeval/models/llms/openai_model.py +137 -41
- deepeval/models/llms/portkey_model.py +24 -7
- deepeval/models/llms/utils.py +5 -3
- deepeval/models/retry_policy.py +17 -14
- deepeval/models/utils.py +46 -1
- deepeval/optimizer/__init__.py +5 -0
- deepeval/optimizer/algorithms/__init__.py +6 -0
- deepeval/optimizer/algorithms/base.py +29 -0
- deepeval/optimizer/algorithms/configs.py +18 -0
- deepeval/optimizer/algorithms/copro/__init__.py +5 -0
- deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
- deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
- deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
- deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
- deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
- deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
- deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
- deepeval/optimizer/algorithms/simba/__init__.py +5 -0
- deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
- deepeval/{optimization → optimizer}/configs.py +5 -8
- deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
- deepeval/optimizer/prompt_optimizer.py +263 -0
- deepeval/optimizer/rewriter/__init__.py +5 -0
- deepeval/optimizer/rewriter/rewriter.py +124 -0
- deepeval/optimizer/rewriter/utils.py +214 -0
- deepeval/optimizer/scorer/__init__.py +5 -0
- deepeval/optimizer/scorer/base.py +86 -0
- deepeval/optimizer/scorer/scorer.py +316 -0
- deepeval/optimizer/scorer/utils.py +30 -0
- deepeval/optimizer/types.py +148 -0
- deepeval/{optimization → optimizer}/utils.py +47 -165
- deepeval/prompt/prompt.py +5 -9
- deepeval/test_case/__init__.py +1 -3
- deepeval/test_case/api.py +12 -10
- deepeval/test_case/conversational_test_case.py +19 -1
- deepeval/test_case/llm_test_case.py +152 -1
- deepeval/test_case/utils.py +4 -8
- deepeval/test_run/api.py +15 -14
- deepeval/test_run/test_run.py +3 -3
- deepeval/tracing/patchers.py +9 -4
- deepeval/tracing/tracing.py +2 -2
- deepeval/utils.py +65 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/RECORD +116 -125
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
- deepeval/models/mlllms/__init__.py +0 -4
- deepeval/models/mlllms/azure_model.py +0 -343
- deepeval/models/mlllms/gemini_model.py +0 -313
- deepeval/models/mlllms/ollama_model.py +0 -175
- deepeval/models/mlllms/openai_model.py +0 -309
- deepeval/optimization/__init__.py +0 -13
- deepeval/optimization/adapters/__init__.py +0 -2
- deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
- deepeval/optimization/aggregates.py +0 -14
- deepeval/optimization/copro/configs.py +0 -31
- deepeval/optimization/gepa/__init__.py +0 -7
- deepeval/optimization/gepa/configs.py +0 -115
- deepeval/optimization/miprov2/configs.py +0 -134
- deepeval/optimization/miprov2/loop.py +0 -785
- deepeval/optimization/mutations/__init__.py +0 -0
- deepeval/optimization/mutations/prompt_rewriter.py +0 -458
- deepeval/optimization/policies/__init__.py +0 -16
- deepeval/optimization/policies/tie_breaker.py +0 -67
- deepeval/optimization/prompt_optimizer.py +0 -462
- deepeval/optimization/simba/__init__.py +0 -0
- deepeval/optimization/simba/configs.py +0 -33
- deepeval/optimization/types.py +0 -361
- deepeval/test_case/mllm_test_case.py +0 -170
- /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
- /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
deepeval/metrics/contextual_recall/template.py (+106 -55)

@@ -1,4 +1,7 @@
-from typing import List
+from typing import List, Union
+import textwrap
+from deepeval.test_case import MLLMImage
+from deepeval.utils import convert_to_multi_modal_array
 
 
 class ContextualRecallTemplate:
@@ -8,68 +11,116 @@ class ContextualRecallTemplate:
 supportive_reasons: str,
 unsupportive_reasons: str,
 score: float,
+multimodal: bool = False,
 ):
-
-Given the original expected output, a list of supportive reasons, and a list of unsupportive reasons (which are deduced directly from the 'expected output'), and a contextual recall score (closer to 1 the better), summarize a CONCISE reason for the score.
-A supportive reason is the reason why a certain sentence in the original expected output can be attributed to the node in the retrieval context.
-An unsupportive reason is the reason why a certain sentence in the original expected output cannot be attributed to anything in the retrieval context.
-In your reason, you should relate supportive/unsupportive reasons to the sentence number in expected output, and include info regarding the node number in retrieval context to support your final reason. The first mention of "node(s)" should specify "node(s) in retrieval context".
+content_type = "sentence or image" if multimodal else "sentence"
 
-
-
-
-{
-
-}}
+return textwrap.dedent(
+f"""Given the original expected output, a list of supportive reasons, and a list of unsupportive reasons ({'which is' if multimodal else 'which are'} deduced directly from the {'"expected output"' if multimodal else 'original expected output'}), and a contextual recall score (closer to 1 the better), summarize a CONCISE reason for the score.
+A supportive reason is the reason why a certain {content_type} in the original expected output can be attributed to the node in the retrieval context.
+An unsupportive reason is the reason why a certain {content_type} in the original expected output cannot be attributed to anything in the retrieval context.
+In your reason, you should {'related' if multimodal else 'relate'} supportive/unsupportive reasons to the {content_type} number in expected output, and {'info' if multimodal else 'include info'} regarding the node number in retrieval context to support your final reason. The first mention of "node(s)" should specify "node(s) in retrieval context{')' if multimodal else ''}.
 
-
-
-
+**
+IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+Example JSON:
+{{
+"reason": "The score is <contextual_recall_score> because <your_reason>."
+}}
 
-
-{
+DO NOT mention 'supportive reasons' and 'unsupportive reasons' in your reason, these terms are just here for you to understand the broader scope of things.
+If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
+**
 
-
-{
+Contextual Recall Score:
+{score}
 
-
-{
+Expected Output:
+{expected_output}
 
-
-{
+Supportive Reasons:
+{supportive_reasons}
 
-
-
+Unsupportive Reasons:
+{unsupportive_reasons}
 
-
-
-
-For EACH sentence in the given expected output below, determine whether the sentence can be attributed to the nodes of retrieval contexts. Please generate a list of JSON with two keys: `verdict` and `reason`.
-The `verdict` key should STRICTLY be either a 'yes' or 'no'. Answer 'yes' if the sentence can be attributed to any parts of the retrieval context, else answer 'no'.
-The `reason` key should provide a reason why to the verdict. In the reason, you should aim to include the node(s) count in the retrieval context (eg., 1st node, and 2nd node in the retrieval context) that is attributed to said sentence. You should also aim to quote the specific part of the retrieval context to justify your verdict, but keep it extremely concise and cut short the quote with an ellipsis if possible.
-
-
-**
-IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects, each with two keys: `verdict` and `reason`.
-
-{{
-"verdicts": [
-{{
-"reason": "...",
-"verdict": "yes"
-}},
-...
-]
-}}
-
-Since you are going to generate a verdict for each sentence, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of sentences in `expected output`.
-**
+JSON:
+"""
+)
 
-
-
-
-
-
+@staticmethod
+def generate_verdicts(
+expected_output: str,
+retrieval_context: List[str],
+multimodal: bool = False,
+):
+content_type = "sentence and image" if multimodal else "sentence"
+content_type_plural = (
+"sentences and images" if multimodal else "sentences"
+)
+content_or = "sentence or image" if multimodal else "sentence"
+
+# For multimodal, we need to annotate the retrieval context with node IDs
+context_to_display = (
+ContextualRecallTemplate.id_retrieval_context(retrieval_context)
+if multimodal
+else retrieval_context
+)
+
+node_instruction = ""
+if multimodal:
+node_instruction = " A node is either a string or image, but not both (so do not group images and texts in the same nodes)."
+
+return textwrap.dedent(
+f"""For EACH {content_type} in the given expected output below, determine whether the {content_or} can be attributed to the nodes of retrieval contexts. Please generate a list of JSON with two keys: `verdict` and `reason`.
+The `verdict` key should STRICTLY be either a 'yes' or 'no'. Answer 'yes' if the {content_or} can be attributed to any parts of the retrieval context, else answer 'no'.
+The `reason` key should provide a reason why to the verdict. In the reason, you should aim to include the node(s) count in the retrieval context (eg., 1st node, and 2nd node in the retrieval context) that is attributed to said {content_or}.{node_instruction} You should also aim to quote the specific part of the retrieval context to justify your verdict, but keep it extremely concise and cut short the quote with an ellipsis if possible.
+
+**
+IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects, each with two keys: `verdict` and `reason`.
+
+{{
+"verdicts": [
+{{
+"reason": "...",
+"verdict": "yes"
+}},
+...
+]
+}}
+
+Since you are going to generate a verdict for each sentence, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of {content_type_plural} in {'the' if multimodal else '`expected output`'}{' `expected output`' if multimodal else ''}.
+**
+
+Expected Output:
+{expected_output}
+
+Retrieval Context:
+{context_to_display}
+
+JSON:
+"""
+)
 
-
-
+@staticmethod
+def id_retrieval_context(
+retrieval_context: List[str],
+) -> List[str]:
+"""
+Annotates retrieval context with node IDs for multimodal processing.
+
+Args:
+retrieval_context: List of contexts (can be strings or MLLMImages)
+
+Returns:
+Annotated list with "Node X:" prefixes
+"""
+annotated_retrieval_context = []
+retrieval_context = convert_to_multi_modal_array(retrieval_context)
+for i, context in enumerate(retrieval_context):
+if isinstance(context, str):
+annotated_retrieval_context.append(f"Node {i + 1}: {context}")
+elif isinstance(context, MLLMImage):
+annotated_retrieval_context.append(f"Node {i + 1}:")
+annotated_retrieval_context.append(context)
+return annotated_retrieval_context
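To make the change above concrete, here is a minimal sketch of how the updated ContextualRecallTemplate.generate_verdicts signature might be exercised with a mixed text/image retrieval context. The import path is inferred from the file listing above, and the MLLMImage URL and field values are illustrative assumptions, not values taken from this diff.

from deepeval.test_case import MLLMImage
from deepeval.metrics.contextual_recall.template import ContextualRecallTemplate

# Mixed text/image retrieval context; the image URL is a made-up placeholder.
retrieval_context = [
    "Einstein won the Nobel Prize in 1921 for the photoelectric effect.",
    MLLMImage(url="https://example.com/einstein.jpg"),
]

# With multimodal=True the context is first passed through id_retrieval_context(),
# which prefixes each string or image with "Node X:" before it is embedded in the prompt.
prompt = ContextualRecallTemplate.generate_verdicts(
    expected_output="Einstein won the Nobel Prize in 1921.",
    retrieval_context=retrieval_context,
    multimodal=True,
)
print(prompt)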
deepeval/metrics/contextual_relevancy/contextual_relevancy.py (+47 -15)

@@ -1,11 +1,16 @@
 from typing import Optional, List, Type, Union
 import asyncio
 
-from deepeval.utils import
+from deepeval.utils import (
+get_or_create_event_loop,
+prettify_list,
+convert_to_multi_modal_array,
+)
 from deepeval.metrics.utils import (
 construct_verbose_logs,
 trimAndLoadJson,
 check_llm_test_case_params,
+check_mllm_test_case_params,
 initialize_model,
 )
 from deepeval.test_case import (
@@ -57,7 +62,14 @@ class ContextualRelevancyMetric(BaseMetric):
 _log_metric_to_confident: bool = True,
 ) -> float:
 
-
+multimodal = test_case.multimodal
+
+if multimodal:
+check_mllm_test_case_params(
+test_case, self._required_params, None, None, self, self.model
+)
+else:
+check_llm_test_case_params(test_case, self._required_params, self)
 
 self.evaluation_cost = 0 if self.using_native_model else None
 with metric_progress_indicator(
@@ -74,12 +86,16 @@ class ContextualRelevancyMetric(BaseMetric):
 )
 )
 else:
+
+input = test_case.input
+retrieval_context = test_case.retrieval_context
+
 self.verdicts_list: List[ContextualRelevancyVerdicts] = [
-(self._generate_verdicts(
-for context in
+(self._generate_verdicts(input, context, multimodal))
+for context in retrieval_context
 ]
 self.score = self._calculate_score()
-self.reason = self._generate_reason(
+self.reason = self._generate_reason(input, multimodal)
 self.success = self.score >= self.threshold
 self.verbose_logs = construct_verbose_logs(
 self,
@@ -103,7 +119,14 @@ class ContextualRelevancyMetric(BaseMetric):
 _log_metric_to_confident: bool = True,
 ) -> float:
 
-
+multimodal = test_case.multimodal
+
+if multimodal:
+check_mllm_test_case_params(
+test_case, self._required_params, None, None, self, self.model
+)
+else:
+check_llm_test_case_params(test_case, self._required_params, self)
 
 self.evaluation_cost = 0 if self.using_native_model else None
 with metric_progress_indicator(
@@ -112,16 +135,19 @@ class ContextualRelevancyMetric(BaseMetric):
 _show_indicator=_show_indicator,
 _in_component=_in_component,
 ):
+input = test_case.input
+retrieval_context = test_case.retrieval_context
+
 self.verdicts_list: List[ContextualRelevancyVerdicts] = (
 await asyncio.gather(
 *[
-self._a_generate_verdicts(
-for context in
+self._a_generate_verdicts(input, context, multimodal)
+for context in retrieval_context
 ]
 )
 )
 self.score = self._calculate_score()
-self.reason = await self._a_generate_reason(
+self.reason = await self._a_generate_reason(input, multimodal)
 self.success = self.score >= self.threshold
 self.verbose_logs = construct_verbose_logs(
 self,
@@ -136,7 +162,7 @@ class ContextualRelevancyMetric(BaseMetric):
 )
 return self.score
 
-async def _a_generate_reason(self, input: str):
+async def _a_generate_reason(self, input: str, multimodal: bool):
 if self.include_reason is False:
 return None
 
@@ -154,7 +180,9 @@ class ContextualRelevancyMetric(BaseMetric):
 irrelevant_statements=irrelevant_statements,
 relevant_statements=relevant_statements,
 score=format(self.score, ".2f"),
+multimodal=multimodal,
 )
+
 if self.using_native_model:
 res, cost = await self.model.a_generate(
 prompt, schema=ContextualRelevancyScoreReason
@@ -174,7 +202,7 @@ class ContextualRelevancyMetric(BaseMetric):
 data = trimAndLoadJson(res, self)
 return data["reason"]
 
-def _generate_reason(self, input: str):
+def _generate_reason(self, input: str, multimodal: bool):
 if self.include_reason is False:
 return None
 
@@ -192,7 +220,9 @@ class ContextualRelevancyMetric(BaseMetric):
 irrelevant_statements=irrelevant_statements,
 relevant_statements=relevant_statements,
 score=format(self.score, ".2f"),
+multimodal=multimodal,
 )
+
 if self.using_native_model:
 res, cost = self.model.generate(
 prompt, schema=ContextualRelevancyScoreReason
@@ -226,11 +256,12 @@ class ContextualRelevancyMetric(BaseMetric):
 return 0 if self.strict_mode and score < self.threshold else score
 
 async def _a_generate_verdicts(
-self, input: str, context: List[str]
+self, input: str, context: List[str], multimodal: bool
 ) -> ContextualRelevancyVerdicts:
 prompt = self.evaluation_template.generate_verdicts(
-input=input, context=context
+input=input, context=context, multimodal=multimodal
 )
+
 if self.using_native_model:
 res, cost = await self.model.a_generate(
 prompt, schema=ContextualRelevancyVerdicts
@@ -249,11 +280,12 @@ class ContextualRelevancyMetric(BaseMetric):
 return ContextualRelevancyVerdicts(**data)
 
 def _generate_verdicts(
-self, input: str, context: str
+self, input: str, context: str, multimodal: bool
 ) -> ContextualRelevancyVerdicts:
 prompt = self.evaluation_template.generate_verdicts(
-input=input, context=context
+input=input, context=context, multimodal=multimodal
 )
+
 if self.using_native_model:
 res, cost = self.model.generate(
 prompt, schema=ContextualRelevancyVerdicts
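The metric diff above follows the same pattern: measure() and a_measure() now read test_case.multimodal, choose between check_llm_test_case_params and check_mllm_test_case_params, and thread the flag down into verdict and reason generation. Below is a rough sketch of how that call path might look from user code, assuming (per the llm_test_case.py changes in the listing) that LLMTestCase now exposes a multimodal flag and accepts MLLMImage values in retrieval_context; the field values and image URL are illustrative, not verified against the release.

from deepeval.metrics import ContextualRelevancyMetric
from deepeval.test_case import LLMTestCase, MLLMImage

# Hypothetical mixed text/image test case; the URL is a placeholder.
test_case = LLMTestCase(
    input="What does the revenue chart show?",
    actual_output="Revenue grew roughly 20% year over year.",
    retrieval_context=[
        "Quarterly report: revenue grew 20% YoY.",
        MLLMImage(url="https://example.com/revenue_chart.png"),
    ],
)

metric = ContextualRelevancyMetric()
# measure() branches on test_case.multimodal and forwards the flag to
# _generate_verdicts() / _generate_reason(), which pass it on to the template.
metric.measure(test_case)
print(metric.score, metric.reason)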
deepeval/metrics/contextual_relevancy/template.py (+87 -58)

@@ -1,4 +1,5 @@
-from typing import List
+from typing import List, Union
+import textwrap
 
 
 class ContextualRelevancyTemplate:
@@ -8,70 +9,98 @@ class ContextualRelevancyTemplate:
 irrelevant_statements: List[str],
 relevant_statements: List[str],
 score: float,
+multimodal: bool = False,
 ):
-
-
+# Note: irrelevancies parameter name in multimodal version is kept as irrelevant_statements for consistency
+return textwrap.dedent(
+f"""Based on the given input, reasons for why the retrieval context is irrelevant to the input, the statements in the retrieval context that is actually relevant to the retrieval context, and the contextual relevancy score (the closer to 1 the better), please generate a CONCISE reason for the score.
+In your reason, you should quote data provided in the reasons for irrelevancy and relevant statements to support your point.
 
-**
-IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
-Example JSON:
-{{
-
-}}
+**
+IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+Example JSON:
+{{
+"reason": "The score is <contextual_relevancy_score> because <your_reason>."
+}}
 
-If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
-**
+If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
+**
 
 
-Contextual Relevancy Score:
-{score}
+Contextual Relevancy Score:
+{score}
 
-Input:
-{input}
+Input:
+{input}
+
+Reasons for why the retrieval context is irrelevant to the input:
+{irrelevant_statements}
 
-
-{
+Statement in the retrieval context that is relevant to the input:
+{relevant_statements}
 
-
-
-
-JSON:
-"""
+JSON:
+"""
+)
 
 @staticmethod
-def generate_verdicts(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-{
-
-
-
-
-
-
+def generate_verdicts(
+input: str,
+context: str,
+multimodal: bool = False,
+):
+context_type = "context (image or string)" if multimodal else "context"
+statement_or_image = "statement or image" if multimodal else "statement"
+
+# Conditional instructions based on mode
+extraction_instructions = ""
+if multimodal:
+extraction_instructions = textwrap.dedent(
+"""
+If the context is textual, you should first extract the statements found in the context if the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement.
+If the context is an image, `statement` should be a description of the image. Do not assume any information not visibly available.
+"""
+).strip()
+else:
+extraction_instructions = "You should first extract statements found in the context, which are high level information found in the context, before deciding on a verdict and optionally a reason for each statement."
+
+# Additional instruction for empty context (only in non-multimodal)
+empty_context_instruction = ""
+if not multimodal:
+empty_context_instruction = '\nIf provided context contains no actual content or statements then: give "no" as a "verdict",\nput context into "statement", and "No statements found in provided context." into "reason".'
+
+return textwrap.dedent(
+f"""Based on the input and {context_type}, please generate a JSON object to indicate whether {'the context' if multimodal else 'each statement found in the context'} is relevant to the provided input. The JSON will be a list of 'verdicts', with 2 mandatory fields: 'verdict' and 'statement', and 1 optional field: 'reason'.
+{extraction_instructions}
+The 'verdict' key should STRICTLY be either 'yes' or 'no', and states whether the {statement_or_image} is relevant to the input.
+Provide a 'reason' ONLY IF verdict is no. You MUST quote the irrelevant parts of the {statement_or_image} to back up your reason.{empty_context_instruction}
+**
+IMPORTANT: Please make sure to only return in JSON format.
+Example Context: "Einstein won the Nobel Prize for his discovery of the photoelectric effect. He won the Nobel Prize in 1968. There was a cat."
+Example Input: "What were some of Einstein's achievements?"
+
+Example:
+{{
+"verdicts": [
+{{
+"statement": "Einstein won the Nobel Prize for his discovery of the photoelectric effect in 1968",
+"verdict": "yes"
+}},
+{{
+"statement": "There was a cat.",
+"reason": "The retrieval context contained the information 'There was a cat' when it has nothing to do with Einstein's achievements.",
+"verdict": "no"
+}}
+]
+}}
+**
+
+Input:
+{input}
+
+Context:
+{context}
+
+JSON:
+"""
+)
deepeval/metrics/dag/templates.py (+2 -2)

@@ -60,11 +60,11 @@ class BinaryJudgementTemplate:
 {text}
 
 **
-IMPORTANT: Please make sure to only return a json with two keys: `verdict` (
+IMPORTANT: Please make sure to only return a json with two keys: `verdict` (true or false), and the 'reason' key providing the reason. The verdict must be a boolean only, either true or false.
 Example JSON:
 {{
 "reason": "...",
-"verdict":
+"verdict": true
 }}
 **
 