deepeval 3.7.4__py3-none-any.whl → 3.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/config/settings.py +35 -1
- deepeval/dataset/api.py +23 -1
- deepeval/dataset/golden.py +139 -2
- deepeval/evaluate/evaluate.py +16 -11
- deepeval/evaluate/execute.py +13 -181
- deepeval/evaluate/utils.py +6 -26
- deepeval/integrations/pydantic_ai/agent.py +19 -2
- deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
- deepeval/key_handler.py +3 -0
- deepeval/metrics/__init__.py +14 -16
- deepeval/metrics/answer_relevancy/answer_relevancy.py +118 -116
- deepeval/metrics/answer_relevancy/template.py +22 -3
- deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
- deepeval/metrics/arena_g_eval/template.py +17 -1
- deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
- deepeval/metrics/argument_correctness/template.py +19 -2
- deepeval/metrics/base_metric.py +13 -44
- deepeval/metrics/bias/bias.py +102 -108
- deepeval/metrics/bias/template.py +14 -2
- deepeval/metrics/contextual_precision/contextual_precision.py +96 -94
- deepeval/metrics/contextual_precision/template.py +115 -66
- deepeval/metrics/contextual_recall/contextual_recall.py +94 -84
- deepeval/metrics/contextual_recall/template.py +106 -55
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +86 -84
- deepeval/metrics/contextual_relevancy/template.py +87 -58
- deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
- deepeval/metrics/conversation_completeness/template.py +23 -3
- deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
- deepeval/metrics/conversational_dag/nodes.py +66 -123
- deepeval/metrics/conversational_dag/templates.py +16 -0
- deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
- deepeval/metrics/dag/dag.py +10 -0
- deepeval/metrics/dag/nodes.py +63 -126
- deepeval/metrics/dag/templates.py +16 -2
- deepeval/metrics/exact_match/exact_match.py +9 -1
- deepeval/metrics/faithfulness/faithfulness.py +138 -149
- deepeval/metrics/faithfulness/schema.py +1 -1
- deepeval/metrics/faithfulness/template.py +200 -115
- deepeval/metrics/g_eval/g_eval.py +87 -78
- deepeval/metrics/g_eval/template.py +18 -1
- deepeval/metrics/g_eval/utils.py +7 -6
- deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
- deepeval/metrics/goal_accuracy/template.py +21 -3
- deepeval/metrics/hallucination/hallucination.py +60 -75
- deepeval/metrics/hallucination/template.py +13 -0
- deepeval/metrics/indicator.py +7 -10
- deepeval/metrics/json_correctness/json_correctness.py +40 -38
- deepeval/metrics/json_correctness/template.py +10 -0
- deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
- deepeval/metrics/knowledge_retention/schema.py +9 -3
- deepeval/metrics/knowledge_retention/template.py +12 -0
- deepeval/metrics/mcp/mcp_task_completion.py +68 -38
- deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
- deepeval/metrics/mcp/template.py +52 -0
- deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
- deepeval/metrics/mcp_use_metric/template.py +12 -0
- deepeval/metrics/misuse/misuse.py +77 -97
- deepeval/metrics/misuse/template.py +15 -0
- deepeval/metrics/multimodal_metrics/__init__.py +0 -19
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +59 -53
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +79 -95
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +59 -53
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +59 -53
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +111 -109
- deepeval/metrics/non_advice/non_advice.py +79 -105
- deepeval/metrics/non_advice/template.py +12 -0
- deepeval/metrics/pattern_match/pattern_match.py +12 -4
- deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
- deepeval/metrics/pii_leakage/template.py +14 -0
- deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
- deepeval/metrics/plan_adherence/template.py +11 -0
- deepeval/metrics/plan_quality/plan_quality.py +63 -87
- deepeval/metrics/plan_quality/template.py +9 -0
- deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
- deepeval/metrics/prompt_alignment/template.py +12 -0
- deepeval/metrics/ragas.py +3 -3
- deepeval/metrics/role_adherence/role_adherence.py +48 -71
- deepeval/metrics/role_adherence/template.py +14 -0
- deepeval/metrics/role_violation/role_violation.py +75 -108
- deepeval/metrics/role_violation/template.py +12 -0
- deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
- deepeval/metrics/step_efficiency/template.py +11 -0
- deepeval/metrics/summarization/summarization.py +115 -183
- deepeval/metrics/summarization/template.py +19 -0
- deepeval/metrics/task_completion/task_completion.py +67 -73
- deepeval/metrics/tool_correctness/tool_correctness.py +45 -44
- deepeval/metrics/tool_use/tool_use.py +42 -66
- deepeval/metrics/topic_adherence/template.py +13 -0
- deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
- deepeval/metrics/toxicity/template.py +13 -0
- deepeval/metrics/toxicity/toxicity.py +80 -99
- deepeval/metrics/turn_contextual_precision/schema.py +21 -0
- deepeval/metrics/turn_contextual_precision/template.py +187 -0
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +592 -0
- deepeval/metrics/turn_contextual_recall/schema.py +21 -0
- deepeval/metrics/turn_contextual_recall/template.py +178 -0
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +563 -0
- deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
- deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +576 -0
- deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
- deepeval/metrics/turn_faithfulness/template.py +218 -0
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +627 -0
- deepeval/metrics/turn_relevancy/template.py +14 -0
- deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
- deepeval/metrics/utils.py +158 -122
- deepeval/models/__init__.py +0 -12
- deepeval/models/base_model.py +49 -33
- deepeval/models/embedding_models/__init__.py +7 -0
- deepeval/models/embedding_models/azure_embedding_model.py +79 -33
- deepeval/models/embedding_models/local_embedding_model.py +39 -20
- deepeval/models/embedding_models/ollama_embedding_model.py +52 -19
- deepeval/models/embedding_models/openai_embedding_model.py +42 -22
- deepeval/models/llms/amazon_bedrock_model.py +226 -72
- deepeval/models/llms/anthropic_model.py +178 -63
- deepeval/models/llms/azure_model.py +218 -60
- deepeval/models/llms/constants.py +2032 -0
- deepeval/models/llms/deepseek_model.py +95 -40
- deepeval/models/llms/gemini_model.py +209 -64
- deepeval/models/llms/grok_model.py +139 -68
- deepeval/models/llms/kimi_model.py +140 -90
- deepeval/models/llms/litellm_model.py +131 -37
- deepeval/models/llms/local_model.py +125 -21
- deepeval/models/llms/ollama_model.py +147 -24
- deepeval/models/llms/openai_model.py +222 -269
- deepeval/models/llms/portkey_model.py +81 -22
- deepeval/models/llms/utils.py +8 -3
- deepeval/models/retry_policy.py +17 -14
- deepeval/models/utils.py +106 -5
- deepeval/optimizer/__init__.py +5 -0
- deepeval/optimizer/algorithms/__init__.py +6 -0
- deepeval/optimizer/algorithms/base.py +29 -0
- deepeval/optimizer/algorithms/configs.py +18 -0
- deepeval/optimizer/algorithms/copro/__init__.py +5 -0
- deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
- deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
- deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
- deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
- deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
- deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
- deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
- deepeval/optimizer/algorithms/simba/__init__.py +5 -0
- deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
- deepeval/{optimization → optimizer}/configs.py +5 -8
- deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
- deepeval/optimizer/prompt_optimizer.py +263 -0
- deepeval/optimizer/rewriter/__init__.py +5 -0
- deepeval/optimizer/rewriter/rewriter.py +124 -0
- deepeval/optimizer/rewriter/utils.py +214 -0
- deepeval/optimizer/scorer/__init__.py +5 -0
- deepeval/optimizer/scorer/base.py +86 -0
- deepeval/optimizer/scorer/scorer.py +316 -0
- deepeval/optimizer/scorer/utils.py +30 -0
- deepeval/optimizer/types.py +148 -0
- deepeval/{optimization → optimizer}/utils.py +47 -165
- deepeval/prompt/prompt.py +5 -9
- deepeval/simulator/conversation_simulator.py +43 -0
- deepeval/simulator/template.py +13 -0
- deepeval/test_case/__init__.py +1 -3
- deepeval/test_case/api.py +26 -45
- deepeval/test_case/arena_test_case.py +7 -2
- deepeval/test_case/conversational_test_case.py +68 -1
- deepeval/test_case/llm_test_case.py +206 -1
- deepeval/test_case/utils.py +4 -8
- deepeval/test_run/api.py +18 -14
- deepeval/test_run/test_run.py +3 -3
- deepeval/tracing/patchers.py +9 -4
- deepeval/tracing/tracing.py +2 -2
- deepeval/utils.py +65 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -4
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/RECORD +180 -193
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -148
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
- deepeval/models/mlllms/__init__.py +0 -4
- deepeval/models/mlllms/azure_model.py +0 -343
- deepeval/models/mlllms/gemini_model.py +0 -313
- deepeval/models/mlllms/ollama_model.py +0 -175
- deepeval/models/mlllms/openai_model.py +0 -309
- deepeval/optimization/__init__.py +0 -13
- deepeval/optimization/adapters/__init__.py +0 -2
- deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
- deepeval/optimization/aggregates.py +0 -14
- deepeval/optimization/copro/configs.py +0 -31
- deepeval/optimization/gepa/__init__.py +0 -7
- deepeval/optimization/gepa/configs.py +0 -115
- deepeval/optimization/miprov2/configs.py +0 -134
- deepeval/optimization/miprov2/loop.py +0 -785
- deepeval/optimization/mutations/__init__.py +0 -0
- deepeval/optimization/mutations/prompt_rewriter.py +0 -458
- deepeval/optimization/policies/__init__.py +0 -16
- deepeval/optimization/policies/tie_breaker.py +0 -67
- deepeval/optimization/prompt_optimizer.py +0 -462
- deepeval/optimization/simba/__init__.py +0 -0
- deepeval/optimization/simba/configs.py +0 -33
- deepeval/optimization/types.py +0 -361
- deepeval/test_case/mllm_test_case.py +0 -170
- /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
- /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0
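The moves from deepeval/optimization/... to deepeval/optimizer/... in the list above indicate the prompt-optimization package was renamed in this release. A minimal before/after import sketch, assuming the public class name PromptOptimizer carried over from the old prompt_optimizer.py to the new one (the actual re-exports are not visible in this summary):

# Hypothetical migration sketch: module paths are taken from the file list
# above, but the class name is inferred from the filename and is NOT
# confirmed by this diff.
# deepeval 3.7.4 (old layout):
#   from deepeval.optimization.prompt_optimizer import PromptOptimizer
# deepeval 3.7.6 (new layout):
from deepeval.optimizer.prompt_optimizer import PromptOptimizer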
deepeval/metrics/turn_contextual_recall/template.py

@@ -0,0 +1,178 @@
+from typing import List, Union
+import textwrap
+from deepeval.test_case import MLLMImage
+
+
+class TurnContextualRecallTemplate:
+    multimodal_rules = """
+--- MULTIMODAL INPUT RULES ---
+- Treat image content as factual evidence.
+- Only reference visual details that are explicitly and clearly visible.
+- Do not infer or guess objects, text, or details not visibly present.
+- If an image is unclear or ambiguous, mark uncertainty explicitly.
+- When evaluating claims, compare them to BOTH textual and visual evidence.
+- If the claim references something not clearly visible, respond with 'idk'.
+"""
+
+    @staticmethod
+    def generate_reason(
+        expected_outcome: str,
+        supportive_reasons: str,
+        unsupportive_reasons: str,
+        score: float,
+        multimodal: bool = False,
+    ):
+        content_type = "sentence or image" if multimodal else "sentence"
+
+        return textwrap.dedent(
+            f"""Given the original assistant output, a list of supportive reasons, and a list of unsupportive reasons ({'which is' if multimodal else 'which are'} deduced directly from the {'"assistant output"' if multimodal else 'original assistant output'}), and a contextual recall score (closer to 1 the better), summarize a CONCISE reason for the score.
+            A supportive reason is the reason why a certain {content_type} in the original assistant output can be attributed to the node in the retrieval context.
+            An unsupportive reason is the reason why a certain {content_type} in the original assistant output cannot be attributed to anything in the retrieval context.
+            In your reason, you should {'related' if multimodal else 'relate'} supportive/unsupportive reasons to the {content_type} number in assistant output, and {'info' if multimodal else 'include info'} regarding the node number in retrieval context to support your final reason. The first mention of "node(s)" should specify "node(s) in retrieval context{')' if multimodal else ''}.
+
+            {TurnContextualRecallTemplate.multimodal_rules if multimodal else ""}
+
+            **
+            IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+            Example JSON:
+            {{
+                "reason": "The score is <contextual_recall_score> because <your_reason>."
+            }}
+
+            DO NOT mention 'supportive reasons' and 'unsupportive reasons' in your reason, these terms are just here for you to understand the broader scope of things.
+            If the score is 1, keep it short and say something positive with an upbeat encouraging tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
+            **
+
+            Contextual Recall Score:
+            {score}
+
+            Assistant Output:
+            {expected_outcome}
+
+            Supportive Reasons:
+            {supportive_reasons}
+
+            Unsupportive Reasons:
+            {unsupportive_reasons}
+
+            JSON:
+            """
+        )
+
+    @staticmethod
+    def generate_verdicts(
+        expected_outcome: str,
+        retrieval_context: List[Union[str, MLLMImage]],
+        multimodal: bool = False,
+    ):
+        content_type = "sentence and image" if multimodal else "sentence"
+        content_type_plural = (
+            "sentences and images" if multimodal else "sentences"
+        )
+        content_or = "sentence or image" if multimodal else "sentence"
+
+        # For multimodal, we need to annotate the retrieval context with node IDs
+        context_to_display = (
+            TurnContextualRecallTemplate.id_retrieval_context(retrieval_context)
+            if multimodal
+            else retrieval_context
+        )
+
+        node_instruction = ""
+        if multimodal:
+            node_instruction = " A node is either a string or image, but not both (so do not group images and texts in the same nodes)."
+
+        return textwrap.dedent(
+            f"""For EACH {content_type} in the given assistant output below, determine whether the {content_or} can be attributed to the nodes of retrieval contexts. Please generate a list of JSON with two keys: `verdict` and `reason`.
+            The `verdict` key should STRICTLY be either a 'yes' or 'no'. Answer 'yes' if the {content_or} can be attributed to any parts of the retrieval context, else answer 'no'.
+            The `reason` key should provide a reason why to the verdict. In the reason, you should aim to include the node(s) count in the retrieval context (eg., 1st node, and 2nd node in the retrieval context) that is attributed to said {content_or}.{node_instruction} You should also aim to quote the specific part of the retrieval context to justify your verdict, but keep it extremely concise and cut short the quote with an ellipsis if possible.
+
+            {TurnContextualRecallTemplate.multimodal_rules if multimodal else ""}
+
+            **
+            IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects, each with two keys: `verdict` and `reason`.
+
+            {{
+                "verdicts": [
+                    {{
+                        "reason": "...",
+                        "verdict": "yes"
+                    }},
+                    ...
+                ]
+            }}
+
+            Since you are going to generate a verdict for each sentence, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of {content_type_plural} in {'the' if multimodal else '`assistant output`'}{' `assistant output`' if multimodal else ''}.
+            **
+
+            Assistant Output:
+            {expected_outcome}
+
+            Retrieval Context:
+            {context_to_display}
+
+            JSON:
+            """
+        )
+
+    @staticmethod
+    def generate_final_reason(
+        final_score: float, success: bool, reasons: List[str]
+    ):
+        return textwrap.dedent(
+            f"""You are an AI evaluator producing a single final explanation for the TurnContextualRecallMetric result.
+
+            Context:
+            This metric evaluates conversational contextual recall by determining whether sentences in the assistant output can be attributed to the retrieval context for each interaction. Each interaction yields a reason indicating which sentences were supported or unsupported. You are given all those reasons.
+
+            Inputs:
+            - final_score: the averaged score across all interactions.
+            - success: whether the metric passed or failed
+            - reasons: a list of textual reasons generated from individual interactions.
+
+            Instructions:
+            1. Read all reasons and synthesize them into one unified explanation.
+            2. Describe patterns of unsupported sentences, missing context coverage, or well-attributed outputs if present.
+            3. Do not repeat every reason; merge them into a concise, coherent narrative.
+            4. If the metric failed, state the dominant failure modes. If it passed, state why the assistant output was well-supported by retrieval context.
+            5. Output a single paragraph with no lists, no bullets, no markup.
+
+            Output:
+            A single paragraph explaining the final outcome.
+
+            Here's the inputs:
+
+            Final Score: {final_score}
+
+            Reasons:
+            {reasons}
+
+            Success: {success}
+
+            Now give me a final reason that explains why the metric passed or failed. Output ONLY the reason and nothing else.
+
+            The final reason:
+            """
+        )
+
+    @staticmethod
+    def id_retrieval_context(
+        retrieval_context: List[Union[str, MLLMImage]],
+    ) -> List[Union[str, MLLMImage]]:
+        """
+        Annotates retrieval context with node IDs for multimodal processing.
+
+        Args:
+            retrieval_context: List of contexts (can be strings or MLLMImages)
+
+        Returns:
+            Annotated list with "Node X:" prefixes
+        """
+        annotated_retrieval_context = []
+        for i, context in enumerate(retrieval_context):
+            if isinstance(context, str):
+                annotated_retrieval_context.append(f"Node {i + 1}: {context}")
+            elif isinstance(context, MLLMImage):
+                annotated_retrieval_context.append(f"Node {i + 1}:")
+                annotated_retrieval_context.append(context)
+        return annotated_retrieval_context
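For orientation, here is a minimal sketch of how the new template's prompt builders can be invoked directly. The sample strings are hypothetical; in normal use the TurnContextualRecallMetric constructs these prompts internally and sends them to an LLM judge:

# Minimal usage sketch; sample inputs are hypothetical.
from deepeval.metrics.turn_contextual_recall.template import (
    TurnContextualRecallTemplate,
)

# Build the per-sentence verdicts prompt for an LLM judge.
prompt = TurnContextualRecallTemplate.generate_verdicts(
    expected_outcome="Paris is the capital of France.",
    retrieval_context=["France's capital is Paris."],
    multimodal=False,
)
print(prompt)

# With multimodal=True, id_retrieval_context() first annotates each node,
# e.g. ["Node 1: France's capital is Paris.", "Node 2:", <MLLMImage>].

generate_reason and generate_final_reason follow the same pattern: the former returns a JSON-only prompt summarizing per-interaction scores, while the latter asks for a single-paragraph explanation synthesized across all interactions.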