deepeval 3.7.4__py3-none-any.whl → 3.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/config/settings.py +35 -1
- deepeval/dataset/api.py +23 -1
- deepeval/dataset/golden.py +139 -2
- deepeval/evaluate/evaluate.py +16 -11
- deepeval/evaluate/execute.py +13 -181
- deepeval/evaluate/utils.py +6 -26
- deepeval/integrations/pydantic_ai/agent.py +19 -2
- deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
- deepeval/key_handler.py +3 -0
- deepeval/metrics/__init__.py +14 -16
- deepeval/metrics/answer_relevancy/answer_relevancy.py +118 -116
- deepeval/metrics/answer_relevancy/template.py +22 -3
- deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
- deepeval/metrics/arena_g_eval/template.py +17 -1
- deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
- deepeval/metrics/argument_correctness/template.py +19 -2
- deepeval/metrics/base_metric.py +13 -44
- deepeval/metrics/bias/bias.py +102 -108
- deepeval/metrics/bias/template.py +14 -2
- deepeval/metrics/contextual_precision/contextual_precision.py +96 -94
- deepeval/metrics/contextual_precision/template.py +115 -66
- deepeval/metrics/contextual_recall/contextual_recall.py +94 -84
- deepeval/metrics/contextual_recall/template.py +106 -55
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +86 -84
- deepeval/metrics/contextual_relevancy/template.py +87 -58
- deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
- deepeval/metrics/conversation_completeness/template.py +23 -3
- deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
- deepeval/metrics/conversational_dag/nodes.py +66 -123
- deepeval/metrics/conversational_dag/templates.py +16 -0
- deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
- deepeval/metrics/dag/dag.py +10 -0
- deepeval/metrics/dag/nodes.py +63 -126
- deepeval/metrics/dag/templates.py +16 -2
- deepeval/metrics/exact_match/exact_match.py +9 -1
- deepeval/metrics/faithfulness/faithfulness.py +138 -149
- deepeval/metrics/faithfulness/schema.py +1 -1
- deepeval/metrics/faithfulness/template.py +200 -115
- deepeval/metrics/g_eval/g_eval.py +87 -78
- deepeval/metrics/g_eval/template.py +18 -1
- deepeval/metrics/g_eval/utils.py +7 -6
- deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
- deepeval/metrics/goal_accuracy/template.py +21 -3
- deepeval/metrics/hallucination/hallucination.py +60 -75
- deepeval/metrics/hallucination/template.py +13 -0
- deepeval/metrics/indicator.py +7 -10
- deepeval/metrics/json_correctness/json_correctness.py +40 -38
- deepeval/metrics/json_correctness/template.py +10 -0
- deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
- deepeval/metrics/knowledge_retention/schema.py +9 -3
- deepeval/metrics/knowledge_retention/template.py +12 -0
- deepeval/metrics/mcp/mcp_task_completion.py +68 -38
- deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
- deepeval/metrics/mcp/template.py +52 -0
- deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
- deepeval/metrics/mcp_use_metric/template.py +12 -0
- deepeval/metrics/misuse/misuse.py +77 -97
- deepeval/metrics/misuse/template.py +15 -0
- deepeval/metrics/multimodal_metrics/__init__.py +0 -19
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +59 -53
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +79 -95
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +59 -53
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +59 -53
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +111 -109
- deepeval/metrics/non_advice/non_advice.py +79 -105
- deepeval/metrics/non_advice/template.py +12 -0
- deepeval/metrics/pattern_match/pattern_match.py +12 -4
- deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
- deepeval/metrics/pii_leakage/template.py +14 -0
- deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
- deepeval/metrics/plan_adherence/template.py +11 -0
- deepeval/metrics/plan_quality/plan_quality.py +63 -87
- deepeval/metrics/plan_quality/template.py +9 -0
- deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
- deepeval/metrics/prompt_alignment/template.py +12 -0
- deepeval/metrics/ragas.py +3 -3
- deepeval/metrics/role_adherence/role_adherence.py +48 -71
- deepeval/metrics/role_adherence/template.py +14 -0
- deepeval/metrics/role_violation/role_violation.py +75 -108
- deepeval/metrics/role_violation/template.py +12 -0
- deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
- deepeval/metrics/step_efficiency/template.py +11 -0
- deepeval/metrics/summarization/summarization.py +115 -183
- deepeval/metrics/summarization/template.py +19 -0
- deepeval/metrics/task_completion/task_completion.py +67 -73
- deepeval/metrics/tool_correctness/tool_correctness.py +45 -44
- deepeval/metrics/tool_use/tool_use.py +42 -66
- deepeval/metrics/topic_adherence/template.py +13 -0
- deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
- deepeval/metrics/toxicity/template.py +13 -0
- deepeval/metrics/toxicity/toxicity.py +80 -99
- deepeval/metrics/turn_contextual_precision/schema.py +21 -0
- deepeval/metrics/turn_contextual_precision/template.py +187 -0
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +592 -0
- deepeval/metrics/turn_contextual_recall/schema.py +21 -0
- deepeval/metrics/turn_contextual_recall/template.py +178 -0
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +563 -0
- deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
- deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +576 -0
- deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
- deepeval/metrics/turn_faithfulness/template.py +218 -0
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +627 -0
- deepeval/metrics/turn_relevancy/template.py +14 -0
- deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
- deepeval/metrics/utils.py +158 -122
- deepeval/models/__init__.py +0 -12
- deepeval/models/base_model.py +49 -33
- deepeval/models/embedding_models/__init__.py +7 -0
- deepeval/models/embedding_models/azure_embedding_model.py +79 -33
- deepeval/models/embedding_models/local_embedding_model.py +39 -20
- deepeval/models/embedding_models/ollama_embedding_model.py +52 -19
- deepeval/models/embedding_models/openai_embedding_model.py +42 -22
- deepeval/models/llms/amazon_bedrock_model.py +226 -72
- deepeval/models/llms/anthropic_model.py +178 -63
- deepeval/models/llms/azure_model.py +218 -60
- deepeval/models/llms/constants.py +2032 -0
- deepeval/models/llms/deepseek_model.py +95 -40
- deepeval/models/llms/gemini_model.py +209 -64
- deepeval/models/llms/grok_model.py +139 -68
- deepeval/models/llms/kimi_model.py +140 -90
- deepeval/models/llms/litellm_model.py +131 -37
- deepeval/models/llms/local_model.py +125 -21
- deepeval/models/llms/ollama_model.py +147 -24
- deepeval/models/llms/openai_model.py +222 -269
- deepeval/models/llms/portkey_model.py +81 -22
- deepeval/models/llms/utils.py +8 -3
- deepeval/models/retry_policy.py +17 -14
- deepeval/models/utils.py +106 -5
- deepeval/optimizer/__init__.py +5 -0
- deepeval/optimizer/algorithms/__init__.py +6 -0
- deepeval/optimizer/algorithms/base.py +29 -0
- deepeval/optimizer/algorithms/configs.py +18 -0
- deepeval/optimizer/algorithms/copro/__init__.py +5 -0
- deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
- deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
- deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
- deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
- deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
- deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
- deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
- deepeval/optimizer/algorithms/simba/__init__.py +5 -0
- deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
- deepeval/{optimization → optimizer}/configs.py +5 -8
- deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
- deepeval/optimizer/prompt_optimizer.py +263 -0
- deepeval/optimizer/rewriter/__init__.py +5 -0
- deepeval/optimizer/rewriter/rewriter.py +124 -0
- deepeval/optimizer/rewriter/utils.py +214 -0
- deepeval/optimizer/scorer/__init__.py +5 -0
- deepeval/optimizer/scorer/base.py +86 -0
- deepeval/optimizer/scorer/scorer.py +316 -0
- deepeval/optimizer/scorer/utils.py +30 -0
- deepeval/optimizer/types.py +148 -0
- deepeval/{optimization → optimizer}/utils.py +47 -165
- deepeval/prompt/prompt.py +5 -9
- deepeval/simulator/conversation_simulator.py +43 -0
- deepeval/simulator/template.py +13 -0
- deepeval/test_case/__init__.py +1 -3
- deepeval/test_case/api.py +26 -45
- deepeval/test_case/arena_test_case.py +7 -2
- deepeval/test_case/conversational_test_case.py +68 -1
- deepeval/test_case/llm_test_case.py +206 -1
- deepeval/test_case/utils.py +4 -8
- deepeval/test_run/api.py +18 -14
- deepeval/test_run/test_run.py +3 -3
- deepeval/tracing/patchers.py +9 -4
- deepeval/tracing/tracing.py +2 -2
- deepeval/utils.py +65 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -4
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/RECORD +180 -193
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -148
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
- deepeval/models/mlllms/__init__.py +0 -4
- deepeval/models/mlllms/azure_model.py +0 -343
- deepeval/models/mlllms/gemini_model.py +0 -313
- deepeval/models/mlllms/ollama_model.py +0 -175
- deepeval/models/mlllms/openai_model.py +0 -309
- deepeval/optimization/__init__.py +0 -13
- deepeval/optimization/adapters/__init__.py +0 -2
- deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
- deepeval/optimization/aggregates.py +0 -14
- deepeval/optimization/copro/configs.py +0 -31
- deepeval/optimization/gepa/__init__.py +0 -7
- deepeval/optimization/gepa/configs.py +0 -115
- deepeval/optimization/miprov2/configs.py +0 -134
- deepeval/optimization/miprov2/loop.py +0 -785
- deepeval/optimization/mutations/__init__.py +0 -0
- deepeval/optimization/mutations/prompt_rewriter.py +0 -458
- deepeval/optimization/policies/__init__.py +0 -16
- deepeval/optimization/policies/tie_breaker.py +0 -67
- deepeval/optimization/prompt_optimizer.py +0 -462
- deepeval/optimization/simba/__init__.py +0 -0
- deepeval/optimization/simba/configs.py +0 -33
- deepeval/optimization/types.py +0 -361
- deepeval/test_case/mllm_test_case.py +0 -170
- /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
- /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0
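The headline change in this release is visible in the file list: the standalone multimodal metrics, the models/mlllms wrappers, and MLLMTestCase are deleted, and multimodality is instead a flag threaded through the core metrics, read off the test case itself. Below is a minimal sketch of the consolidated call path, assuming the public GEval and LLMTestCase APIs are otherwise unchanged; only the behavior shown in the g_eval.py hunks further down is taken from this diff.

    from deepeval.metrics import GEval
    from deepeval.test_case import LLMTestCase, LLMTestCaseParams

    # Per the g_eval.py hunks below, measure() now reads test_case.multimodal
    # itself; the separate multimodal_g_eval module is removed in 3.7.6.
    metric = GEval(
        name="Correctness",
        criteria="Determine whether the actual output answers the input factually.",
        evaluation_params=[
            LLMTestCaseParams.INPUT,
            LLMTestCaseParams.ACTUAL_OUTPUT,
        ],
    )
    test_case = LLMTestCase(
        input="What did Einstein win the Nobel Prize for?",
        actual_output="Einstein won the Nobel Prize for the photoelectric effect.",
    )
    metric.measure(test_case)  # multimodal handling is chosen per test case
    print(metric.score, metric.reason)

The two largest rewrites, deepeval/metrics/faithfulness/template.py and deepeval/metrics/g_eval/g_eval.py, are the diffs shown below.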
deepeval/metrics/faithfulness/template.py

@@ -1,41 +1,50 @@
 from typing import Optional, List
+import textwrap


 class FaithfulnessTemplate:
     @staticmethod
-    def generate_claims(actual_output: str):
-        [old prompt body, lines 7-34, not captured in this diff view]
+    def generate_claims(actual_output: str, multimodal: bool = False):
+        multimodal_instruction = ""
+        if multimodal:
+            multimodal_instruction = " The excerpt may contain both text and images, so extract claims from all provided content."
+
+        return textwrap.dedent(
+            f"""Based on the given {'excerpt' if multimodal else 'text'}, please extract a comprehensive list of FACTUAL, undisputed truths, that can inferred from the provided actual AI output. {multimodal_instruction}
+These truths, MUST BE COHERENT, and CANNOT be taken out of context.
+
+Example:
+Example Text:
+"Albert Einstein, the genius often associated with wild hair and mind-bending theories, famously won the Nobel Prize in Physics—though not for his groundbreaking work on relativity, as many assume. Instead, in 1968, he was honored for his discovery of the photoelectric effect, a phenomenon that laid the foundation for quantum mechanics."
+
+Example JSON:
+{{
+    "claims": [
+        "Einstein won the noble prize for his discovery of the photoelectric effect in 1968.",
+        "The photoelectric effect is a phenomenon that laid the foundation for quantum mechanics."
+    ]
+}}
+===== END OF EXAMPLE ======
+
+**
+IMPORTANT: Please make sure to only return in JSON format, with the "claims" key as a list of strings. No words or explanation is needed.
+Only include claims that are factual, BUT IT DOESN'T MATTER IF THEY ARE FACTUALLY CORRECT. The claims you extract should include the full context it was presented in, NOT cherry picked facts.
+You should NOT include any prior knowledge, and take the text at face value when extracting claims.
+You should be aware that it is an AI that is outputting these claims.
+**
+
+{'Excerpt' if multimodal else 'AI Output'}:
+{actual_output}
+
+JSON:
+"""
+        )

     @staticmethod
     def generate_truths(
-        retrieval_context: str,
+        retrieval_context: str,
+        extraction_limit: Optional[int] = None,
+        multimodal: bool = False,
     ):
         if extraction_limit is None:
             limit = " FACTUAL, undisputed truths"
@@ -43,98 +52,174 @@ JSON:
             limit = " the single most important FACTUAL, undisputed truth"
         else:
             limit = f" the {extraction_limit} most important FACTUAL, undisputed truths per document"
-        [old prompt body, lines 46-70, not captured in this diff view]
+
+        multimodal_instruction = ""
+        if multimodal:
+            multimodal_instruction = (
+                " The excerpt may contain both text and images."
+            )
+
+        return textwrap.dedent(
+            f"""Based on the given {'excerpt (text and images)' if multimodal else 'text'}, please generate a comprehensive list of{limit}, that can inferred from the provided {'excerpt' if multimodal else 'text'}.{multimodal_instruction}
+These truths, MUST BE COHERENT. They must NOT be taken out of context.
+
+Example:
+Example Text:
+"Albert Einstein, the genius often associated with wild hair and mind-bending theories, famously won the Nobel Prize in Physics—though not for his groundbreaking work on relativity, as many assume. Instead, in 1968, he was honored for his discovery of the photoelectric effect, a phenomenon that laid the foundation for quantum mechanics."
+
+Example JSON:
+{{
+    "truths": [
+        "Einstein won the noble prize for his discovery of the photoelectric effect in 1968.",
+        "The photoelectric effect is a phenomenon that laid the foundation for quantum mechanics."
+    ]
+}}
+===== END OF EXAMPLE ======
+**
+IMPORTANT: Please make sure to only return in JSON format, with the "truths" key as a list of strings. No words or explanation is needed.
+Only include truths that are factual, BUT IT DOESN'T MATTER IF THEY ARE FACTUALLY CORRECT.
+**
+
+{'Excerpt' if multimodal else 'Text'}:
+{retrieval_context}
+
+JSON:
+"""
+        )

     @staticmethod
-    def generate_verdicts(
-        [old body, lines 74-113, not captured in this diff view]
+    def generate_verdicts(
+        claims: List[str], retrieval_context: str, multimodal: bool = False
+    ):
+        example_section = ""
+        if multimodal:
+            example_section = textwrap.dedent(
+                """
+Example retrieval contexts: "Einstein won the Nobel Prize for his discovery of the photoelectric effect. Einstein won the Nobel Prize in 1968. Einstein is a German Scientist."
+Example claims: ["Barack Obama is a caucasian male.", "Zurich is a city in London", "Einstein won the Nobel Prize for the discovery of the photoelectric effect which may have contributed to his fame.", "Einstein won the Nobel Prize in 1969 for his discovery of the photoelectric effect.", "Einstein was a German chef."]
+
+Example:
+{{
+    "verdicts": [
+        {{
+            "reason": "The claim about Barack Obama is not directly addressed in the retrieval context, and so poses no contradiction.",
+            "verdict": "idk"
+        }},
+        {{
+            "reason": "The claim about Zurich being a city in London is incorrect but does not pose a contradiction to the retrieval context.",
+            "verdict": "idk"
+        }},
+        {{
+            "verdict": "yes"
+        }},
+        {{
+            "reason": "The actual output claims Einstein won the Nobel Prize in 1969, which is untrue as the retrieval context states it is 1968 instead.",
+            "verdict": "no"
+        }},
+        {{
+            "reason": "The actual output claims Einstein is a German chef, which is not correct as the retrieval context states he was a German scientist instead.",
+            "verdict": "no"
+        }}
+    ]
+}}
+===== END OF EXAMPLE ======
+"""
+            )
+
+        format_instruction = textwrap.dedent(
+            """
+Expected JSON format:
+{{
+    "verdicts": [
+        {{
+            "verdict": "yes"
+        }},
+        {{
+            "reason": <explanation_for_contradiction>,
+            "verdict": "no"
+        }},
+        {{
+            "reason": <explanation_for_uncertainty>,
+            "verdict": "idk"
+        }}
+    ]
+}}
+"""
+        )
+
+        guidelines = ""
+        if multimodal:
+            guidelines = textwrap.dedent(
+                """
+The length of 'verdicts' SHOULD BE STRICTLY EQUAL to that of claims.
+You DON'T have to provide a reason if the answer is 'yes'.
+ONLY provide a 'no' answer if the retrieval context DIRECTLY CONTRADICTS the claims. YOU SHOULD NEVER USE YOUR PRIOR KNOWLEDGE IN YOUR JUDGEMENT.
+Claims made using vague, suggestive, speculative language such as 'may have', 'possibility due to', does NOT count as a contradiction.
+Claims that is not backed up due to a lack of information/is not mentioned in the retrieval contexts MUST be answered 'idk', otherwise I WILL DIE.
+If there are clear contradictions or any data or images that's not mentioned in the retrieval context, just provide 'no'.
+"""
+            )
+        else:
+            guidelines = textwrap.dedent(
+                """
+Generate ONE verdict per claim - length of 'verdicts' MUST equal number of claims.
+No 'reason' needed for 'yes' verdicts.
+Only use 'no' if retrieval context DIRECTLY CONTRADICTS the claim - never use prior knowledge.
+Use 'idk' for claims not backed up by context OR factually incorrect but non-contradictory - do not assume your knowledge.
+Vague/speculative language in claims (e.g. 'may have', 'possibility') does NOT count as contradiction.
+"""
+            )
+
+        return textwrap.dedent(
+            f"""Based on the given claims, which is a list of strings, generate a list of JSON objects to indicate whether EACH claim contradicts any facts in the retrieval context. The JSON will have 2 fields: 'verdict' and 'reason'.
+The 'verdict' key should STRICTLY be either 'yes', 'no', or 'idk', which states whether the given claim agrees with the context.
+Provide a 'reason' ONLY if the answer is 'no' or 'idk'.
+The provided claim is drawn from the actual output. Try to provide a correction in the reason using the facts in the retrieval context.
+
+{format_instruction}
+{example_section}
+**
+IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON objects.
+{guidelines}
+**
+
+Retrieval Contexts:
+{retrieval_context}
+
+Claims:
+{claims}
+
+JSON:
+"""
+        )

     @staticmethod
-    def generate_reason(
-        [old signature, lines 117-118, not captured in this diff view]
+    def generate_reason(
+        score: float, contradictions: List[str], multimodal: bool = False
+    ):
+        return textwrap.dedent(
+            f"""Below is a list of Contradictions. It is a list of strings explaining why the 'actual output' does not align with the information presented in the 'retrieval context'. Contradictions happen in the 'actual output', NOT the 'retrieval context'.
+Given the faithfulness score, which is a 0-1 score indicating how faithful the `actual output` is to the retrieval context (higher the better), CONCISELY summarize the contradictions to justify the score.

-Expected JSON format:
-{{
-    [old line 122 not captured in this diff view]
-}}
+Expected JSON format:
+{{
+    "reason": "The score is <faithfulness_score> because <your_reason>."
+}}

-**
-IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+**
+IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.

-If there are no contradictions, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
-Your reason MUST use information in `contradiction` in your reason.
-Be sure in your reason, as if you know what the actual output is from the contradictions.
-**
+If there are no contradictions, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
+Your reason MUST use information in `contradiction` in your reason.
+Be sure in your reason, as if you know what the actual output is from the contradictions.
+**

-Faithfulness Score:
-{score}
+Faithfulness Score:
+{score}

-Contradictions:
-{contradictions}
+Contradictions:
+{contradictions}

-JSON:
-"""
+JSON:
+"""
+        )
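The rewritten template above now serves both the text-only faithfulness metric and the multimodal case that previously lived in multimodal_faithfulness. A short sketch of how the pieces compose, assuming deepeval's usual claims -> verdicts -> score flow; the scoring rule (share of non-'no' verdicts) mirrors the metric's documented behavior and is not itself part of this diff.

    from deepeval.metrics.faithfulness.template import FaithfulnessTemplate

    prompt = FaithfulnessTemplate.generate_claims(
        actual_output="Einstein won the Nobel Prize in 1968.",
        multimodal=True,  # wording switches to 'excerpt' for image+text inputs
    )
    assert "excerpt" in prompt  # with multimodal=False it says 'AI Output'

    def faithfulness_score(verdicts: list[dict]) -> float:
        # Contradictions ('no') lower the score; unsupported claims ('idk') do not.
        if not verdicts:
            return 1.0
        contradictions = sum(v["verdict"].strip().lower() == "no" for v in verdicts)
        return (len(verdicts) - contradictions) / len(verdicts)

    print(faithfulness_score([{"verdict": "yes"}, {"verdict": "idk"}, {"verdict": "no"}]))  # -> 0.666...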
deepeval/metrics/g_eval/g_eval.py

@@ -15,6 +15,8 @@ from deepeval.metrics.utils import (
     trimAndLoadJson,
     initialize_model,
     check_llm_test_case_params,
+    generate_with_schema_and_extract,
+    a_generate_with_schema_and_extract,
 )
 from deepeval.models import DeepEvalBaseLLM
 from deepeval.metrics.indicator import metric_progress_indicator
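The two new imports replace a block of per-metric boilerplate that 3.7.4 repeated everywhere (visible as the removed "if self.using_native_model: ... except TypeError: ..." lines in the hunks below). Their bodies live in deepeval/metrics/utils.py (+158 -122 in the file list) and are not part of this diff; here is a plausible sketch, inferred purely from the call sites, with a_generate_with_schema_and_extract as the awaited twin built on model.a_generate.

    from deepeval.metrics.utils import trimAndLoadJson

    # Inferred sketch only; the real helper in deepeval/metrics/utils.py may differ.
    def generate_with_schema_and_extract(
        metric, prompt, schema_cls, extract_schema, extract_json
    ):
        if metric.using_native_model:
            # Native models return (text, cost); parse the JSON out of the text.
            res, cost = metric.model.generate(prompt)
            metric.evaluation_cost += cost
            return extract_json(trimAndLoadJson(res, metric))
        try:
            # Custom models may support structured (schema) output directly.
            return extract_schema(metric.model.generate(prompt, schema=schema_cls))
        except TypeError:
            # Fall back to free-form generation plus JSON trimming.
            return extract_json(trimAndLoadJson(metric.model.generate(prompt), metric))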
@@ -82,7 +84,19 @@ class GEval(BaseMetric):
         _log_metric_to_confident: bool = True,
         _additional_context: Optional[str] = None,
     ) -> float:
-        [old line 85 not captured in this diff view]
+
+        multimodal = test_case.multimodal
+
+        check_llm_test_case_params(
+            test_case,
+            self.evaluation_params,
+            None,
+            None,
+            self,
+            self.model,
+            multimodal,
+        )
+
         self.evaluation_cost = 0 if self.using_native_model else None

         with metric_progress_indicator(
@@ -104,10 +118,12 @@ class GEval(BaseMetric):
             )
         else:
             self.evaluation_steps: List[str] = (
-                self._generate_evaluation_steps()
+                self._generate_evaluation_steps(multimodal)
             )
             g_score, reason = self._evaluate(
-                test_case,
+                test_case,
+                _additional_context=_additional_context,
+                multimodal=multimodal,
             )
             self.score = (
                 (float(g_score) - self.score_range[0])
@@ -143,7 +159,18 @@ class GEval(BaseMetric):
         _log_metric_to_confident: bool = True,
         _additional_context: Optional[str] = None,
     ) -> float:
-        [old line 146 not captured in this diff view]
+
+        multimodal = test_case.multimodal
+
+        check_llm_test_case_params(
+            test_case,
+            self.evaluation_params,
+            None,
+            None,
+            self,
+            self.model,
+            multimodal,
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -153,10 +180,12 @@ class GEval(BaseMetric):
             _in_component=_in_component,
         ):
             self.evaluation_steps: List[str] = (
-                await self._a_generate_evaluation_steps()
+                await self._a_generate_evaluation_steps(multimodal)
             )
             g_score, reason = await self._a_evaluate(
-                test_case,
+                test_case,
+                _additional_context=_additional_context,
+                multimodal=multimodal,
             )
             self.score = (
                 (float(g_score) - self.score_range[0]) / self.score_range_span
@@ -182,7 +211,7 @@ class GEval(BaseMetric):
         )
         return self.score

-    async def _a_generate_evaluation_steps(self) -> List[str]:
+    async def _a_generate_evaluation_steps(self, multimodal: bool) -> List[str]:
         if self.evaluation_steps:
             return self.evaluation_steps

@@ -190,25 +219,19 @@ class GEval(BaseMetric):
             self.evaluation_params
         )
         prompt = self.evaluation_template.generate_evaluation_steps(
-            criteria=self.criteria,
+            criteria=self.criteria,
+            parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=gschema.Steps,
+            extract_schema=lambda s: s.steps,
+            extract_json=lambda d: d["steps"],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt)
-            self.evaluation_cost += cost
-            data = trimAndLoadJson(res, self)
-            return data["steps"]
-        else:
-            try:
-                res: gschema.Steps = await self.model.a_generate(
-                    prompt, schema=gschema.Steps
-                )
-                return res.steps
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["steps"]

-    def _generate_evaluation_steps(self) -> List[str]:
+    def _generate_evaluation_steps(self, multimodal: bool) -> List[str]:
         if self.evaluation_steps:
             return self.evaluation_steps

@@ -216,26 +239,23 @@ class GEval(BaseMetric):
             self.evaluation_params
         )
         prompt = self.evaluation_template.generate_evaluation_steps(
-            criteria=self.criteria,
+            criteria=self.criteria,
+            parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=gschema.Steps,
+            extract_schema=lambda s: s.steps,
+            extract_json=lambda d: d["steps"],
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt)
-            self.evaluation_cost += cost
-            data = trimAndLoadJson(res, self)
-            return data["steps"]
-        else:
-            try:
-                res: gschema.Steps = self.model.generate(
-                    prompt, schema=gschema.Steps
-                )
-                return res.steps
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["steps"]

     async def _a_evaluate(
-        self,
+        self,
+        test_case: LLMTestCase,
+        multimodal: bool,
+        _additional_context: Optional[str] = None,
     ) -> Tuple[Union[int, float], str]:
         test_case_content = construct_test_case_string(
             self.evaluation_params, test_case
@@ -252,6 +272,7 @@ class GEval(BaseMetric):
                 rubric=rubric_str,
                 score_range=self.score_range,
                 _additional_context=_additional_context,
+                multimodal=multimodal,
             )
         else:
             prompt = (
@@ -262,6 +283,7 @@ class GEval(BaseMetric):
                 test_case_content=test_case_content,
                 parameters=g_eval_params_str,
                 _additional_context=_additional_context,
+                multimodal=multimodal,
             )
         )
         try:
@@ -275,8 +297,7 @@
                 prompt, top_logprobs=self.top_logprobs
             )

-
-            self.evaluation_cost += cost
+            self._accrue_cost(cost)

             data = trimAndLoadJson(res.choices[0].message.content, self)

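_accrue_cost is likewise new, coming from the slimmed-down BaseMetric (base_metric.py +13 -44 in the file list); its body is not shown in this diff. A hypothetical reconstruction, given only that it stands in for the bare "self.evaluation_cost += cost" lines it replaces here and in the hunk below:

    # Hypothetical: the actual BaseMetric._accrue_cost in 3.7.6 is not shown in this diff.
    class _CostTrackingSketch:
        evaluation_cost = None  # None when cost tracking is disabled

        def _accrue_cost(self, cost):
            # Accumulate only when tracking is on and the model reported a cost.
            if self.evaluation_cost is not None and cost is not None:
                self.evaluation_cost += cost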
@@ -292,27 +313,21 @@
             return weighted_summed_score, reason
         except (KeyError, AttributeError, TypeError, ValueError):
             return score, reason
-        except
-        [old lines 296-303 not captured in this diff view]
-            try:
-                res: gschema.ReasonScore = await self.model.a_generate(
-                    prompt, schema=gschema.ReasonScore
-                )
-                return res.score, res.reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["score"], data["reason"]
+        except AttributeError:
+            # This catches the case where a_generate_raw_response doesn't exist.
+            return await a_generate_with_schema_and_extract(
+                metric=self,
+                prompt=prompt,
+                schema_cls=gschema.ReasonScore,
+                extract_schema=lambda s: (s.score, s.reason),
+                extract_json=lambda d: (d["score"], d["reason"]),
+            )

     def _evaluate(
-        self,
+        self,
+        test_case: LLMTestCase,
+        multimodal: bool,
+        _additional_context: Optional[str] = None,
     ) -> Tuple[Union[int, float], str]:
         test_case_content = construct_test_case_string(
             self.evaluation_params, test_case
@@ -330,6 +345,7 @@ class GEval(BaseMetric):
                 rubric=rubric_str,
                 score_range=self.score_range,
                 _additional_context=_additional_context,
+                multimodal=multimodal,
             )
         else:
             prompt = (
@@ -340,6 +356,7 @@ class GEval(BaseMetric):
                 test_case_content=test_case_content,
                 parameters=g_eval_params_str,
                 _additional_context=_additional_context,
+                multimodal=multimodal,
             )
         )

@@ -351,7 +368,7 @@ class GEval(BaseMetric):
             res, cost = self.model.generate_raw_response(
                 prompt, top_logprobs=self.top_logprobs
             )
-            self.evaluation_cost += cost
+            self._accrue_cost(cost)
             data = trimAndLoadJson(res.choices[0].message.content, self)

             reason = data["reason"]
@@ -368,21 +385,13 @@ class GEval(BaseMetric):
             return score, reason
         except AttributeError:
             # This catches the case where a_generate_raw_response doesn't exist.
-            [old lines 371-377 not captured in this diff view]
-                res: gschema.ReasonScore = self.model.generate(
-                    prompt, schema=gschema.ReasonScore
-                )
-                return res.score, res.reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["score"], data["reason"]
+            return generate_with_schema_and_extract(
+                metric=self,
+                prompt=prompt,
+                schema_cls=gschema.ReasonScore,
+                extract_schema=lambda s: (s.score, s.reason),
+                extract_json=lambda d: (d["score"], d["reason"]),
+            )

     def is_successful(self) -> bool:
         if self.error is not None: