deepeval 3.7.3__py3-none-any.whl → 3.7.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/cli/test.py +1 -1
- deepeval/config/settings.py +102 -13
- deepeval/dataset/golden.py +54 -2
- deepeval/evaluate/configs.py +1 -1
- deepeval/evaluate/evaluate.py +16 -8
- deepeval/evaluate/execute.py +74 -27
- deepeval/evaluate/utils.py +26 -22
- deepeval/integrations/pydantic_ai/agent.py +19 -2
- deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
- deepeval/metrics/__init__.py +14 -12
- deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
- deepeval/metrics/answer_relevancy/template.py +188 -92
- deepeval/metrics/argument_correctness/template.py +2 -2
- deepeval/metrics/base_metric.py +2 -5
- deepeval/metrics/bias/template.py +3 -3
- deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
- deepeval/metrics/contextual_precision/template.py +115 -66
- deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
- deepeval/metrics/contextual_recall/template.py +106 -55
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
- deepeval/metrics/contextual_relevancy/template.py +87 -58
- deepeval/metrics/conversation_completeness/template.py +2 -2
- deepeval/metrics/conversational_dag/templates.py +4 -4
- deepeval/metrics/conversational_g_eval/template.py +4 -3
- deepeval/metrics/dag/templates.py +5 -5
- deepeval/metrics/faithfulness/faithfulness.py +70 -27
- deepeval/metrics/faithfulness/schema.py +1 -1
- deepeval/metrics/faithfulness/template.py +200 -115
- deepeval/metrics/g_eval/utils.py +2 -2
- deepeval/metrics/hallucination/template.py +4 -4
- deepeval/metrics/indicator.py +4 -4
- deepeval/metrics/misuse/template.py +2 -2
- deepeval/metrics/multimodal_metrics/__init__.py +0 -18
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
- deepeval/metrics/non_advice/template.py +2 -2
- deepeval/metrics/pii_leakage/template.py +2 -2
- deepeval/metrics/prompt_alignment/template.py +4 -4
- deepeval/metrics/ragas.py +3 -3
- deepeval/metrics/role_violation/template.py +2 -2
- deepeval/metrics/step_efficiency/step_efficiency.py +1 -1
- deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
- deepeval/metrics/toxicity/template.py +4 -4
- deepeval/metrics/turn_contextual_precision/schema.py +21 -0
- deepeval/metrics/turn_contextual_precision/template.py +187 -0
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
- deepeval/metrics/turn_contextual_recall/schema.py +21 -0
- deepeval/metrics/turn_contextual_recall/template.py +178 -0
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
- deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
- deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
- deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
- deepeval/metrics/turn_faithfulness/template.py +218 -0
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
- deepeval/metrics/turn_relevancy/template.py +2 -2
- deepeval/metrics/utils.py +39 -58
- deepeval/models/__init__.py +0 -12
- deepeval/models/base_model.py +16 -38
- deepeval/models/embedding_models/__init__.py +7 -0
- deepeval/models/embedding_models/azure_embedding_model.py +69 -32
- deepeval/models/embedding_models/local_embedding_model.py +39 -22
- deepeval/models/embedding_models/ollama_embedding_model.py +42 -18
- deepeval/models/embedding_models/openai_embedding_model.py +50 -15
- deepeval/models/llms/amazon_bedrock_model.py +1 -2
- deepeval/models/llms/anthropic_model.py +53 -20
- deepeval/models/llms/azure_model.py +140 -43
- deepeval/models/llms/deepseek_model.py +38 -23
- deepeval/models/llms/gemini_model.py +222 -103
- deepeval/models/llms/grok_model.py +39 -27
- deepeval/models/llms/kimi_model.py +39 -23
- deepeval/models/llms/litellm_model.py +103 -45
- deepeval/models/llms/local_model.py +35 -22
- deepeval/models/llms/ollama_model.py +129 -17
- deepeval/models/llms/openai_model.py +151 -50
- deepeval/models/llms/portkey_model.py +149 -0
- deepeval/models/llms/utils.py +5 -3
- deepeval/models/retry_policy.py +17 -14
- deepeval/models/utils.py +94 -4
- deepeval/optimizer/__init__.py +5 -0
- deepeval/optimizer/algorithms/__init__.py +6 -0
- deepeval/optimizer/algorithms/base.py +29 -0
- deepeval/optimizer/algorithms/configs.py +18 -0
- deepeval/optimizer/algorithms/copro/__init__.py +5 -0
- deepeval/optimizer/algorithms/copro/copro.py +836 -0
- deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
- deepeval/optimizer/algorithms/gepa/gepa.py +737 -0
- deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
- deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
- deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
- deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
- deepeval/optimizer/algorithms/simba/__init__.py +5 -0
- deepeval/optimizer/algorithms/simba/simba.py +999 -0
- deepeval/optimizer/algorithms/simba/types.py +15 -0
- deepeval/optimizer/configs.py +31 -0
- deepeval/optimizer/policies.py +227 -0
- deepeval/optimizer/prompt_optimizer.py +263 -0
- deepeval/optimizer/rewriter/__init__.py +5 -0
- deepeval/optimizer/rewriter/rewriter.py +124 -0
- deepeval/optimizer/rewriter/utils.py +214 -0
- deepeval/optimizer/scorer/__init__.py +5 -0
- deepeval/optimizer/scorer/base.py +86 -0
- deepeval/optimizer/scorer/scorer.py +316 -0
- deepeval/optimizer/scorer/utils.py +30 -0
- deepeval/optimizer/types.py +148 -0
- deepeval/optimizer/utils.py +480 -0
- deepeval/prompt/prompt.py +7 -6
- deepeval/test_case/__init__.py +1 -3
- deepeval/test_case/api.py +12 -10
- deepeval/test_case/conversational_test_case.py +19 -1
- deepeval/test_case/llm_test_case.py +152 -1
- deepeval/test_case/utils.py +4 -8
- deepeval/test_run/api.py +15 -14
- deepeval/test_run/cache.py +2 -0
- deepeval/test_run/test_run.py +9 -4
- deepeval/tracing/patchers.py +9 -4
- deepeval/tracing/tracing.py +2 -2
- deepeval/utils.py +89 -0
- {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
- {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/RECORD +134 -118
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
- deepeval/models/mlllms/__init__.py +0 -4
- deepeval/models/mlllms/azure_model.py +0 -334
- deepeval/models/mlllms/gemini_model.py +0 -284
- deepeval/models/mlllms/ollama_model.py +0 -144
- deepeval/models/mlllms/openai_model.py +0 -258
- deepeval/test_case/mllm_test_case.py +0 -170
- /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
- {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
- {deepeval-3.7.3.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
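The diffs below drop the MLLM-specific test case and model classes (MLLMTestCase, DeepEvalBaseMLLM, MultimodalOpenAIModel) and import MLLMImage alongside the standard LLMTestCase instead, which suggests multimodal evaluation now flows through the regular test case type. A minimal, hypothetical sketch of that usage; whether LLMTestCase fields accept a mixed text/image list is an assumption inferred from the convert_to_multi_modal_array call sites, and the URL is a placeholder:

# Hypothetical sketch only -- not taken from the diff.
from deepeval.test_case import LLMTestCase, MLLMImage

test_case = LLMTestCase(
    input="Generate a watercolor painting of a lighthouse at dusk.",
    actual_output=[
        "Here is the generated image:",
        MLLMImage(url="https://example.com/lighthouse.png"),  # placeholder URL
    ],
)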
deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py
CHANGED

@@ -1,29 +1,26 @@
-from deepeval.test_case import
-from deepeval.test_case
-from deepeval.models.
+from deepeval.test_case import LLMTestCaseParams, LLMTestCase, ToolCall
+from deepeval.test_case import MLLMImage
+from deepeval.models.llms.openai_model import (
     unsupported_log_probs_multimodal_gpt_models,
 )
-from deepeval.models import
-    DeepEvalBaseMLLM,
-    MultimodalOpenAIModel,
-)
+from deepeval.models import DeepEvalBaseLLM, GPTModel

 from typing import List, Union


 G_EVAL_PARAMS = {
-
-
-
-
-
-
-
+    LLMTestCaseParams.INPUT: "Input",
+    LLMTestCaseParams.ACTUAL_OUTPUT: "Actual Output",
+    LLMTestCaseParams.EXPECTED_OUTPUT: "Expected Output",
+    LLMTestCaseParams.CONTEXT: "Context",
+    LLMTestCaseParams.RETRIEVAL_CONTEXT: "Retrieval Context",
+    LLMTestCaseParams.EXPECTED_TOOLS: "Expected Tools",
+    LLMTestCaseParams.TOOLS_CALLED: "Tools Called",
 }


 def construct_g_eval_params_string(
-    mllm_test_case_params: List[
+    mllm_test_case_params: List[LLMTestCaseParams],
 ):
     g_eval_params = [G_EVAL_PARAMS[param] for param in mllm_test_case_params]
     if len(g_eval_params) == 1:

@@ -39,12 +36,14 @@ def construct_g_eval_params_string(


 def construct_test_case_list(
-    evaluation_params: List[
+    evaluation_params: List[LLMTestCaseParams], test_case: LLMTestCase
 ) -> List[Union[str, MLLMImage]]:
+    from deepeval.utils import convert_to_multi_modal_array
+
     test_case_list = []
     for param in evaluation_params:
         test_case_param_list = [f"\n\n\n{G_EVAL_PARAMS[param]}:\n"]
-        value = getattr(test_case, param.value)
+        value = convert_to_multi_modal_array(getattr(test_case, param.value))
         for v in value:
             if isinstance(v, ToolCall):
                 test_case_param_list.append(repr(v))

@@ -54,15 +53,16 @@ def construct_test_case_list(
     return test_case_list


-def no_multimodal_log_prob_support(model: Union[str,
+def no_multimodal_log_prob_support(model: Union[str, DeepEvalBaseLLM]):
     if (
         isinstance(model, str)
         and model in unsupported_log_probs_multimodal_gpt_models
     ):
         return True
     elif (
-        isinstance(model,
-        and model.
+        isinstance(model, GPTModel)
+        and model.get_model_name()
+        in unsupported_log_probs_multimodal_gpt_models
     ):
         return True
     return False
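Several hunks in this release wrap values in deepeval.utils.convert_to_multi_modal_array before iterating over them. The implementation is not shown in this diff; the sketch below is only an assumed behavior inferred from the call sites (normalize a scalar into a list so callers can iterate over mixed text and MLLMImage chunks):

# Assumed behavior only, inferred from call sites such as
# construct_test_case_list above; the real implementation may differ.
from typing import Any, List, Union
from deepeval.test_case import MLLMImage


def convert_to_multi_modal_array_sketch(value: Any) -> List[Union[str, MLLMImage]]:
    # Wrap scalars so downstream loops always see a list of chunks.
    if value is None:
        return []
    if isinstance(value, (str, MLLMImage)):
        return [value]
    return list(value)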
deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py
CHANGED

@@ -4,37 +4,40 @@ import math
 import textwrap

 from deepeval.metrics import BaseMultimodalMetric
-from deepeval.test_case import
+from deepeval.test_case import LLMTestCaseParams, LLMTestCase, MLLMImage
 from deepeval.metrics.multimodal_metrics.text_to_image.template import (
     TextToImageTemplate,
 )
-from deepeval.utils import
+from deepeval.utils import (
+    get_or_create_event_loop,
+    convert_to_multi_modal_array,
+)
 from deepeval.metrics.utils import (
     construct_verbose_logs,
     trimAndLoadJson,
     check_mllm_test_case_params,
-
+    initialize_model,
 )
-from deepeval.models import
+from deepeval.models import DeepEvalBaseLLM
 from deepeval.metrics.multimodal_metrics.text_to_image.schema import ReasonScore
 from deepeval.metrics.indicator import metric_progress_indicator

-required_params: List[
-
-
+required_params: List[LLMTestCaseParams] = [
+    LLMTestCaseParams.INPUT,
+    LLMTestCaseParams.ACTUAL_OUTPUT,
 ]


 class TextToImageMetric(BaseMultimodalMetric):
     def __init__(
         self,
-        model: Optional[Union[str,
+        model: Optional[Union[str, DeepEvalBaseLLM]] = None,
         threshold: float = 0.5,
         async_mode: bool = True,
         strict_mode: bool = False,
         verbose_mode: bool = False,
     ):
-        self.model, self.using_native_model =
+        self.model, self.using_native_model = initialize_model(model)
         self.evaluation_model = self.model.get_model_name()
         self.threshold = 1 if strict_mode else threshold
         self.strict_mode = strict_mode

@@ -43,11 +46,13 @@ class TextToImageMetric(BaseMultimodalMetric):

     def measure(
         self,
-        test_case:
+        test_case: LLMTestCase,
         _show_indicator: bool = True,
         _in_component: bool = False,
     ) -> float:
-        check_mllm_test_case_params(
+        check_mllm_test_case_params(
+            test_case, required_params, 0, 1, self, self.model
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(

@@ -63,10 +68,12 @@ class TextToImageMetric(BaseMultimodalMetric):
                 )
             )
         else:
-
-
+            input = convert_to_multi_modal_array(test_case.input)
+            actual_output = convert_to_multi_modal_array(
                 test_case.actual_output
             )
+            input_texts, _ = self.separate_images_from_text(input)
+            _, output_images = self.separate_images_from_text(actual_output)

             self.SC_scores, self.SC_reasoning = (
                 self._evaluate_semantic_consistency(

@@ -99,11 +106,13 @@ class TextToImageMetric(BaseMultimodalMetric):

     async def a_measure(
         self,
-        test_case:
+        test_case: LLMTestCase,
         _show_indicator: bool = True,
         _in_component: bool = False,
     ) -> float:
-        check_mllm_test_case_params(
+        check_mllm_test_case_params(
+            test_case, required_params, 0, 1, self, self.model
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(

@@ -112,10 +121,12 @@ class TextToImageMetric(BaseMultimodalMetric):
             _show_indicator=_show_indicator,
             _in_component=_in_component,
         ):
-
-
+            input = convert_to_multi_modal_array(test_case.input)
+            actual_output = convert_to_multi_modal_array(
                 test_case.actual_output
             )
+            input_texts, _ = self.separate_images_from_text(input)
+            _, output_images = self.separate_images_from_text(actual_output)
             (self.SC_scores, self.SC_reasoning), (
                 self.PQ_scores,
                 self.PQ_reasoning,

@@ -165,27 +176,27 @@ class TextToImageMetric(BaseMultimodalMetric):
     ) -> Tuple[List[int], str]:
         images: List[MLLMImage] = []
         images.append(actual_image_output)
-        prompt =
-
-
-
-
+        prompt = f"""
+            {
+                TextToImageTemplate.generate_semantic_consistency_evaluation_results(
+                    text_prompt=text_prompt
+                )
+            }
+            Images:
+            {images}
+        """
         if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt + images, ReasonScore
-            )
+            res, cost = await self.model.a_generate(prompt, ReasonScore)
             self.evaluation_cost += cost
             return res.score, res.reasoning
         else:
             try:
                 res: ReasonScore = await self.model.a_generate(
-                    prompt
+                    prompt, schema=ReasonScore
                 )
                 return res.score, res.reasoning
             except TypeError:
-                res = await self.model.a_generate(
-                    prompt + images, input_text=prompt
-                )
+                res = await self.model.a_generate(prompt, input_text=prompt)
                 data = trimAndLoadJson(res, self)
                 return data["score"], data["reasoning"]

@@ -196,23 +207,27 @@ class TextToImageMetric(BaseMultimodalMetric):
     ) -> Tuple[List[int], str]:
         images: List[MLLMImage] = []
         images.append(actual_image_output)
-        prompt =
-
-
-
-
+        prompt = f"""
+            {
+                TextToImageTemplate.generate_semantic_consistency_evaluation_results(
+                    text_prompt=text_prompt
+                )
+            }
+            Images:
+            {images}
+        """
         if self.using_native_model:
-            res, cost = self.model.generate(prompt
+            res, cost = self.model.generate(prompt, ReasonScore)
             self.evaluation_cost += cost
             return res.score, res.reasoning
         else:
             try:
                 res: ReasonScore = self.model.generate(
-                    prompt
+                    prompt, schema=ReasonScore
                 )
                 return res.score, res.reasoning
             except TypeError:
-                res = self.model.generate(prompt
+                res = self.model.generate(prompt)
                 data = trimAndLoadJson(res, self)
                 return data["score"], data["reasoning"]

@@ -220,23 +235,25 @@ class TextToImageMetric(BaseMultimodalMetric):
         self, actual_image_output: MLLMImage
     ) -> Tuple[List[int], str]:
         images: List[MLLMImage] = [actual_image_output]
-        prompt =
-
-
+        prompt = f"""
+            {
+                TextToImageTemplate.generate_perceptual_quality_evaluation_results()
+            }
+            Images:
+            {images}
+        """
         if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt + images, ReasonScore
-            )
+            res, cost = await self.model.a_generate(prompt, ReasonScore)
             self.evaluation_cost += cost
             return res.score, res.reasoning
         else:
             try:
                 res: ReasonScore = await self.model.a_generate(
-                    prompt
+                    prompt, schema=ReasonScore
                 )
                 return res.score, res.reasoning
             except TypeError:
-                res = await self.model.a_generate(prompt
+                res = await self.model.a_generate(prompt)
                 data = trimAndLoadJson(res, self)
                 return data["score"], data["reasoning"]

@@ -244,9 +261,13 @@ class TextToImageMetric(BaseMultimodalMetric):
         self, actual_image_output: MLLMImage
     ) -> Tuple[List[int], str]:
         images: List[MLLMImage] = [actual_image_output]
-        prompt =
-
-
+        prompt = f"""
+            {
+                TextToImageTemplate.generate_perceptual_quality_evaluation_results()
+            }
+            Images:
+            {images}
+        """
         if self.using_native_model:
             res, cost = self.model.generate(prompt + images, ReasonScore)
             self.evaluation_cost += cost

@@ -254,11 +275,11 @@ class TextToImageMetric(BaseMultimodalMetric):
         else:
             try:
                 res: ReasonScore = self.model.generate(
-                    prompt
+                    prompt, schema=ReasonScore
                 )
                 return res.score, res.reasoning
             except TypeError:
-                res = self.model.generate(prompt
+                res = self.model.generate(prompt)
                 data = trimAndLoadJson(res, self)
                 return data["score"], data["reasoning"]
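A hypothetical usage sketch for the updated TextToImageMetric signature shown above; the model name, image URL, and list-valued actual_output are illustrative assumptions rather than values taken from the diff:

# Hypothetical usage sketch; names and values are illustrative.
from deepeval.metrics.multimodal_metrics.text_to_image.text_to_image import (
    TextToImageMetric,
)
from deepeval.test_case import LLMTestCase, MLLMImage

metric = TextToImageMetric(model="gpt-4o", threshold=0.5)  # assumed model id
test_case = LLMTestCase(
    input="A red bicycle leaning against a brick wall",
    actual_output=[MLLMImage(url="https://example.com/generated.png")],  # placeholder
)
score = metric.measure(test_case)  # measure() now takes an LLMTestCase per the diff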
deepeval/metrics/prompt_alignment/template.py
CHANGED

@@ -26,12 +26,12 @@ Example JSON:
         "verdict": "yes"
     }},
     {{
-        "
-        "
+        "reason": "The LLM corrected the user when the user used the wrong grammar in asking about the number of stars in the sky.",
+        "verdict": "no"
     }},
     {{
-        "
-        "
+        "reason": "The LLM only made 'HEY THERE' uppercase, which does not follow the instruction of making everything uppercase completely.",
+        "verdict": "no"
     }}
 ]
 }}
deepeval/metrics/ragas.py
CHANGED
@@ -10,7 +10,7 @@ from deepeval.telemetry import capture_metric_type

 # check langchain availability
 try:
-    import langchain_core
+    import langchain_core  # noqa: F401
     from langchain_core.language_models import BaseChatModel
     from langchain_core.embeddings import Embeddings

@@ -501,7 +501,7 @@ class RagasMetric(BaseMetric):
     def measure(self, test_case: LLMTestCase):
         # sends to server
         try:
-            from ragas import evaluate
+            from ragas import evaluate  # noqa: F401
         except ModuleNotFoundError:
             raise ModuleNotFoundError(
                 "Please install ragas to use this metric. `pip install ragas`."

@@ -509,7 +509,7 @@ class RagasMetric(BaseMetric):

         try:
             # How do i make sure this isn't just huggingface dataset
-            from datasets import Dataset
+            from datasets import Dataset  # noqa: F401
         except ModuleNotFoundError:
             raise ModuleNotFoundError("Please install dataset")
deepeval/metrics/tool_correctness/tool_correctness.py
CHANGED

@@ -83,7 +83,7 @@ class ToolCorrectnessMetric(BaseMetric):
         self.tools_called: List[ToolCall] = test_case.tools_called
         self.expected_tools: List[ToolCall] = test_case.expected_tools
         tool_calling_score = self._calculate_score()
-        if self.available_tools:
+        if self.available_tools and not test_case.multimodal:
             tool_selection_score = self._get_tool_selection_score(
                 test_case.input,
                 test_case.tools_called,

@@ -177,7 +177,7 @@ class ToolCorrectnessMetric(BaseMetric):
         self.tools_called: List[ToolCall] = test_case.tools_called
         self.expected_tools: List[ToolCall] = test_case.expected_tools
         tool_calling_score = self._calculate_score()
-        if self.available_tools:
+        if self.available_tools and not test_case.multimodal:
             tool_selection_score = await self._a_get_tool_selection_score(
                 test_case.input,
                 test_case.tools_called,
deepeval/metrics/toxicity/template.py
CHANGED

@@ -71,15 +71,15 @@ Example JSON:
 {{
     "verdicts": [
         {{
-            "
-            "
+            "reason": "The actual output uses words like "garbage," "tasteless weeds," etc., ridicules vegan food and those who choose it, and attacks people who enjoy vegan food, calling them "pretentious losers. Furthermore, it dismisses the entire concept of vegan cuisine with a sweeping generalization, making it seem worthless.",
+            "verdict": "yes"
         }},
         {{
             "verdict": "no"
         }},
         {{
-            "
-            "
+            "reason": "'Sarah always meant well' sounds positive but are undermined by the surrounding criticism such as 'can't help but sign', which can be considered a personal attack.",
+            "verdict": "yes"
         }}
     ]
 }}
deepeval/metrics/turn_contextual_precision/schema.py

@@ -0,0 +1,21 @@
+from typing import List
+from pydantic import BaseModel
+
+
+class ContextualPrecisionVerdict(BaseModel):
+    verdict: str
+    reason: str
+
+
+class Verdicts(BaseModel):
+    verdicts: List[ContextualPrecisionVerdict]
+
+
+class ContextualPrecisionScoreReason(BaseModel):
+    reason: str
+
+
+class InteractionContextualPrecisionScore(BaseModel):
+    score: float
+    reason: str
+    verdicts: List[ContextualPrecisionVerdict]
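A minimal sketch of how the new schema could validate an evaluator model's JSON verdicts; the payload below is invented for illustration:

# Illustration only: the payload is invented.
from deepeval.metrics.turn_contextual_precision.schema import Verdicts

payload = {
    "verdicts": [
        {"verdict": "yes", "reason": "Node 1 directly supports the answer."},
        {"verdict": "no", "reason": "Node 2 is unrelated to the question."},
    ]
}
parsed = Verdicts(**payload)  # pydantic validates the verdict/reason fields
print([v.verdict for v in parsed.verdicts])  # ['yes', 'no']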
deepeval/metrics/turn_contextual_precision/template.py

@@ -0,0 +1,187 @@
+from typing import List, Dict, Union
+import textwrap
+from deepeval.test_case import MLLMImage
+
+
+class TurnContextualPrecisionTemplate:
+    multimodal_rules = """
+    --- MULTIMODAL INPUT RULES ---
+    - Treat image content as factual evidence.
+    - Only reference visual details that are explicitly and clearly visible.
+    - Do not infer or guess objects, text, or details not visibly present.
+    - If an image is unclear or ambiguous, mark uncertainty explicitly.
+    - When evaluating claims, compare them to BOTH textual and visual evidence.
+    - If the claim references something not clearly visible, respond with 'idk'.
+    """
+
+    @staticmethod
+    def generate_verdicts(
+        input: str,
+        expected_outcome: str,
+        retrieval_context: List[str],
+        multimodal: bool = False,
+    ):
+        document_count_str = f" ({len(retrieval_context)} document{'s' if len(retrieval_context) > 1 else ''})"
+
+        # For multimodal, we need to annotate the retrieval context with node IDs
+        context_to_display = (
+            TurnContextualPrecisionTemplate.id_retrieval_context(
+                retrieval_context
+            )
+            if multimodal
+            else retrieval_context
+        )
+
+        multimodal_note = (
+            " (which can be text or an image)" if multimodal else ""
+        )
+
+        prompt_template = textwrap.dedent(
+            f"""Given the user message, assistant output, and retrieval context, please generate a list of JSON objects to determine whether each node in the retrieval context was remotely useful in arriving at the assistant output.
+
+            {TurnContextualPrecisionTemplate.multimodal_rules if multimodal else ""}
+
+            **
+            IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key as a list of JSON. These JSON only contain the `verdict` key that outputs only 'yes' or 'no', and a `reason` key to justify the verdict. In your reason, you should aim to quote parts of the context {multimodal_note}.
+            Example Retrieval Context: ["Einstein won the Nobel Prize for his discovery of the photoelectric effect", "He won the Nobel Prize in 1968.", "There was a cat."]
+            Example User Message: "Who won the Nobel Prize in 1968 and for what?"
+            Example Assistant Output: "Einstein won the Nobel Prize in 1968 for his discovery of the photoelectric effect."
+
+            Example:
+            {{
+                "verdicts": [
+                    {{
+                        "reason": "It clearly addresses the question by stating that 'Einstein won the Nobel Prize for his discovery of the photoelectric effect.'",
+                        "verdict": "yes"
+                    }},
+                    {{
+                        "reason": "The text verifies that the prize was indeed won in 1968.",
+                        "verdict": "yes"
+                    }},
+                    {{
+                        "reason": "'There was a cat' is not at all relevant to the topic of winning a Nobel Prize.",
+                        "verdict": "no"
+                    }}
+                ]
+            }}
+            Since you are going to generate a verdict for each context, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to that of the contexts.
+            **
+
+            User Message:
+            {input}
+
+            Assistant Output:
+            {expected_outcome}
+
+            Retrieval Context{document_count_str}:
+            {context_to_display}
+
+            JSON:
+            """
+        )
+
+        return prompt_template
+
+    @staticmethod
+    def generate_reason(
+        input: str,
+        score: float,
+        verdicts: List[Dict[str, str]],
+        multimodal: bool = False,
+    ):
+        return textwrap.dedent(
+            f"""Given the user message, retrieval contexts, and contextual precision score, provide a CONCISE {'summarize' if multimodal else 'summary'} for the score. Explain why it is not higher, but also why it is at its current score.
+            The retrieval contexts is a list of JSON with three keys: `verdict`, `reason` (reason for the verdict) and `node`. `verdict` will be either 'yes' or 'no', which represents whether the corresponding 'node' in the retrieval context is relevant to the user message.
+            Contextual precision represents if the relevant nodes are ranked higher than irrelevant nodes. Also note that retrieval contexts is given IN THE ORDER OF THEIR RANKINGS.
+
+            {TurnContextualPrecisionTemplate.multimodal_rules if multimodal else ""}
+
+            **
+            IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
+            Example JSON:
+            {{
+                "reason": "The score is <contextual_precision_score> because <your_reason>."
+            }}
+
+
+            DO NOT mention 'verdict' in your reason, but instead phrase it as irrelevant nodes. The term 'verdict' {'are' if multimodal else 'is'} just here for you to understand the broader scope of things.
+            Also DO NOT mention there are `reason` fields in the retrieval contexts you are presented with, instead just use the information in the `reason` field.
+            In your reason, you MUST USE the `reason`, QUOTES in the 'reason', and the node RANK (starting from 1, eg. first node) to explain why the 'no' verdicts should be ranked lower than the 'yes' verdicts.
+            When addressing nodes, make it explicit that {'it is' if multimodal else 'they are'} nodes in {'retrieval context' if multimodal else 'retrieval contexts'}.
+            If the score is 1, keep it short and say something positive with an upbeat tone (but don't overdo it{',' if multimodal else ''} otherwise it gets annoying).
+            **
+
+            Contextual Precision Score:
+            {score}
+
+            User Message:
+            {input}
+
+            Retrieval Contexts:
+            {verdicts}
+
+            JSON:
+            """
+        )
+
+    @staticmethod
+    def generate_final_reason(
+        final_score: float, success: bool, reasons: List[str]
+    ):
+        return textwrap.dedent(
+            f"""You are an AI evaluator producing a single final explanation for the TurnContextualPrecisionMetric result.
+
+            Context:
+            This metric evaluates conversational contextual precision by determining whether relevant nodes in retrieval context are ranked higher than irrelevant nodes for each interaction. Each interaction yields a reason indicating why relevant nodes were well-ranked or poorly-ranked. You are given all those reasons.
+
+            Inputs:
+            - final_score: the averaged score across all interactions.
+            - success: whether the metric passed or failed
+            - reasons: a list of textual reasons generated from individual interactions.
+
+            Instructions:
+            1. Read all reasons and synthesize them into one unified explanation.
+            2. Describe patterns of ranking issues, irrelevant nodes appearing before relevant ones, or well-structured retrieval contexts if present.
+            3. Do not repeat every reason; merge them into a concise, coherent narrative.
+            4. If the metric failed, state the dominant failure modes. If it passed, state why the retrieval context ranking was effective.
+            5. Output a single paragraph with no lists, no bullets, no markup.
+
+            Output:
+            A single paragraph explaining the final outcome.
+
+            Here's the inputs:
+
+            Final Score: {final_score}
+
+            Reasons:
+            {reasons}
+
+            Success: {success}
+
+            Now give me a final reason that explains why the metric passed or failed. Output ONLY the reason and nothing else.
+
+            The final reason:
+            """
+        )
+
+    @staticmethod
+    def id_retrieval_context(
+        retrieval_context: List[str],
+    ) -> List[Union[str, MLLMImage]]:
+        """
+        Annotates retrieval context with node IDs for multimodal processing.
+
+        Args:
+            retrieval_context: List of contexts (can be strings or MLLMImages)
+
+        Returns:
+            Annotated list with "Node X:" prefixes
+        """
+        annotated_retrieval_context = []
+        for i, context in enumerate(retrieval_context):
+            if isinstance(context, str):
+                annotated_retrieval_context.append(f"Node {i + 1}: {context}")
+            elif isinstance(context, MLLMImage):
+                annotated_retrieval_context.append(f"Node {i + 1}:")
+                annotated_retrieval_context.append(context)
+        return annotated_retrieval_context