deepeval 3.7.4__py3-none-any.whl → 3.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. deepeval/_version.py +1 -1
  2. deepeval/dataset/golden.py +54 -2
  3. deepeval/evaluate/evaluate.py +16 -8
  4. deepeval/evaluate/execute.py +70 -26
  5. deepeval/evaluate/utils.py +26 -22
  6. deepeval/integrations/pydantic_ai/agent.py +19 -2
  7. deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
  8. deepeval/metrics/__init__.py +14 -12
  9. deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
  10. deepeval/metrics/answer_relevancy/template.py +188 -92
  11. deepeval/metrics/base_metric.py +2 -5
  12. deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
  13. deepeval/metrics/contextual_precision/template.py +115 -66
  14. deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
  15. deepeval/metrics/contextual_recall/template.py +106 -55
  16. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
  17. deepeval/metrics/contextual_relevancy/template.py +87 -58
  18. deepeval/metrics/dag/templates.py +2 -2
  19. deepeval/metrics/faithfulness/faithfulness.py +70 -27
  20. deepeval/metrics/faithfulness/schema.py +1 -1
  21. deepeval/metrics/faithfulness/template.py +200 -115
  22. deepeval/metrics/g_eval/utils.py +2 -2
  23. deepeval/metrics/indicator.py +4 -4
  24. deepeval/metrics/multimodal_metrics/__init__.py +0 -18
  25. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
  26. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
  27. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
  28. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
  29. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
  30. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
  31. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
  32. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
  33. deepeval/metrics/ragas.py +3 -3
  34. deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
  35. deepeval/metrics/turn_contextual_precision/schema.py +21 -0
  36. deepeval/metrics/turn_contextual_precision/template.py +187 -0
  37. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
  38. deepeval/metrics/turn_contextual_recall/schema.py +21 -0
  39. deepeval/metrics/turn_contextual_recall/template.py +178 -0
  40. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
  41. deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
  42. deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
  43. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
  44. deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
  45. deepeval/metrics/turn_faithfulness/template.py +218 -0
  46. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
  47. deepeval/metrics/utils.py +39 -58
  48. deepeval/models/__init__.py +0 -12
  49. deepeval/models/base_model.py +16 -38
  50. deepeval/models/embedding_models/__init__.py +7 -0
  51. deepeval/models/embedding_models/azure_embedding_model.py +52 -28
  52. deepeval/models/embedding_models/local_embedding_model.py +18 -14
  53. deepeval/models/embedding_models/ollama_embedding_model.py +38 -16
  54. deepeval/models/embedding_models/openai_embedding_model.py +40 -21
  55. deepeval/models/llms/amazon_bedrock_model.py +1 -2
  56. deepeval/models/llms/anthropic_model.py +44 -23
  57. deepeval/models/llms/azure_model.py +121 -36
  58. deepeval/models/llms/deepseek_model.py +18 -13
  59. deepeval/models/llms/gemini_model.py +129 -43
  60. deepeval/models/llms/grok_model.py +18 -13
  61. deepeval/models/llms/kimi_model.py +18 -13
  62. deepeval/models/llms/litellm_model.py +42 -22
  63. deepeval/models/llms/local_model.py +12 -7
  64. deepeval/models/llms/ollama_model.py +114 -12
  65. deepeval/models/llms/openai_model.py +137 -41
  66. deepeval/models/llms/portkey_model.py +24 -7
  67. deepeval/models/llms/utils.py +5 -3
  68. deepeval/models/retry_policy.py +17 -14
  69. deepeval/models/utils.py +46 -1
  70. deepeval/optimizer/__init__.py +5 -0
  71. deepeval/optimizer/algorithms/__init__.py +6 -0
  72. deepeval/optimizer/algorithms/base.py +29 -0
  73. deepeval/optimizer/algorithms/configs.py +18 -0
  74. deepeval/optimizer/algorithms/copro/__init__.py +5 -0
  75. deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
  76. deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
  77. deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
  78. deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
  79. deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
  80. deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
  81. deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
  82. deepeval/optimizer/algorithms/simba/__init__.py +5 -0
  83. deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
  84. deepeval/{optimization → optimizer}/configs.py +5 -8
  85. deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
  86. deepeval/optimizer/prompt_optimizer.py +263 -0
  87. deepeval/optimizer/rewriter/__init__.py +5 -0
  88. deepeval/optimizer/rewriter/rewriter.py +124 -0
  89. deepeval/optimizer/rewriter/utils.py +214 -0
  90. deepeval/optimizer/scorer/__init__.py +5 -0
  91. deepeval/optimizer/scorer/base.py +86 -0
  92. deepeval/optimizer/scorer/scorer.py +316 -0
  93. deepeval/optimizer/scorer/utils.py +30 -0
  94. deepeval/optimizer/types.py +148 -0
  95. deepeval/{optimization → optimizer}/utils.py +47 -165
  96. deepeval/prompt/prompt.py +5 -9
  97. deepeval/test_case/__init__.py +1 -3
  98. deepeval/test_case/api.py +12 -10
  99. deepeval/test_case/conversational_test_case.py +19 -1
  100. deepeval/test_case/llm_test_case.py +152 -1
  101. deepeval/test_case/utils.py +4 -8
  102. deepeval/test_run/api.py +15 -14
  103. deepeval/test_run/test_run.py +3 -3
  104. deepeval/tracing/patchers.py +9 -4
  105. deepeval/tracing/tracing.py +2 -2
  106. deepeval/utils.py +65 -0
  107. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
  108. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/RECORD +116 -125
  109. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
  110. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
  111. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
  112. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
  113. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
  114. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
  115. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
  116. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
  117. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
  118. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
  119. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
  120. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
  121. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
  122. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
  123. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
  124. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
  125. deepeval/models/mlllms/__init__.py +0 -4
  126. deepeval/models/mlllms/azure_model.py +0 -343
  127. deepeval/models/mlllms/gemini_model.py +0 -313
  128. deepeval/models/mlllms/ollama_model.py +0 -175
  129. deepeval/models/mlllms/openai_model.py +0 -309
  130. deepeval/optimization/__init__.py +0 -13
  131. deepeval/optimization/adapters/__init__.py +0 -2
  132. deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
  133. deepeval/optimization/aggregates.py +0 -14
  134. deepeval/optimization/copro/configs.py +0 -31
  135. deepeval/optimization/gepa/__init__.py +0 -7
  136. deepeval/optimization/gepa/configs.py +0 -115
  137. deepeval/optimization/miprov2/configs.py +0 -134
  138. deepeval/optimization/miprov2/loop.py +0 -785
  139. deepeval/optimization/mutations/__init__.py +0 -0
  140. deepeval/optimization/mutations/prompt_rewriter.py +0 -458
  141. deepeval/optimization/policies/__init__.py +0 -16
  142. deepeval/optimization/policies/tie_breaker.py +0 -67
  143. deepeval/optimization/prompt_optimizer.py +0 -462
  144. deepeval/optimization/simba/__init__.py +0 -0
  145. deepeval/optimization/simba/configs.py +0 -33
  146. deepeval/optimization/types.py +0 -361
  147. deepeval/test_case/mllm_test_case.py +0 -170
  148. /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
  149. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
  150. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
  151. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
  152. /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
  153. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
  154. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
  155. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py CHANGED
@@ -53,53 +53,45 @@ class MultimodalGEvalTemplate:
  else ""
  )

- return (
- [
- textwrap.dedent(
- f"""You are an evaluator. Given the following {dependencies}, assess the response below and return a JSON object with two fields:
-
- - `"score"`: an integer between {score_range[0]} and {score_range[1]}, {score_explanation}.
- - `"reason"`: a brief explanation for why the score was given. This must mention specific strengths or shortcomings, referencing relevant details from the input. Do **not** quote the score itself in the explanation.
-
- Your explanation should:
- - {reasoning_expectation}
- - Mention key details from the test case parameters.
- - Be concise, clear, and focused on the evaluation logic.
-
- Only return valid JSON. Do **not** include any extra commentary or text.
-
- ---
-
- Evaluation Steps:
- {evaluation_steps}
-
- {rubric_text}
- Test Case:
- ************************
- """
- )
- ]
- + test_case_list
- + [
- textwrap.dedent(
- f"""
- ************************
- \n\n\n
- Parameters:
- {parameters}
- {additional_context}
-
- ---
- **Example JSON:**
- {{
- "reason": "your concise and informative reason here",
- "score": {score_range[0]}
- }}
-
- JSON:
- """
- )
- ]
+ return textwrap.dedent(
+ f"""You are an evaluator. Given the following {dependencies}, assess the response below and return a JSON object with two fields:
+
+ - `"score"`: an integer between {score_range[0]} and {score_range[1]}, {score_explanation}.
+ - `"reason"`: a brief explanation for why the score was given. This must mention specific strengths or shortcomings, referencing relevant details from the input. Do **not** quote the score itself in the explanation.
+
+ Your explanation should:
+ - {reasoning_expectation}
+ - Mention key details from the test case parameters.
+ - Be concise, clear, and focused on the evaluation logic.
+
+ Only return valid JSON. Do **not** include any extra commentary or text.
+
+ ---
+
+ Evaluation Steps:
+ {evaluation_steps}
+
+ {rubric_text}
+ Test Case:
+ ************************
+
+ {test_case_list}
+
+ ************************
+ \n\n\n
+ Parameters:
+ {parameters}
+ {additional_context}
+
+ ---
+ **Example JSON:**
+ {{
+ "reason": "your concise and informative reason here",
+ "score": {score_range[0]}
+ }}
+
+ JSON:
+ """
  )

  @staticmethod
@@ -114,35 +106,28 @@ class MultimodalGEvalTemplate:
  if _additional_context
  else ""
  )
- return (
- [
- textwrap.dedent(
- f"""Given the evaluation steps, return a JSON with two keys: 1) a `score` key that is STRICTLY EITHER 1 (follows the criteria 100% outlined in the evaluation steps), OR 0 (does not follow the criteria), and 2) a `reason` key, a reason for the given score, but DO NOT QUOTE THE SCORE in your reason. Please mention specific information from {parameters} in your reason, but be very concise with it!
-
- Evaluation Steps:
- {evaluation_steps}
- ************************
- """
- )
- ]
- + test_case_list
- + [
- textwrap.dedent(
- f"""
- ************************
- {additional_context}
- **
- IMPORTANT: Please make sure to only return in JSON format, with the "score" and "reason" key. No words or explanation is needed.
-
- Example JSON:
- {{
- "reason": "The text does not follow the evaluation steps provided.",
- "score": 0
- }}
- **
-
- JSON:
- """
- )
- ]
+ return textwrap.dedent(
+ f"""Given the evaluation steps, return a JSON with two keys: 1) a `score` key that is STRICTLY EITHER 1 (follows the criteria 100% outlined in the evaluation steps), OR 0 (does not follow the criteria), and 2) a `reason` key, a reason for the given score, but DO NOT QUOTE THE SCORE in your reason. Please mention specific information from {parameters} in your reason, but be very concise with it!
+
+ Evaluation Steps:
+ {evaluation_steps}
+ ************************
+
+ {test_case_list}
+
+ ************************
+ {additional_context}
+
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the "score" and "reason" key. No words or explanation is needed.
+
+ Example JSON:
+ {{
+ "reason": "The text does not follow the evaluation steps provided.",
+ "score": 0
+ }}
+ **
+
+ JSON:
+ """
  )
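Note on the refactor above: the G-Eval prompt is no longer assembled as a Python list ([instruction head] + test_case_list + [instruction tail]) but as a single dedented f-string with {test_case_list} interpolated into it. A minimal standalone sketch of that shape, using invented placeholder values rather than the library's real template text:

import textwrap

# placeholder values for illustration only
evaluation_steps = "1. Check that the answer addresses the input."
test_case_list = ["\n\n\nInput:\n", "What is the capital of France?", "\n\n\nActual Output:\n", "Paris."]

prompt = textwrap.dedent(
    f"""Evaluation Steps:
    {evaluation_steps}
    ************************

    {test_case_list}

    ************************
    JSON:
    """
)
# {test_case_list} is rendered via str(), so every part (including any MLLMImage
# objects it may contain) is now embedded in one prompt string instead of being
# passed as separate list elements alongside the text.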
deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py CHANGED
@@ -1,29 +1,26 @@
- from deepeval.test_case import MLLMTestCaseParams, MLLMTestCase, ToolCall
- from deepeval.test_case.mllm_test_case import MLLMImage
- from deepeval.models.mlllms.openai_model import (
+ from deepeval.test_case import LLMTestCaseParams, LLMTestCase, ToolCall
+ from deepeval.test_case import MLLMImage
+ from deepeval.models.llms.openai_model import (
  unsupported_log_probs_multimodal_gpt_models,
  )
- from deepeval.models import (
- DeepEvalBaseMLLM,
- MultimodalOpenAIModel,
- )
+ from deepeval.models import DeepEvalBaseLLM, GPTModel

  from typing import List, Union


  G_EVAL_PARAMS = {
- MLLMTestCaseParams.INPUT: "Input",
- MLLMTestCaseParams.ACTUAL_OUTPUT: "Actual Output",
- MLLMTestCaseParams.EXPECTED_OUTPUT: "Expected Output",
- MLLMTestCaseParams.CONTEXT: "Context",
- MLLMTestCaseParams.RETRIEVAL_CONTEXT: "Retrieval Context",
- MLLMTestCaseParams.EXPECTED_TOOLS: "Expected Tools",
- MLLMTestCaseParams.TOOLS_CALLED: "Tools Called",
+ LLMTestCaseParams.INPUT: "Input",
+ LLMTestCaseParams.ACTUAL_OUTPUT: "Actual Output",
+ LLMTestCaseParams.EXPECTED_OUTPUT: "Expected Output",
+ LLMTestCaseParams.CONTEXT: "Context",
+ LLMTestCaseParams.RETRIEVAL_CONTEXT: "Retrieval Context",
+ LLMTestCaseParams.EXPECTED_TOOLS: "Expected Tools",
+ LLMTestCaseParams.TOOLS_CALLED: "Tools Called",
  }


  def construct_g_eval_params_string(
- mllm_test_case_params: List[MLLMTestCaseParams],
+ mllm_test_case_params: List[LLMTestCaseParams],
  ):
  g_eval_params = [G_EVAL_PARAMS[param] for param in mllm_test_case_params]
  if len(g_eval_params) == 1:
@@ -39,12 +36,14 @@ def construct_g_eval_params_string(


  def construct_test_case_list(
- evaluation_params: List[MLLMTestCaseParams], test_case: MLLMTestCase
+ evaluation_params: List[LLMTestCaseParams], test_case: LLMTestCase
  ) -> List[Union[str, MLLMImage]]:
+ from deepeval.utils import convert_to_multi_modal_array
+
  test_case_list = []
  for param in evaluation_params:
  test_case_param_list = [f"\n\n\n{G_EVAL_PARAMS[param]}:\n"]
- value = getattr(test_case, param.value)
+ value = convert_to_multi_modal_array(getattr(test_case, param.value))
  for v in value:
  if isinstance(v, ToolCall):
  test_case_param_list.append(repr(v))
@@ -54,15 +53,16 @@ def construct_test_case_list(
  return test_case_list


- def no_multimodal_log_prob_support(model: Union[str, DeepEvalBaseMLLM]):
+ def no_multimodal_log_prob_support(model: Union[str, DeepEvalBaseLLM]):
  if (
  isinstance(model, str)
  and model in unsupported_log_probs_multimodal_gpt_models
  ):
  return True
  elif (
- isinstance(model, MultimodalOpenAIModel)
- and model.model_name in unsupported_log_probs_multimodal_gpt_models
+ isinstance(model, GPTModel)
+ and model.get_model_name()
+ in unsupported_log_probs_multimodal_gpt_models
  ):
  return True
  return False
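The changes above (and the text_to_image changes that follow) route test-case fields through deepeval.utils.convert_to_multi_modal_array before iterating over them. That helper lives in deepeval/utils.py (+65 lines, not shown in this excerpt); the sketch below is only a guess at the normalization pattern the call sites imply, with a hypothetical name, not the library's actual implementation:

from typing import Any, List

def to_multimodal_parts(value: Any) -> List[Any]:
    # Assumed behavior: wrap a single string/object into a one-element list,
    # pass an existing list or tuple of text and image parts through as a list.
    if value is None:
        return []
    if isinstance(value, (list, tuple)):
        return list(value)
    return [value]

to_multimodal_parts("What is in this image?")        # -> ["What is in this image?"]
to_multimodal_parts(["Describe:", "<MLLMImage>"])    # -> ["Describe:", "<MLLMImage>"]

This lets the same G-Eval code path accept both plain-string LLMTestCase fields and list-valued multimodal fields.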
deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py CHANGED
@@ -4,37 +4,40 @@ import math
  import textwrap

  from deepeval.metrics import BaseMultimodalMetric
- from deepeval.test_case import MLLMTestCaseParams, MLLMTestCase, MLLMImage
+ from deepeval.test_case import LLMTestCaseParams, LLMTestCase, MLLMImage
  from deepeval.metrics.multimodal_metrics.text_to_image.template import (
  TextToImageTemplate,
  )
- from deepeval.utils import get_or_create_event_loop
+ from deepeval.utils import (
+ get_or_create_event_loop,
+ convert_to_multi_modal_array,
+ )
  from deepeval.metrics.utils import (
  construct_verbose_logs,
  trimAndLoadJson,
  check_mllm_test_case_params,
- initialize_multimodal_model,
+ initialize_model,
  )
- from deepeval.models import DeepEvalBaseMLLM
+ from deepeval.models import DeepEvalBaseLLM
  from deepeval.metrics.multimodal_metrics.text_to_image.schema import ReasonScore
  from deepeval.metrics.indicator import metric_progress_indicator

- required_params: List[MLLMTestCaseParams] = [
- MLLMTestCaseParams.INPUT,
- MLLMTestCaseParams.ACTUAL_OUTPUT,
+ required_params: List[LLMTestCaseParams] = [
+ LLMTestCaseParams.INPUT,
+ LLMTestCaseParams.ACTUAL_OUTPUT,
  ]


  class TextToImageMetric(BaseMultimodalMetric):
  def __init__(
  self,
- model: Optional[Union[str, DeepEvalBaseMLLM]] = None,
+ model: Optional[Union[str, DeepEvalBaseLLM]] = None,
  threshold: float = 0.5,
  async_mode: bool = True,
  strict_mode: bool = False,
  verbose_mode: bool = False,
  ):
- self.model, self.using_native_model = initialize_multimodal_model(model)
+ self.model, self.using_native_model = initialize_model(model)
  self.evaluation_model = self.model.get_model_name()
  self.threshold = 1 if strict_mode else threshold
  self.strict_mode = strict_mode
@@ -43,11 +46,13 @@ class TextToImageMetric(BaseMultimodalMetric):

  def measure(
  self,
- test_case: MLLMTestCase,
+ test_case: LLMTestCase,
  _show_indicator: bool = True,
  _in_component: bool = False,
  ) -> float:
- check_mllm_test_case_params(test_case, required_params, 0, 1, self)
+ check_mllm_test_case_params(
+ test_case, required_params, 0, 1, self, self.model
+ )

  self.evaluation_cost = 0 if self.using_native_model else None
  with metric_progress_indicator(
@@ -63,10 +68,12 @@ class TextToImageMetric(BaseMultimodalMetric):
  )
  )
  else:
- input_texts, _ = self.separate_images_from_text(test_case.input)
- _, output_images = self.separate_images_from_text(
+ input = convert_to_multi_modal_array(test_case.input)
+ actual_output = convert_to_multi_modal_array(
  test_case.actual_output
  )
+ input_texts, _ = self.separate_images_from_text(input)
+ _, output_images = self.separate_images_from_text(actual_output)

  self.SC_scores, self.SC_reasoning = (
  self._evaluate_semantic_consistency(
@@ -99,11 +106,13 @@ class TextToImageMetric(BaseMultimodalMetric):

  async def a_measure(
  self,
- test_case: MLLMTestCase,
+ test_case: LLMTestCase,
  _show_indicator: bool = True,
  _in_component: bool = False,
  ) -> float:
- check_mllm_test_case_params(test_case, required_params, 0, 1, self)
+ check_mllm_test_case_params(
+ test_case, required_params, 0, 1, self, self.model
+ )

  self.evaluation_cost = 0 if self.using_native_model else None
  with metric_progress_indicator(
@@ -112,10 +121,12 @@ class TextToImageMetric(BaseMultimodalMetric):
  _show_indicator=_show_indicator,
  _in_component=_in_component,
  ):
- input_texts, _ = self.separate_images_from_text(test_case.input)
- _, output_images = self.separate_images_from_text(
+ input = convert_to_multi_modal_array(test_case.input)
+ actual_output = convert_to_multi_modal_array(
  test_case.actual_output
  )
+ input_texts, _ = self.separate_images_from_text(input)
+ _, output_images = self.separate_images_from_text(actual_output)
  (self.SC_scores, self.SC_reasoning), (
  self.PQ_scores,
  self.PQ_reasoning,
@@ -165,27 +176,27 @@ class TextToImageMetric(BaseMultimodalMetric):
  ) -> Tuple[List[int], str]:
  images: List[MLLMImage] = []
  images.append(actual_image_output)
- prompt = [
- TextToImageTemplate.generate_semantic_consistency_evaluation_results(
- text_prompt=text_prompt
- )
- ]
+ prompt = f"""
+ {
+ TextToImageTemplate.generate_semantic_consistency_evaluation_results(
+ text_prompt=text_prompt
+ )
+ }
+ Images:
+ {images}
+ """
  if self.using_native_model:
- res, cost = await self.model.a_generate(
- prompt + images, ReasonScore
- )
+ res, cost = await self.model.a_generate(prompt, ReasonScore)
  self.evaluation_cost += cost
  return res.score, res.reasoning
  else:
  try:
  res: ReasonScore = await self.model.a_generate(
- prompt + images, schema=ReasonScore
+ prompt, schema=ReasonScore
  )
  return res.score, res.reasoning
  except TypeError:
- res = await self.model.a_generate(
- prompt + images, input_text=prompt
- )
+ res = await self.model.a_generate(prompt, input_text=prompt)
  data = trimAndLoadJson(res, self)
  return data["score"], data["reasoning"]

@@ -196,23 +207,27 @@ class TextToImageMetric(BaseMultimodalMetric):
  ) -> Tuple[List[int], str]:
  images: List[MLLMImage] = []
  images.append(actual_image_output)
- prompt = [
- TextToImageTemplate.generate_semantic_consistency_evaluation_results(
- text_prompt=text_prompt
- )
- ]
+ prompt = f"""
+ {
+ TextToImageTemplate.generate_semantic_consistency_evaluation_results(
+ text_prompt=text_prompt
+ )
+ }
+ Images:
+ {images}
+ """
  if self.using_native_model:
- res, cost = self.model.generate(prompt + images, ReasonScore)
+ res, cost = self.model.generate(prompt, ReasonScore)
  self.evaluation_cost += cost
  return res.score, res.reasoning
  else:
  try:
  res: ReasonScore = self.model.generate(
- prompt + images, schema=ReasonScore
+ prompt, schema=ReasonScore
  )
  return res.score, res.reasoning
  except TypeError:
- res = self.model.generate(prompt + images)
+ res = self.model.generate(prompt)
  data = trimAndLoadJson(res, self)
  return data["score"], data["reasoning"]

@@ -220,23 +235,25 @@ class TextToImageMetric(BaseMultimodalMetric):
  self, actual_image_output: MLLMImage
  ) -> Tuple[List[int], str]:
  images: List[MLLMImage] = [actual_image_output]
- prompt = [
- TextToImageTemplate.generate_perceptual_quality_evaluation_results()
- ]
+ prompt = f"""
+ {
+ TextToImageTemplate.generate_perceptual_quality_evaluation_results()
+ }
+ Images:
+ {images}
+ """
  if self.using_native_model:
- res, cost = await self.model.a_generate(
- prompt + images, ReasonScore
- )
+ res, cost = await self.model.a_generate(prompt, ReasonScore)
  self.evaluation_cost += cost
  return res.score, res.reasoning
  else:
  try:
  res: ReasonScore = await self.model.a_generate(
- prompt + images, schema=ReasonScore
+ prompt, schema=ReasonScore
  )
  return res.score, res.reasoning
  except TypeError:
- res = await self.model.a_generate(prompt + images)
+ res = await self.model.a_generate(prompt)
  data = trimAndLoadJson(res, self)
  return data["score"], data["reasoning"]

@@ -244,9 +261,13 @@ class TextToImageMetric(BaseMultimodalMetric):
  self, actual_image_output: MLLMImage
  ) -> Tuple[List[int], str]:
  images: List[MLLMImage] = [actual_image_output]
- prompt = [
- TextToImageTemplate.generate_perceptual_quality_evaluation_results()
- ]
+ prompt = f"""
+ {
+ TextToImageTemplate.generate_perceptual_quality_evaluation_results()
+ }
+ Images:
+ {images}
+ """
  if self.using_native_model:
  res, cost = self.model.generate(prompt + images, ReasonScore)
  self.evaluation_cost += cost
@@ -254,11 +275,11 @@ class TextToImageMetric(BaseMultimodalMetric):
  else:
  try:
  res: ReasonScore = self.model.generate(
- prompt + images, schema=ReasonScore
+ prompt, schema=ReasonScore
  )
  return res.score, res.reasoning
  except TypeError:
- res = self.model.generate(prompt + images)
+ res = self.model.generate(prompt)
  data = trimAndLoadJson(res, self)
  return data["score"], data["reasoning"]

deepeval/metrics/ragas.py CHANGED
@@ -10,7 +10,7 @@ from deepeval.telemetry import capture_metric_type

  # check langchain availability
  try:
- import langchain_core
+ import langchain_core # noqa: F401
  from langchain_core.language_models import BaseChatModel
  from langchain_core.embeddings import Embeddings

@@ -501,7 +501,7 @@ class RagasMetric(BaseMetric):
  def measure(self, test_case: LLMTestCase):
  # sends to server
  try:
- from ragas import evaluate
+ from ragas import evaluate # noqa: F401
  except ModuleNotFoundError:
  raise ModuleNotFoundError(
  "Please install ragas to use this metric. `pip install ragas`."
@@ -509,7 +509,7 @@

  try:
  # How do i make sure this isn't just huggingface dataset
- from datasets import Dataset
+ from datasets import Dataset # noqa: F401
  except ModuleNotFoundError:
  raise ModuleNotFoundError("Please install dataset")

deepeval/metrics/tool_correctness/tool_correctness.py CHANGED
@@ -83,7 +83,7 @@ class ToolCorrectnessMetric(BaseMetric):
  self.tools_called: List[ToolCall] = test_case.tools_called
  self.expected_tools: List[ToolCall] = test_case.expected_tools
  tool_calling_score = self._calculate_score()
- if self.available_tools:
+ if self.available_tools and not test_case.multimodal:
  tool_selection_score = self._get_tool_selection_score(
  test_case.input,
  test_case.tools_called,
@@ -177,7 +177,7 @@ class ToolCorrectnessMetric(BaseMetric):
  self.tools_called: List[ToolCall] = test_case.tools_called
  self.expected_tools: List[ToolCall] = test_case.expected_tools
  tool_calling_score = self._calculate_score()
- if self.available_tools:
+ if self.available_tools and not test_case.multimodal:
  tool_selection_score = await self._a_get_tool_selection_score(
  test_case.input,
  test_case.tools_called,
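Both the sync and async paths above now skip the LLM-based tool-selection score whenever the test case is multimodal; plain tool-call matching against expected_tools still runs. A hedged usage sketch (the multimodal attribute is read off the test case in the diff; whether it is set directly or derived from image content is an assumption here):

from deepeval.test_case import LLMTestCase, ToolCall

tc = LLMTestCase(
    input="Book a table for two at 7pm",
    actual_output="Booked.",
    tools_called=[ToolCall(name="book_table")],
    expected_tools=[ToolCall(name="book_table")],
)
print(tc.multimodal)  # assumed flag on LLMTestCase in 3.7.5 (see llm_test_case.py +152 -1);
                      # when truthy, ToolCorrectnessMetric only computes the tool-calling score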
deepeval/metrics/turn_contextual_precision/schema.py ADDED
@@ -0,0 +1,21 @@
+ from typing import List
+ from pydantic import BaseModel
+
+
+ class ContextualPrecisionVerdict(BaseModel):
+     verdict: str
+     reason: str
+
+
+ class Verdicts(BaseModel):
+     verdicts: List[ContextualPrecisionVerdict]
+
+
+ class ContextualPrecisionScoreReason(BaseModel):
+     reason: str
+
+
+ class InteractionContextualPrecisionScore(BaseModel):
+     score: float
+     reason: str
+     verdicts: List[ContextualPrecisionVerdict]
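These schemas follow the same verdict/score pattern as the single-turn contextual precision metric: the judge model is prompted for JSON and the reply is validated into typed objects. A small sketch of how such a payload would parse (the JSON text is an invented example, not real metric output):

from deepeval.metrics.turn_contextual_precision.schema import Verdicts, InteractionContextualPrecisionScore

raw = '{"verdicts": [{"verdict": "yes", "reason": "The first retrieved chunk answers the user question."}]}'
parsed = Verdicts.model_validate_json(raw)  # pydantic v2 parsing
print(parsed.verdicts[0].verdict)           # -> "yes"

interaction = InteractionContextualPrecisionScore(
    score=1.0,
    reason="All relevant nodes were ranked first.",
    verdicts=parsed.verdicts,
)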