deepeval 3.7.5__py3-none-any.whl → 3.7.7__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (150)
  1. deepeval/_version.py +1 -1
  2. deepeval/cli/main.py +2022 -759
  3. deepeval/cli/utils.py +208 -36
  4. deepeval/config/dotenv_handler.py +19 -0
  5. deepeval/config/settings.py +675 -245
  6. deepeval/config/utils.py +9 -1
  7. deepeval/dataset/api.py +23 -1
  8. deepeval/dataset/golden.py +106 -21
  9. deepeval/evaluate/evaluate.py +0 -3
  10. deepeval/evaluate/execute.py +162 -315
  11. deepeval/evaluate/utils.py +6 -30
  12. deepeval/key_handler.py +124 -51
  13. deepeval/metrics/__init__.py +0 -4
  14. deepeval/metrics/answer_relevancy/answer_relevancy.py +89 -132
  15. deepeval/metrics/answer_relevancy/template.py +102 -179
  16. deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
  17. deepeval/metrics/arena_g_eval/template.py +17 -1
  18. deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
  19. deepeval/metrics/argument_correctness/template.py +19 -2
  20. deepeval/metrics/base_metric.py +19 -41
  21. deepeval/metrics/bias/bias.py +102 -108
  22. deepeval/metrics/bias/template.py +14 -2
  23. deepeval/metrics/contextual_precision/contextual_precision.py +56 -92
  24. deepeval/metrics/contextual_recall/contextual_recall.py +58 -85
  25. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +53 -83
  26. deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
  27. deepeval/metrics/conversation_completeness/template.py +23 -3
  28. deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
  29. deepeval/metrics/conversational_dag/nodes.py +66 -123
  30. deepeval/metrics/conversational_dag/templates.py +16 -0
  31. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
  32. deepeval/metrics/dag/dag.py +10 -0
  33. deepeval/metrics/dag/nodes.py +63 -126
  34. deepeval/metrics/dag/templates.py +14 -0
  35. deepeval/metrics/exact_match/exact_match.py +9 -1
  36. deepeval/metrics/faithfulness/faithfulness.py +82 -136
  37. deepeval/metrics/g_eval/g_eval.py +93 -79
  38. deepeval/metrics/g_eval/template.py +18 -1
  39. deepeval/metrics/g_eval/utils.py +7 -6
  40. deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
  41. deepeval/metrics/goal_accuracy/template.py +21 -3
  42. deepeval/metrics/hallucination/hallucination.py +60 -75
  43. deepeval/metrics/hallucination/template.py +13 -0
  44. deepeval/metrics/indicator.py +11 -10
  45. deepeval/metrics/json_correctness/json_correctness.py +40 -38
  46. deepeval/metrics/json_correctness/template.py +10 -0
  47. deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
  48. deepeval/metrics/knowledge_retention/schema.py +9 -3
  49. deepeval/metrics/knowledge_retention/template.py +12 -0
  50. deepeval/metrics/mcp/mcp_task_completion.py +72 -43
  51. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +93 -75
  52. deepeval/metrics/mcp/schema.py +4 -0
  53. deepeval/metrics/mcp/template.py +59 -0
  54. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
  55. deepeval/metrics/mcp_use_metric/template.py +12 -0
  56. deepeval/metrics/misuse/misuse.py +77 -97
  57. deepeval/metrics/misuse/template.py +15 -0
  58. deepeval/metrics/multimodal_metrics/__init__.py +0 -1
  59. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +37 -38
  60. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +55 -76
  61. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +37 -38
  62. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +37 -38
  63. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +57 -76
  64. deepeval/metrics/non_advice/non_advice.py +79 -105
  65. deepeval/metrics/non_advice/template.py +12 -0
  66. deepeval/metrics/pattern_match/pattern_match.py +12 -4
  67. deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
  68. deepeval/metrics/pii_leakage/template.py +14 -0
  69. deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
  70. deepeval/metrics/plan_adherence/template.py +11 -0
  71. deepeval/metrics/plan_quality/plan_quality.py +63 -87
  72. deepeval/metrics/plan_quality/template.py +9 -0
  73. deepeval/metrics/prompt_alignment/prompt_alignment.py +78 -86
  74. deepeval/metrics/prompt_alignment/template.py +12 -0
  75. deepeval/metrics/role_adherence/role_adherence.py +48 -71
  76. deepeval/metrics/role_adherence/template.py +14 -0
  77. deepeval/metrics/role_violation/role_violation.py +75 -108
  78. deepeval/metrics/role_violation/template.py +12 -0
  79. deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
  80. deepeval/metrics/step_efficiency/template.py +11 -0
  81. deepeval/metrics/summarization/summarization.py +115 -183
  82. deepeval/metrics/summarization/template.py +19 -0
  83. deepeval/metrics/task_completion/task_completion.py +67 -73
  84. deepeval/metrics/tool_correctness/tool_correctness.py +43 -42
  85. deepeval/metrics/tool_use/schema.py +4 -0
  86. deepeval/metrics/tool_use/template.py +16 -2
  87. deepeval/metrics/tool_use/tool_use.py +72 -94
  88. deepeval/metrics/topic_adherence/schema.py +4 -0
  89. deepeval/metrics/topic_adherence/template.py +21 -1
  90. deepeval/metrics/topic_adherence/topic_adherence.py +68 -81
  91. deepeval/metrics/toxicity/template.py +13 -0
  92. deepeval/metrics/toxicity/toxicity.py +80 -99
  93. deepeval/metrics/turn_contextual_precision/schema.py +3 -3
  94. deepeval/metrics/turn_contextual_precision/template.py +9 -2
  95. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +154 -154
  96. deepeval/metrics/turn_contextual_recall/schema.py +3 -3
  97. deepeval/metrics/turn_contextual_recall/template.py +8 -1
  98. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +148 -143
  99. deepeval/metrics/turn_contextual_relevancy/schema.py +2 -2
  100. deepeval/metrics/turn_contextual_relevancy/template.py +8 -1
  101. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +154 -157
  102. deepeval/metrics/turn_faithfulness/schema.py +1 -1
  103. deepeval/metrics/turn_faithfulness/template.py +8 -1
  104. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +180 -203
  105. deepeval/metrics/turn_relevancy/template.py +14 -0
  106. deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
  107. deepeval/metrics/utils.py +161 -91
  108. deepeval/models/__init__.py +2 -0
  109. deepeval/models/base_model.py +44 -6
  110. deepeval/models/embedding_models/azure_embedding_model.py +34 -12
  111. deepeval/models/embedding_models/local_embedding_model.py +22 -7
  112. deepeval/models/embedding_models/ollama_embedding_model.py +17 -6
  113. deepeval/models/embedding_models/openai_embedding_model.py +3 -2
  114. deepeval/models/llms/__init__.py +2 -0
  115. deepeval/models/llms/amazon_bedrock_model.py +229 -73
  116. deepeval/models/llms/anthropic_model.py +143 -48
  117. deepeval/models/llms/azure_model.py +169 -95
  118. deepeval/models/llms/constants.py +2032 -0
  119. deepeval/models/llms/deepseek_model.py +82 -35
  120. deepeval/models/llms/gemini_model.py +126 -67
  121. deepeval/models/llms/grok_model.py +128 -65
  122. deepeval/models/llms/kimi_model.py +129 -87
  123. deepeval/models/llms/litellm_model.py +94 -18
  124. deepeval/models/llms/local_model.py +115 -16
  125. deepeval/models/llms/ollama_model.py +97 -76
  126. deepeval/models/llms/openai_model.py +169 -311
  127. deepeval/models/llms/portkey_model.py +58 -16
  128. deepeval/models/llms/utils.py +5 -2
  129. deepeval/models/retry_policy.py +10 -5
  130. deepeval/models/utils.py +56 -4
  131. deepeval/simulator/conversation_simulator.py +49 -2
  132. deepeval/simulator/template.py +16 -1
  133. deepeval/synthesizer/synthesizer.py +19 -17
  134. deepeval/test_case/api.py +24 -45
  135. deepeval/test_case/arena_test_case.py +7 -2
  136. deepeval/test_case/conversational_test_case.py +55 -6
  137. deepeval/test_case/llm_test_case.py +60 -6
  138. deepeval/test_run/api.py +3 -0
  139. deepeval/test_run/test_run.py +6 -1
  140. deepeval/utils.py +26 -0
  141. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/METADATA +3 -3
  142. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/RECORD +145 -148
  143. deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
  144. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
  145. deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
  146. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -133
  147. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
  148. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/LICENSE.md +0 -0
  149. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/WHEEL +0 -0
  150. {deepeval-3.7.5.dist-info → deepeval-3.7.7.dist-info}/entry_points.txt +0 -0
deepeval/metrics/answer_relevancy/template.py

@@ -3,170 +3,93 @@ import textwrap
 
 
  class AnswerRelevancyTemplate:
+ multimodal_rules = """
+ --- MULTIMODAL INPUT RULES ---
+ - Treat image content as factual evidence.
+ - Only reference visual details that are explicitly and clearly visible.
+ - Do not infer or guess objects, text, or details not visibly present.
+ - If an image is unclear or ambiguous, mark uncertainty explicitly.
+ """
+
  @staticmethod
  def generate_statements(actual_output: str, multimodal: bool = False):
- multimodal_instruction = ""
- example_text = ""
- example_json = ""
-
- if multimodal:
- multimodal_instruction = " The text may contain images as well."
- example_text = "Shoes. The shoes can be refunded at no extra cost. Thanks for asking the question!"
- example_json = textwrap.dedent(
- """
- {{
- "statements": ["Shoes.", "Shoes can be refunded at no extra cost", "Thanks for asking the question!"]
- }}
- """
- )
- else:
- example_text = "Our new laptop model features a high-resolution Retina display for crystal-clear visuals. It also includes a fast-charging battery, giving you up to 12 hours of usage on a single charge. For security, we've added fingerprint authentication and an encrypted SSD. Plus, every purchase comes with a one-year warranty and 24/7 customer support."
- example_json = textwrap.dedent(
- """
- {{
- "statements": [
- "The new laptop model has a high-resolution Retina display.",
- "It includes a fast-charging battery with up to 12 hours of usage.",
- "Security features include fingerprint authentication and an encrypted SSD.",
- "Every purchase comes with a one-year warranty.",
- "24/7 customer support is included."
- ]
- }}
- """
- )
-
- coherence_note = (
- ""
- if multimodal
- else " Ambiguous statements and single words can be considered as statements, but only if outside of a coherent statement."
- )
-
- return textwrap.dedent(
- f"""Given the text, breakdown and generate a list of statements presented.{coherence_note}{multimodal_instruction}
-
- Example:
- Example text:
- {example_text}
-
- {example_json}
- ===== END OF EXAMPLE ======
-
- **
- IMPORTANT: Please make sure to only return in valid and parseable JSON format, with the "statements" key mapping to a list of strings. No words or explanation are needed. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
- **
-
- Text:
- {actual_output}
-
- JSON:
- """
- )
+ return f"""Given the text, breakdown and generate a list of statements presented. Ambiguous statements and single words can be considered as statements, but only if outside of a coherent statement.
+
+ Example:
+ Example text:
+ Our new laptop model features a high-resolution Retina display for crystal-clear visuals. It also includes a fast-charging battery, giving you up to 12 hours of usage on a single charge. For security, we’ve added fingerprint authentication and an encrypted SSD. Plus, every purchase comes with a one-year warranty and 24/7 customer support.
+
+ {AnswerRelevancyTemplate.multimodal_rules if multimodal else ""}
+
+ {{
+ "statements": [
+ "The new laptop model has a high-resolution Retina display.",
+ "It includes a fast-charging battery with up to 12 hours of usage.",
+ "Security features include fingerprint authentication and an encrypted SSD.",
+ "Every purchase comes with a one-year warranty.",
+ "24/7 customer support is included."
+ ]
+ }}
+ ===== END OF EXAMPLE ======
+
+ **
+ IMPORTANT: Please make sure to only return in valid and parseable JSON format, with the "statements" key mapping to a list of strings. No words or explanation are needed. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
+ **
+
+ Text:
+ {actual_output}
+
+ JSON:
+ """
 
  @staticmethod
  def generate_verdicts(
  input: str, statements: str, multimodal: bool = False
  ):
- content_type = (
- "statements (which can contain images)"
- if multimodal
- else "list of statements"
- )
- statement_or_image = "statement or image" if multimodal else "statement"
-
- format_instruction = textwrap.dedent(
- """
- Expected JSON format:
- {{
- "verdicts": [
- {{
- "verdict": "yes"
- }},
- {{
- "reason": <explanation_for_irrelevance>,
- "verdict": "no"
- }},
- {{
- "reason": <explanation_for_ambiguity>,
- "verdict": "idk"
- }}
- ]
- }}
- """
- )
-
- example_section = ""
- if multimodal:
- example_section = textwrap.dedent(
- """
- Example input: What should I do if there is an earthquake?
- Example statements: ["Shoes.", "Thanks for asking the question!", "Is there anything else I can help you with?", "Duck and hide"]
- Example JSON:
- {{
- "verdicts": [
- {{
- "reason": "The 'Shoes.' statement made in the actual output is completely irrelevant to the input, which asks about what to do in the event of an earthquake.",
- "verdict": "no"
- }},
- {{
- "reason": "The statement thanking the user for asking the question is not directly relevant to the input, but is not entirely irrelevant.",
- "verdict": "idk"
- }},
- {{
- "reason": "The question about whether there is anything else the user can help with is not directly relevant to the input, but is not entirely irrelevant.",
- "verdict": "idk"
- }},
- {{
- "verdict": "yes"
- }}
- ]
- }}
- """
- )
-
- guidelines = ""
- if multimodal:
- guidelines = textwrap.dedent(
- f"""
- Since you are going to generate a verdict for each statement and image, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of `statements`.
- """
- )
- else:
- guidelines = textwrap.dedent(
- f"""
- Generate ONE verdict per statement - number of 'verdicts' MUST equal number of statements.
- 'verdict' must be STRICTLY 'yes', 'no', or 'idk':
- - 'yes': statement is relevant to addressing the input
- - 'no': statement is irrelevant to the input
- - 'idk': statement is ambiguous (not directly relevant but could be supporting information)
- Provide 'reason' ONLY for 'no' or 'idk' verdicts.
- """
- )
-
- return textwrap.dedent(
- f"""For the provided {content_type}, determine whether each {statement_or_image} is relevant to address the input.
- {"Please generate a list of JSON with two keys: `verdict` and `reason`." if multimodal else "Generate JSON objects with 'verdict' and 'reason' fields."}
- The 'verdict' {"key " if multimodal else ''}should {"STRICTLY be either a 'yes', 'idk' or 'no'" if multimodal else "be 'yes' (relevant), 'no' (irrelevant), or 'idk' (ambiguous/supporting information)"}. {"Answer 'yes' if the " + statement_or_image + ' is relevant to addressing the original input, no if the ' + statement_or_image + ' is irrelevant, and "idk" if it is ambiguous (eg., not directly relevant but could be used as a supporting point to address the input).' if multimodal else ""}
- {"The 'reason' is the reason for the verdict.' if multimodal else '"}
- Provide 'reason' ONLY for 'no' or 'idk' verdicts.
- The {"provided statements are statements and images' if multimodal else 'statements are from an AI's actual output"} generated in the actual output.
-
- **
- IMPORTANT: Please make sure to only return in valid and parseable JSON format, with the 'verdicts' key mapping to a list of JSON objects. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
-
- {format_instruction if not multimodal else ''}
- {example_section}
- {guidelines}
- **
-
- Input:
- {input}
-
- Statements:
- {statements}
-
- JSON:
- """
- )
+ return f"""For the provided list of statements, determine whether each statement is relevant to address the input.
+ Generate JSON objects with 'verdict' and 'reason' fields.
+ The 'verdict' should be 'yes' (relevant), 'no' (irrelevant), or 'idk' (ambiguous/supporting information).
+ Provide 'reason' ONLY for 'no' or 'idk' verdicts.
+ The statements are from an AI's actual output.
+
+ {AnswerRelevancyTemplate.multimodal_rules if multimodal else ""}
+
+ **
+ IMPORTANT: Please make sure to only return in valid and parseable JSON format, with the 'verdicts' key mapping to a list of JSON objects. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
+
+ Expected JSON format:
+ {{
+ "verdicts": [
+ {{
+ "verdict": "yes"
+ }},
+ {{
+ "reason": <explanation_for_irrelevance>,
+ "verdict": "no"
+ }},
+ {{
+ "reason": <explanation_for_ambiguity>,
+ "verdict": "idk"
+ }}
+ ]
+ }}
+
+ Generate ONE verdict per statement - number of 'verdicts' MUST equal number of statements.
+ 'verdict' must be STRICTLY 'yes', 'no', or 'idk':
+ - 'yes': statement is relevant to addressing the input
+ - 'no': statement is irrelevant to the input
+ - 'idk': statement is ambiguous (not directly relevant but could be supporting information)
+ Provide 'reason' ONLY for 'no' or 'idk' verdicts.
+ **
+
+ Input:
+ {input}
+
+ Statements:
+ {statements}
+
+ JSON:
+ """
 
  @staticmethod
  def generate_reason(
@@ -175,32 +98,32 @@ class AnswerRelevancyTemplate:
  score: float,
  multimodal: bool = False,
  ):
- return textwrap.dedent(
- f"""Given the answer relevancy score, the list of reasons of irrelevant statements made in the actual output, and the input, provide a CONCISE reason for the score. Explain why it is not higher, but also why it is at its current score.
- The irrelevant statements represent things in the actual output that is irrelevant to addressing whatever is asked/talked about in the input.
- If there is nothing irrelevant, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
+ return f"""Given the answer relevancy score, the list of reasons of irrelevant statements made in the actual output, and the input, provide a CONCISE reason for the score. Explain why it is not higher, but also why it is at its current score.
+ The irrelevant statements represent things in the actual output that is irrelevant to addressing whatever is asked/talked about in the input.
+ If there is nothing irrelevant, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
+
+ {AnswerRelevancyTemplate.multimodal_rules if multimodal else ""}
 
+ **
+ IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
 
- **
- IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
+ Example:
+ Example JSON:
+ {{
+ "reason": "The score is <answer_relevancy_score> because <your_reason>."
+ }}
+ ===== END OF EXAMPLE ======
+ **
 
- {"Example:' if not multimodal else '"}
- Example JSON:
- {{
- "reason": "The score is <answer_relevancy_score> because <your_reason>."
- }}
- {"===== END OF EXAMPLE ======' if not multimodal else '"}
- **
 
- Answer Relevancy Score:
- {score}
+ Answer Relevancy Score:
+ {score}
 
- Reasons why the score can't be higher based on irrelevant statements in the actual output:
- {irrelevant_statements}
+ Reasons why the score can't be higher based on irrelevant statements in the actual output:
+ {irrelevant_statements}
 
- Input:
- {input}
+ Input:
+ {input}
 
- JSON:
- """
- )
+ JSON:
+ """
deepeval/metrics/arena_g_eval/arena_g_eval.py

@@ -14,12 +14,17 @@ from deepeval.utils import get_or_create_event_loop, prettify_list
  from deepeval.metrics.utils import (
  check_arena_test_case_params,
  construct_verbose_logs,
- trimAndLoadJson,
  initialize_model,
+ a_generate_with_schema_and_extract,
+ generate_with_schema_and_extract,
  )
  from deepeval.models import DeepEvalBaseLLM
  from deepeval.metrics.indicator import metric_progress_indicator
- from deepeval.metrics.arena_g_eval.schema import *
+ from deepeval.metrics.arena_g_eval.schema import (
+ RewrittenReason,
+ Winner,
+ Steps,
+ )
  from deepeval.metrics.g_eval.utils import (
  construct_g_eval_params_string,
  validate_criteria_and_evaluation_steps,
@@ -62,7 +67,13 @@ class ArenaGEval(BaseArenaMetric):
  _progress: Optional[Progress] = None,
  _pbar_id: Optional[int] = None,
  ) -> str:
- check_arena_test_case_params(test_case, self.evaluation_params, self)
+ check_arena_test_case_params(
+ test_case,
+ self.evaluation_params,
+ self,
+ self.model,
+ test_case.multimodal,
+ )
  self.evaluation_cost = 0 if self.using_native_model else None
 
  with metric_progress_indicator(self, _show_indicator=_show_indicator):
@@ -76,12 +87,12 @@ class ArenaGEval(BaseArenaMetric):
  )
  else:
  self.evaluation_steps: List[str] = (
- self._generate_evaluation_steps()
+ self._generate_evaluation_steps(test_case.multimodal)
  )
  if _progress:
  update_pbar(_progress, _pbar_id)
  masked_winner, masked_reason, dummy_to_real_names = (
- self._compare(test_case)
+ self._compare(test_case, test_case.multimodal)
  )
  if _progress:
  update_pbar(_progress, _pbar_id)
@@ -111,7 +122,13 @@ class ArenaGEval(BaseArenaMetric):
  _progress: Optional[Progress] = None,
  _pbar_id: Optional[int] = None,
  ) -> str:
- check_arena_test_case_params(test_case, self.evaluation_params, self)
+ check_arena_test_case_params(
+ test_case,
+ self.evaluation_params,
+ self,
+ self.model,
+ test_case.multimodal,
+ )
  self.evaluation_cost = 0 if self.using_native_model else None
 
  with metric_progress_indicator(
@@ -120,12 +137,12 @@ class ArenaGEval(BaseArenaMetric):
  _show_indicator=_show_indicator,
  ):
  self.evaluation_steps: List[str] = (
- await self._a_generate_evaluation_steps()
+ await self._a_generate_evaluation_steps(test_case.multimodal)
  )
  if _progress:
  update_pbar(_progress, _pbar_id)
  masked_winner, masked_reason, dummy_to_real_names = (
- await self._a_compare(test_case)
+ await self._a_compare(test_case, test_case.multimodal)
  )
  if _progress:
  update_pbar(_progress, _pbar_id)
@@ -147,7 +164,7 @@ class ArenaGEval(BaseArenaMetric):
  )
  return self.winner
 
- async def _a_generate_evaluation_steps(self) -> List[str]:
+ async def _a_generate_evaluation_steps(self, multimodal: bool) -> List[str]:
  if self.evaluation_steps:
  return self.evaluation_steps
 
@@ -155,23 +172,20 @@ class ArenaGEval(BaseArenaMetric):
  self.evaluation_params
  )
  prompt = ArenaGEvalTemplate.generate_evaluation_steps(
- criteria=self.criteria, parameters=g_eval_params_str
+ criteria=self.criteria,
+ parameters=g_eval_params_str,
+ multimodal=multimodal,
+ )
+
+ return await a_generate_with_schema_and_extract(
+ self,
+ prompt,
+ Steps,
+ extract_schema=lambda s: s.steps,
+ extract_json=lambda data: data["steps"],
  )
- if self.using_native_model:
- res, cost = await self.model.a_generate(prompt)
- self.evaluation_cost += cost
- data = trimAndLoadJson(res, self)
- return data["steps"]
- else:
- try:
- res: Steps = await self.model.a_generate(prompt, schema=Steps)
- return res.steps
- except TypeError:
- res = await self.model.a_generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["steps"]
 
- def _generate_evaluation_steps(self) -> List[str]:
+ def _generate_evaluation_steps(self, multimodal: bool) -> List[str]:
  if self.evaluation_steps:
  return self.evaluation_steps
 
@@ -179,25 +193,20 @@ class ArenaGEval(BaseArenaMetric):
  self.evaluation_params
  )
  prompt = ArenaGEvalTemplate.generate_evaluation_steps(
- criteria=self.criteria, parameters=g_eval_params_str
+ criteria=self.criteria,
+ parameters=g_eval_params_str,
+ multimodal=multimodal,
+ )
+ return generate_with_schema_and_extract(
+ self,
+ prompt,
+ Steps,
+ extract_schema=lambda s: s.steps,
+ extract_json=lambda data: data["steps"],
  )
- if self.using_native_model:
- res, cost = self.model.generate(prompt)
- self.evaluation_cost += cost
- data = trimAndLoadJson(res, self)
- return data["steps"]
- else:
- try:
- res: Steps = self.model.generate(prompt, schema=Steps)
- return res.steps
- except TypeError:
- res = self.model.generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["steps"]
 
  async def _a_compare(
- self,
- test_case: ArenaTestCase,
+ self, test_case: ArenaTestCase, multimodal: bool
  ) -> Tuple[str, str, Dict[str, str]]:
  formatted_test_case, dummy_to_real_names = format_arena_test_case(
  self.evaluation_params, test_case
@@ -209,23 +218,27 @@ class ArenaGEval(BaseArenaMetric):
  evaluation_steps=number_evaluation_steps(self.evaluation_steps),
  test_case_contents=formatted_test_case,
  parameters=g_eval_params_str,
+ multimodal=multimodal,
+ )
+
+ return await a_generate_with_schema_and_extract(
+ self,
+ prompt,
+ Winner,
+ extract_schema=lambda s: (
+ s.winner,
+ s.reason,
+ dummy_to_real_names,
+ ),
+ extract_json=lambda data: (
+ data["winner"],
+ data["reason"],
+ dummy_to_real_names,
+ ),
  )
- if self.using_native_model:
- res, cost = await self.model.a_generate(prompt, schema=Winner)
- self.evaluation_cost += cost
- return res.winner, res.reason, dummy_to_real_names
- else:
- try:
- res: Winner = await self.model.a_generate(prompt, schema=Winner)
- return res.winner, res.reason, dummy_to_real_names
- except TypeError:
- res = await self.model.a_generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["winner"], data["reason"], dummy_to_real_names
 
  def _compare(
- self,
- test_case: ArenaTestCase,
+ self, test_case: ArenaTestCase, multimodal: bool
  ) -> Tuple[str, str, Dict[str, str]]:
  formatted_test_case, dummy_to_real_names = format_arena_test_case(
  self.evaluation_params, test_case
@@ -237,19 +250,23 @@ class ArenaGEval(BaseArenaMetric):
  evaluation_steps=number_evaluation_steps(self.evaluation_steps),
  test_case_contents=formatted_test_case,
  parameters=g_eval_params_str,
+ multimodal=multimodal,
+ )
+ return generate_with_schema_and_extract(
+ self,
+ prompt,
+ Winner,
+ extract_schema=lambda s: (
+ s.winner,
+ s.reason,
+ dummy_to_real_names,
+ ),
+ extract_json=lambda data: (
+ data["winner"],
+ data["reason"],
+ dummy_to_real_names,
+ ),
  )
- if self.using_native_model:
- res, cost = self.model.generate(prompt, schema=Winner)
- self.evaluation_cost += cost
- return res.winner, res.reason, dummy_to_real_names
- else:
- try:
- res: Winner = self.model.generate(prompt, schema=Winner)
- return res.winner, res.reason, dummy_to_real_names
- except TypeError:
- res = self.model.generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["winner"], data["reason"], dummy_to_real_names
 
  async def _a_generate_rewritten_reason(
  self,
@@ -260,22 +277,14 @@ class ArenaGEval(BaseArenaMetric):
  reason=reason,
  dummy_to_real_names=dummy_to_real_names,
  )
- if self.using_native_model:
- res, cost = await self.model.a_generate(
- prompt, schema=RewrittenReason
- )
- self.evaluation_cost += cost
- return res.rewritten_reason
- else:
- try:
- res: RewrittenReason = await self.model.a_generate(
- prompt, schema=RewrittenReason
- )
- return res.rewritten_reason
- except TypeError:
- res = await self.model.a_generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["rewritten_reason"]
+
+ return await a_generate_with_schema_and_extract(
+ self,
+ prompt,
+ RewrittenReason,
+ extract_schema=lambda s: s.rewritten_reason,
+ extract_json=lambda data: data["rewritten_reason"],
+ )
 
  def _generate_rewritten_reason(
  self,
@@ -286,20 +295,13 @@ class ArenaGEval(BaseArenaMetric):
  reason=reason,
  dummy_to_real_names=dummy_to_real_names,
  )
- if self.using_native_model:
- res, cost = self.model.generate(prompt, schema=RewrittenReason)
- self.evaluation_cost += cost
- return res.rewritten_reason
- else:
- try:
- res: RewrittenReason = self.model.generate(
- prompt, schema=RewrittenReason
- )
- return res.rewritten_reason
- except TypeError:
- res = self.model.generate(prompt)
- data = trimAndLoadJson(res, self)
- return data["rewritten_reason"]
+ return generate_with_schema_and_extract(
+ self,
+ prompt,
+ RewrittenReason,
+ extract_schema=lambda s: s.rewritten_reason,
+ extract_json=lambda data: data["rewritten_reason"],
+ )
 
  def is_successful(self) -> bool:
  if self.error is not None:
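
All of the ArenaGEval branches that previously chose between native-model generation, schema-typed generation, and a trimAndLoadJson fallback are now routed through generate_with_schema_and_extract and its async counterpart a_generate_with_schema_and_extract from deepeval.metrics.utils (that module changes in this release but its body is not part of this excerpt). The sketch below is only an approximation of what the helper plausibly does, reconstructed from the branches removed above; the real signature and behaviour may differ.

from deepeval.metrics.utils import trimAndLoadJson  # existing JSON-repair helper used by the old inline code

def generate_with_schema_and_extract_sketch(
    metric, prompt, schema_cls, extract_schema, extract_json
):
    """Approximation of the consolidated helper, inferred from the removed code paths."""
    if metric.using_native_model:
        # Native models return (result, cost); the metric accumulates evaluation cost.
        res, cost = metric.model.generate(prompt, schema=schema_cls)
        metric.evaluation_cost += cost
        return extract_schema(res)
    try:
        # Custom models that accept a schema return a typed object.
        res = metric.model.generate(prompt, schema=schema_cls)
        return extract_schema(res)
    except TypeError:
        # Models without schema support fall back to free-form output plus JSON parsing.
        res = metric.model.generate(prompt)
        data = trimAndLoadJson(res, metric)
        return extract_json(data)

The async variant presumably mirrors this with await metric.model.a_generate(...).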
deepeval/metrics/arena_g_eval/template.py

@@ -3,11 +3,23 @@ import textwrap
 
 
  class ArenaGEvalTemplate:
+ multimodal_rules = """
+ --- MULTIMODAL INPUT RULES ---
+ - Treat image content as factual evidence.
+ - Only reference visual details that are explicitly and clearly visible.
+ - Do not infer or guess objects, text, or details not visibly present.
+ - If an image is unclear or ambiguous, mark uncertainty explicitly.
+ """
+
  @staticmethod
- def generate_evaluation_steps(parameters: str, criteria: str):
+ def generate_evaluation_steps(
+ parameters: str, criteria: str, multimodal: Optional[bool]
+ ):
  return textwrap.dedent(
  f"""Given an evaluation criteria which outlines how you should choose the winner out of all contestants based on the {parameters}, generate 3-4 concise evaluation steps based on the criteria below. You MUST make it clear how to evaluate {parameters} in relation to one another.
 
+ {ArenaGEvalTemplate.multimodal_rules if multimodal else ""}
+
  Evaluation Criteria:
  {criteria}
 
@@ -28,6 +40,7 @@ class ArenaGEvalTemplate:
  evaluation_steps: str,
  test_case_contents: List[str],
  parameters: str,
+ multimodal: Optional[bool],
  ):
  reasoning_expectation = (
  "Be specific and grounded in the evaluation steps."
@@ -36,6 +49,9 @@ class ArenaGEvalTemplate:
  return textwrap.dedent(
  f"""
  You are a judge. Given the following evaluation steps, select the single contestant that best aligns with the evaluation steps.
+
+ {ArenaGEvalTemplate.multimodal_rules if multimodal else ""}
+
  Return a JSON object with three fields:
 
  - `"winner"`: the contestant that is best aligned with the evaluation steps.