deepeval 3.7.4__py3-none-any.whl → 3.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/config/settings.py +35 -1
- deepeval/dataset/api.py +23 -1
- deepeval/dataset/golden.py +139 -2
- deepeval/evaluate/evaluate.py +16 -11
- deepeval/evaluate/execute.py +13 -181
- deepeval/evaluate/utils.py +6 -26
- deepeval/integrations/pydantic_ai/agent.py +19 -2
- deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
- deepeval/key_handler.py +3 -0
- deepeval/metrics/__init__.py +14 -16
- deepeval/metrics/answer_relevancy/answer_relevancy.py +118 -116
- deepeval/metrics/answer_relevancy/template.py +22 -3
- deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
- deepeval/metrics/arena_g_eval/template.py +17 -1
- deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
- deepeval/metrics/argument_correctness/template.py +19 -2
- deepeval/metrics/base_metric.py +13 -44
- deepeval/metrics/bias/bias.py +102 -108
- deepeval/metrics/bias/template.py +14 -2
- deepeval/metrics/contextual_precision/contextual_precision.py +96 -94
- deepeval/metrics/contextual_precision/template.py +115 -66
- deepeval/metrics/contextual_recall/contextual_recall.py +94 -84
- deepeval/metrics/contextual_recall/template.py +106 -55
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +86 -84
- deepeval/metrics/contextual_relevancy/template.py +87 -58
- deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
- deepeval/metrics/conversation_completeness/template.py +23 -3
- deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
- deepeval/metrics/conversational_dag/nodes.py +66 -123
- deepeval/metrics/conversational_dag/templates.py +16 -0
- deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
- deepeval/metrics/dag/dag.py +10 -0
- deepeval/metrics/dag/nodes.py +63 -126
- deepeval/metrics/dag/templates.py +16 -2
- deepeval/metrics/exact_match/exact_match.py +9 -1
- deepeval/metrics/faithfulness/faithfulness.py +138 -149
- deepeval/metrics/faithfulness/schema.py +1 -1
- deepeval/metrics/faithfulness/template.py +200 -115
- deepeval/metrics/g_eval/g_eval.py +87 -78
- deepeval/metrics/g_eval/template.py +18 -1
- deepeval/metrics/g_eval/utils.py +7 -6
- deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
- deepeval/metrics/goal_accuracy/template.py +21 -3
- deepeval/metrics/hallucination/hallucination.py +60 -75
- deepeval/metrics/hallucination/template.py +13 -0
- deepeval/metrics/indicator.py +7 -10
- deepeval/metrics/json_correctness/json_correctness.py +40 -38
- deepeval/metrics/json_correctness/template.py +10 -0
- deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
- deepeval/metrics/knowledge_retention/schema.py +9 -3
- deepeval/metrics/knowledge_retention/template.py +12 -0
- deepeval/metrics/mcp/mcp_task_completion.py +68 -38
- deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
- deepeval/metrics/mcp/template.py +52 -0
- deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
- deepeval/metrics/mcp_use_metric/template.py +12 -0
- deepeval/metrics/misuse/misuse.py +77 -97
- deepeval/metrics/misuse/template.py +15 -0
- deepeval/metrics/multimodal_metrics/__init__.py +0 -19
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +59 -53
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +79 -95
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +59 -53
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +59 -53
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +111 -109
- deepeval/metrics/non_advice/non_advice.py +79 -105
- deepeval/metrics/non_advice/template.py +12 -0
- deepeval/metrics/pattern_match/pattern_match.py +12 -4
- deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
- deepeval/metrics/pii_leakage/template.py +14 -0
- deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
- deepeval/metrics/plan_adherence/template.py +11 -0
- deepeval/metrics/plan_quality/plan_quality.py +63 -87
- deepeval/metrics/plan_quality/template.py +9 -0
- deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
- deepeval/metrics/prompt_alignment/template.py +12 -0
- deepeval/metrics/ragas.py +3 -3
- deepeval/metrics/role_adherence/role_adherence.py +48 -71
- deepeval/metrics/role_adherence/template.py +14 -0
- deepeval/metrics/role_violation/role_violation.py +75 -108
- deepeval/metrics/role_violation/template.py +12 -0
- deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
- deepeval/metrics/step_efficiency/template.py +11 -0
- deepeval/metrics/summarization/summarization.py +115 -183
- deepeval/metrics/summarization/template.py +19 -0
- deepeval/metrics/task_completion/task_completion.py +67 -73
- deepeval/metrics/tool_correctness/tool_correctness.py +45 -44
- deepeval/metrics/tool_use/tool_use.py +42 -66
- deepeval/metrics/topic_adherence/template.py +13 -0
- deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
- deepeval/metrics/toxicity/template.py +13 -0
- deepeval/metrics/toxicity/toxicity.py +80 -99
- deepeval/metrics/turn_contextual_precision/schema.py +21 -0
- deepeval/metrics/turn_contextual_precision/template.py +187 -0
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +592 -0
- deepeval/metrics/turn_contextual_recall/schema.py +21 -0
- deepeval/metrics/turn_contextual_recall/template.py +178 -0
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +563 -0
- deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
- deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +576 -0
- deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
- deepeval/metrics/turn_faithfulness/template.py +218 -0
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +627 -0
- deepeval/metrics/turn_relevancy/template.py +14 -0
- deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
- deepeval/metrics/utils.py +158 -122
- deepeval/models/__init__.py +0 -12
- deepeval/models/base_model.py +49 -33
- deepeval/models/embedding_models/__init__.py +7 -0
- deepeval/models/embedding_models/azure_embedding_model.py +79 -33
- deepeval/models/embedding_models/local_embedding_model.py +39 -20
- deepeval/models/embedding_models/ollama_embedding_model.py +52 -19
- deepeval/models/embedding_models/openai_embedding_model.py +42 -22
- deepeval/models/llms/amazon_bedrock_model.py +226 -72
- deepeval/models/llms/anthropic_model.py +178 -63
- deepeval/models/llms/azure_model.py +218 -60
- deepeval/models/llms/constants.py +2032 -0
- deepeval/models/llms/deepseek_model.py +95 -40
- deepeval/models/llms/gemini_model.py +209 -64
- deepeval/models/llms/grok_model.py +139 -68
- deepeval/models/llms/kimi_model.py +140 -90
- deepeval/models/llms/litellm_model.py +131 -37
- deepeval/models/llms/local_model.py +125 -21
- deepeval/models/llms/ollama_model.py +147 -24
- deepeval/models/llms/openai_model.py +222 -269
- deepeval/models/llms/portkey_model.py +81 -22
- deepeval/models/llms/utils.py +8 -3
- deepeval/models/retry_policy.py +17 -14
- deepeval/models/utils.py +106 -5
- deepeval/optimizer/__init__.py +5 -0
- deepeval/optimizer/algorithms/__init__.py +6 -0
- deepeval/optimizer/algorithms/base.py +29 -0
- deepeval/optimizer/algorithms/configs.py +18 -0
- deepeval/optimizer/algorithms/copro/__init__.py +5 -0
- deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
- deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
- deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
- deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
- deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
- deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
- deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
- deepeval/optimizer/algorithms/simba/__init__.py +5 -0
- deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
- deepeval/{optimization → optimizer}/configs.py +5 -8
- deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
- deepeval/optimizer/prompt_optimizer.py +263 -0
- deepeval/optimizer/rewriter/__init__.py +5 -0
- deepeval/optimizer/rewriter/rewriter.py +124 -0
- deepeval/optimizer/rewriter/utils.py +214 -0
- deepeval/optimizer/scorer/__init__.py +5 -0
- deepeval/optimizer/scorer/base.py +86 -0
- deepeval/optimizer/scorer/scorer.py +316 -0
- deepeval/optimizer/scorer/utils.py +30 -0
- deepeval/optimizer/types.py +148 -0
- deepeval/{optimization → optimizer}/utils.py +47 -165
- deepeval/prompt/prompt.py +5 -9
- deepeval/simulator/conversation_simulator.py +43 -0
- deepeval/simulator/template.py +13 -0
- deepeval/test_case/__init__.py +1 -3
- deepeval/test_case/api.py +26 -45
- deepeval/test_case/arena_test_case.py +7 -2
- deepeval/test_case/conversational_test_case.py +68 -1
- deepeval/test_case/llm_test_case.py +206 -1
- deepeval/test_case/utils.py +4 -8
- deepeval/test_run/api.py +18 -14
- deepeval/test_run/test_run.py +3 -3
- deepeval/tracing/patchers.py +9 -4
- deepeval/tracing/tracing.py +2 -2
- deepeval/utils.py +65 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -4
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/RECORD +180 -193
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -148
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
- deepeval/models/mlllms/__init__.py +0 -4
- deepeval/models/mlllms/azure_model.py +0 -343
- deepeval/models/mlllms/gemini_model.py +0 -313
- deepeval/models/mlllms/ollama_model.py +0 -175
- deepeval/models/mlllms/openai_model.py +0 -309
- deepeval/optimization/__init__.py +0 -13
- deepeval/optimization/adapters/__init__.py +0 -2
- deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
- deepeval/optimization/aggregates.py +0 -14
- deepeval/optimization/copro/configs.py +0 -31
- deepeval/optimization/gepa/__init__.py +0 -7
- deepeval/optimization/gepa/configs.py +0 -115
- deepeval/optimization/miprov2/configs.py +0 -134
- deepeval/optimization/miprov2/loop.py +0 -785
- deepeval/optimization/mutations/__init__.py +0 -0
- deepeval/optimization/mutations/prompt_rewriter.py +0 -458
- deepeval/optimization/policies/__init__.py +0 -16
- deepeval/optimization/policies/tie_breaker.py +0 -67
- deepeval/optimization/prompt_optimizer.py +0 -462
- deepeval/optimization/simba/__init__.py +0 -0
- deepeval/optimization/simba/configs.py +0 -33
- deepeval/optimization/types.py +0 -361
- deepeval/test_case/mllm_test_case.py +0 -170
- /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
- /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0
deepeval/metrics/arena_g_eval/arena_g_eval.py

@@ -14,12 +14,17 @@ from deepeval.utils import get_or_create_event_loop, prettify_list
 from deepeval.metrics.utils import (
     check_arena_test_case_params,
     construct_verbose_logs,
-    trimAndLoadJson,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.models import DeepEvalBaseLLM
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.arena_g_eval.schema import
+from deepeval.metrics.arena_g_eval.schema import (
+    RewrittenReason,
+    Winner,
+    Steps,
+)
 from deepeval.metrics.g_eval.utils import (
     construct_g_eval_params_string,
     validate_criteria_and_evaluation_steps,

@@ -62,7 +67,13 @@ class ArenaGEval(BaseArenaMetric):
         _progress: Optional[Progress] = None,
         _pbar_id: Optional[int] = None,
     ) -> str:
-        check_arena_test_case_params(
+        check_arena_test_case_params(
+            test_case,
+            self.evaluation_params,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
         self.evaluation_cost = 0 if self.using_native_model else None

         with metric_progress_indicator(self, _show_indicator=_show_indicator):

@@ -76,12 +87,12 @@ class ArenaGEval(BaseArenaMetric):
                 )
             else:
                 self.evaluation_steps: List[str] = (
-                    self._generate_evaluation_steps()
+                    self._generate_evaluation_steps(test_case.multimodal)
                 )
                 if _progress:
                     update_pbar(_progress, _pbar_id)
                 masked_winner, masked_reason, dummy_to_real_names = (
-                    self._compare(test_case)
+                    self._compare(test_case, test_case.multimodal)
                 )
                 if _progress:
                     update_pbar(_progress, _pbar_id)

@@ -111,7 +122,13 @@ class ArenaGEval(BaseArenaMetric):
         _progress: Optional[Progress] = None,
         _pbar_id: Optional[int] = None,
     ) -> str:
-        check_arena_test_case_params(
+        check_arena_test_case_params(
+            test_case,
+            self.evaluation_params,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
         self.evaluation_cost = 0 if self.using_native_model else None

         with metric_progress_indicator(

@@ -120,12 +137,12 @@ class ArenaGEval(BaseArenaMetric):
             _show_indicator=_show_indicator,
         ):
             self.evaluation_steps: List[str] = (
-                await self._a_generate_evaluation_steps()
+                await self._a_generate_evaluation_steps(test_case.multimodal)
             )
             if _progress:
                 update_pbar(_progress, _pbar_id)
             masked_winner, masked_reason, dummy_to_real_names = (
-                await self._a_compare(test_case)
+                await self._a_compare(test_case, test_case.multimodal)
             )
             if _progress:
                 update_pbar(_progress, _pbar_id)

@@ -147,7 +164,7 @@ class ArenaGEval(BaseArenaMetric):
         )
         return self.winner

-    async def _a_generate_evaluation_steps(self) -> List[str]:
+    async def _a_generate_evaluation_steps(self, multimodal: bool) -> List[str]:
         if self.evaluation_steps:
             return self.evaluation_steps

@@ -155,23 +172,20 @@ class ArenaGEval(BaseArenaMetric):
             self.evaluation_params
         )
         prompt = ArenaGEvalTemplate.generate_evaluation_steps(
-            criteria=self.criteria,
+            criteria=self.criteria,
+            parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+
+        return await a_generate_with_schema_and_extract(
+            self,
+            prompt,
+            Steps,
+            extract_schema=lambda s: s.steps,
+            extract_json=lambda data: data["steps"],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt)
-            self.evaluation_cost += cost
-            data = trimAndLoadJson(res, self)
-            return data["steps"]
-        else:
-            try:
-                res: Steps = await self.model.a_generate(prompt, schema=Steps)
-                return res.steps
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["steps"]

-    def _generate_evaluation_steps(self) -> List[str]:
+    def _generate_evaluation_steps(self, multimodal: bool) -> List[str]:
         if self.evaluation_steps:
             return self.evaluation_steps

@@ -179,25 +193,20 @@ class ArenaGEval(BaseArenaMetric):
             self.evaluation_params
         )
         prompt = ArenaGEvalTemplate.generate_evaluation_steps(
-            criteria=self.criteria,
+            criteria=self.criteria,
+            parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+        return generate_with_schema_and_extract(
+            self,
+            prompt,
+            Steps,
+            extract_schema=lambda s: s.steps,
+            extract_json=lambda data: data["steps"],
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt)
-            self.evaluation_cost += cost
-            data = trimAndLoadJson(res, self)
-            return data["steps"]
-        else:
-            try:
-                res: Steps = self.model.generate(prompt, schema=Steps)
-                return res.steps
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["steps"]

     async def _a_compare(
-        self,
-        test_case: ArenaTestCase,
+        self, test_case: ArenaTestCase, multimodal: bool
     ) -> Tuple[str, str, Dict[str, str]]:
         formatted_test_case, dummy_to_real_names = format_arena_test_case(
             self.evaluation_params, test_case

@@ -209,23 +218,27 @@ class ArenaGEval(BaseArenaMetric):
             evaluation_steps=number_evaluation_steps(self.evaluation_steps),
             test_case_contents=formatted_test_case,
             parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+
+        return await a_generate_with_schema_and_extract(
+            self,
+            prompt,
+            Winner,
+            extract_schema=lambda s: (
+                s.winner,
+                s.reason,
+                dummy_to_real_names,
+            ),
+            extract_json=lambda data: (
+                data["winner"],
+                data["reason"],
+                dummy_to_real_names,
+            ),
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Winner)
-            self.evaluation_cost += cost
-            return res.winner, res.reason, dummy_to_real_names
-        else:
-            try:
-                res: Winner = await self.model.a_generate(prompt, schema=Winner)
-                return res.winner, res.reason, dummy_to_real_names
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["winner"], data["reason"], dummy_to_real_names

     def _compare(
-        self,
-        test_case: ArenaTestCase,
+        self, test_case: ArenaTestCase, multimodal: bool
     ) -> Tuple[str, str, Dict[str, str]]:
         formatted_test_case, dummy_to_real_names = format_arena_test_case(
             self.evaluation_params, test_case

@@ -237,19 +250,23 @@ class ArenaGEval(BaseArenaMetric):
             evaluation_steps=number_evaluation_steps(self.evaluation_steps),
             test_case_contents=formatted_test_case,
             parameters=g_eval_params_str,
+            multimodal=multimodal,
+        )
+        return generate_with_schema_and_extract(
+            self,
+            prompt,
+            Winner,
+            extract_schema=lambda s: (
+                s.winner,
+                s.reason,
+                dummy_to_real_names,
+            ),
+            extract_json=lambda data: (
+                data["winner"],
+                data["reason"],
+                dummy_to_real_names,
+            ),
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Winner)
-            self.evaluation_cost += cost
-            return res.winner, res.reason, dummy_to_real_names
-        else:
-            try:
-                res: Winner = self.model.generate(prompt, schema=Winner)
-                return res.winner, res.reason, dummy_to_real_names
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["winner"], data["reason"], dummy_to_real_names

     async def _a_generate_rewritten_reason(
         self,

@@ -260,22 +277,14 @@ class ArenaGEval(BaseArenaMetric):
             reason=reason,
             dummy_to_real_names=dummy_to_real_names,
         )
-
-
-
-
-
-
-
-
-            res: RewrittenReason = await self.model.a_generate(
-                prompt, schema=RewrittenReason
-            )
-            return res.rewritten_reason
-        except TypeError:
-            res = await self.model.a_generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return data["rewritten_reason"]
+
+        return await a_generate_with_schema_and_extract(
+            self,
+            prompt,
+            RewrittenReason,
+            extract_schema=lambda s: s.rewritten_reason,
+            extract_json=lambda data: data["rewritten_reason"],
+        )

     def _generate_rewritten_reason(
         self,

@@ -286,20 +295,13 @@ class ArenaGEval(BaseArenaMetric):
             reason=reason,
             dummy_to_real_names=dummy_to_real_names,
         )
-
-
-
-
-
-
-
-                prompt, schema=RewrittenReason
-            )
-            return res.rewritten_reason
-        except TypeError:
-            res = self.model.generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return data["rewritten_reason"]
+        return generate_with_schema_and_extract(
+            self,
+            prompt,
+            RewrittenReason,
+            extract_schema=lambda s: s.rewritten_reason,
+            extract_json=lambda data: data["rewritten_reason"],
+        )

     def is_successful(self) -> bool:
         if self.error is not None:
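The same refactor repeats across the metric files touched in this release: the per-call-site branching on `self.using_native_model`, the `schema=` attempt with a `TypeError` fallback, and the `trimAndLoadJson` parse are collapsed into the shared `generate_with_schema_and_extract` / `a_generate_with_schema_and_extract` helpers now imported from `deepeval.metrics.utils`. The helpers' bodies are not shown in this diff; the sketch below is a rough reconstruction inferred only from the boilerplate they replace above. The parameter names (`metric`, `prompt`, `schema_cls`, `extract_schema`, `extract_json`) come from the keyword call sites in this release, while the internals are an assumption, not the actual deepeval implementation.

# Hypothetical sketch of the consolidated helper, inferred from the removed
# boilerplate above; not the actual deepeval.metrics.utils implementation.
import json
import re
from typing import Any, Callable, Type


def _load_json(text: str) -> dict:
    # Stand-in for deepeval's internal trimAndLoadJson: pull the first JSON object.
    match = re.search(r"\{.*\}", text, re.DOTALL)
    return json.loads(match.group(0)) if match else {}


def generate_with_schema_and_extract(
    metric: Any,
    prompt: str,
    schema_cls: Type[Any],
    extract_schema: Callable[[Any], Any],
    extract_json: Callable[[dict], Any],
) -> Any:
    if metric.using_native_model:
        # Native models return (result, cost); the cost is billed to the metric.
        res, cost = metric.model.generate(prompt, schema=schema_cls)
        metric.evaluation_cost += cost
        return extract_schema(res)
    try:
        # Custom models that accept a schema return the structured object directly.
        res = metric.model.generate(prompt, schema=schema_cls)
        return extract_schema(res)
    except TypeError:
        # Models without schema support return raw text; fall back to JSON parsing.
        res = metric.model.generate(prompt)
        return extract_json(_load_json(res))

The async variant, `a_generate_with_schema_and_extract`, presumably mirrors this with `await metric.model.a_generate(...)`, which is why each metric method now only supplies a prompt, a schema class, and the two extractor callables.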
deepeval/metrics/arena_g_eval/template.py

@@ -3,11 +3,23 @@ import textwrap


 class ArenaGEvalTemplate:
+    multimodal_rules = """
+    --- MULTIMODAL INPUT RULES ---
+    - Treat image content as factual evidence.
+    - Only reference visual details that are explicitly and clearly visible.
+    - Do not infer or guess objects, text, or details not visibly present.
+    - If an image is unclear or ambiguous, mark uncertainty explicitly.
+    """
+
     @staticmethod
-    def generate_evaluation_steps(
+    def generate_evaluation_steps(
+        parameters: str, criteria: str, multimodal: Optional[bool]
+    ):
         return textwrap.dedent(
             f"""Given an evaluation criteria which outlines how you should choose the winner out of all contestants based on the {parameters}, generate 3-4 concise evaluation steps based on the criteria below. You MUST make it clear how to evaluate {parameters} in relation to one another.

+            {ArenaGEvalTemplate.multimodal_rules if multimodal else ""}
+
             Evaluation Criteria:
             {criteria}


@@ -28,6 +40,7 @@ class ArenaGEvalTemplate:
         evaluation_steps: str,
         test_case_contents: List[str],
         parameters: str,
+        multimodal: Optional[bool],
     ):
         reasoning_expectation = (
             "Be specific and grounded in the evaluation steps."

@@ -36,6 +49,9 @@ class ArenaGEvalTemplate:
         return textwrap.dedent(
             f"""
             You are a judge. Given the following evaluation steps, select the single contestant that best aligns with the evaluation steps.
+
+            {ArenaGEvalTemplate.multimodal_rules if multimodal else ""}
+
             Return a JSON object with three fields:

             - `"winner"`: the contestant that is best aligned with the evaluation steps.
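The other change that repeats across the metric templates in this release is an opt-in `multimodal_rules` block: each prompt builder now takes a `multimodal` flag and splices the rules above into the prompt only when the test case involves images. A minimal standalone sketch of that conditional-injection pattern follows; the class name and prompt wording here are illustrative, not the deepeval source.

import textwrap


class ExampleTemplate:
    # Same conditional-injection pattern the diff adds to ArenaGEvalTemplate
    # and the other metric templates; the prompt text is illustrative only.
    multimodal_rules = """
    --- MULTIMODAL INPUT RULES ---
    - Treat image content as factual evidence.
    - Only reference visual details that are explicitly and clearly visible.
    """

    @staticmethod
    def generate_prompt(criteria: str, multimodal: bool = False) -> str:
        return textwrap.dedent(
            f"""Generate 3-4 concise evaluation steps for the criteria below.

            {ExampleTemplate.multimodal_rules if multimodal else ""}

            Evaluation Criteria:
            {criteria}
            """
        )


if __name__ == "__main__":
    # Text-only prompts are unchanged; multimodal test cases get the extra rules.
    print(ExampleTemplate.generate_prompt("Pick the most helpful answer."))
    print(ExampleTemplate.generate_prompt("Pick the most helpful answer.", multimodal=True))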
deepeval/metrics/argument_correctness/argument_correctness.py

@@ -3,9 +3,10 @@ from typing import Optional, List, Type, Union
 from deepeval.utils import get_or_create_event_loop, prettify_list
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.test_case import (
     LLMTestCase,

@@ -18,7 +19,11 @@ from deepeval.metrics.argument_correctness.template import (
     ArgumentCorrectnessTemplate,
 )
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.argument_correctness.schema import
+from deepeval.metrics.argument_correctness.schema import (
+    ArgumentCorrectnessVerdict,
+    Verdicts,
+    ArgumentCorrectnessScoreReason,
+)
 from deepeval.metrics.api import metric_data_manager


@@ -57,7 +62,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:

-        check_llm_test_case_params(
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(

@@ -81,11 +94,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
             else:
                 self.verdicts: List[ArgumentCorrectnessVerdict] = (
                     self._generate_verdicts(
-                        test_case.input,
+                        test_case.input,
+                        test_case.tools_called,
+                        test_case.multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = self._generate_reason(
+                self.reason = self._generate_reason(
+                    test_case.input, test_case.multimodal
+                )
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,

@@ -108,7 +125,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
         _log_metric_to_confident: bool = True,
     ) -> float:

-        check_llm_test_case_params(
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(

@@ -124,11 +149,15 @@ class ArgumentCorrectnessMetric(BaseMetric):
             else:
                 self.verdicts: List[ArgumentCorrectnessVerdict] = (
                     await self._a_generate_verdicts(
-                        test_case.input,
+                        test_case.input,
+                        test_case.tools_called,
+                        test_case.multimodal,
                     )
                 )
                 self.score = self._calculate_score()
-                self.reason = await self._a_generate_reason(
+                self.reason = await self._a_generate_reason(
+                    test_case.input, test_case.multimodal
+                )
                 self.success = self.score >= self.threshold
                 self.verbose_logs = construct_verbose_logs(
                     self,

@@ -143,7 +172,7 @@ class ArgumentCorrectnessMetric(BaseMetric):
         )
         return self.score

-    async def _a_generate_reason(self, input: str) -> str:
+    async def _a_generate_reason(self, input: str, multimodal: bool) -> str:
         if self.include_reason is False:
             return None

@@ -156,27 +185,18 @@ class ArgumentCorrectnessMetric(BaseMetric):
             incorrect_tool_calls_reasons=incorrect_tool_calls_reasons,
             input=input,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
+        )
+
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ArgumentCorrectnessScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=ArgumentCorrectnessScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ArgumentCorrectnessScoreReason = (
-                    await self.model.a_generate(
-                        prompt=prompt, schema=ArgumentCorrectnessScoreReason
-                    )
-                )
-                return res.reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]

-    def _generate_reason(self, input: str) -> str:
+    def _generate_reason(self, input: str, multimodal: bool) -> str:
         if self.include_reason is False:
             return None

@@ -189,76 +209,50 @@ class ArgumentCorrectnessMetric(BaseMetric):
             incorrect_tool_calls_reasons=incorrect_tool_calls_reasons,
             input=input,
             score=format(self.score, ".2f"),
+            multimodal=multimodal,
         )

-
-
-
-
-
-
-
-            try:
-                res: ArgumentCorrectnessScoreReason = self.model.generate(
-                    prompt=prompt, schema=ArgumentCorrectnessScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ArgumentCorrectnessScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )

     async def _a_generate_verdicts(
-        self,
-        input: str,
-        tools_called: List[ToolCall],
+        self, input: str, tools_called: List[ToolCall], multimodal: bool
     ) -> List[ArgumentCorrectnessVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input,
-
+            input=input, tools_called=tools_called, multimodal=multimodal
+        )
+
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ArgumentCorrectnessVerdict(**item) for item in data["verdicts"]
+            ],
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            return [item for item in res.verdicts]
-        else:
-            try:
-                res: Verdicts = await self.model.a_generate(
-                    prompt, schema=Verdicts
-                )
-                return [item for item in res.verdicts]
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return [
-                    ArgumentCorrectnessVerdict(**item)
-                    for item in data["verdicts"]
-                ]

     def _generate_verdicts(
-        self,
-        input: str,
-        tools_called: List[ToolCall],
+        self, input: str, tools_called: List[ToolCall], multimodal: bool
     ) -> List[ArgumentCorrectnessVerdict]:
         prompt = self.evaluation_template.generate_verdicts(
-            input=input,
-
+            input=input, tools_called=tools_called, multimodal=multimodal
+        )
+
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ArgumentCorrectnessVerdict(**item) for item in data["verdicts"]
+            ],
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            return [item for item in res.verdicts]
-        else:
-            try:
-                res: Verdicts = self.model.generate(prompt, schema=Verdicts)
-                return [item for item in res.verdicts]
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return [
-                    ArgumentCorrectnessVerdict(**item)
-                    for item in data["verdicts"]
-                ]

     def _calculate_score(self):
         number_of_verdicts = len(self.verdicts)

@@ -279,7 +273,7 @@ class ArgumentCorrectnessMetric(BaseMetric):
         else:
             try:
                 self.success = self.score >= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success

deepeval/metrics/argument_correctness/template.py

@@ -4,8 +4,18 @@ import textwrap


 class ArgumentCorrectnessTemplate:
+    multimodal_rules = """
+    --- MULTIMODAL INPUT RULES ---
+    - Treat image content as factual evidence.
+    - Only reference visual details that are explicitly and clearly visible.
+    - Do not infer or guess objects, text, or details not visibly present.
+    - If an image is unclear or ambiguous, mark uncertainty explicitly.
+    """
+
     @staticmethod
-    def generate_verdicts(
+    def generate_verdicts(
+        input: str, tools_called: List[ToolCall], multimodal: bool = False
+    ):

         stringified_tools_called = repr(tools_called)


@@ -19,6 +29,8 @@ class ArgumentCorrectnessTemplate:
             Provide a 'reason' ONLY if the answer is 'no'.
             If there is no input parameter, answer 'no' for the verdict and provide the reason as "No input parameter provided".

+            {ArgumentCorrectnessTemplate.multimodal_rules if multimodal else ""}
+
             **
             IMPORTANT: Please make sure to only return in valid and parseable JSON format, with the 'verdicts' key mapping to a list of JSON objects. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
             Example input:

@@ -92,12 +104,17 @@ class ArgumentCorrectnessTemplate:

     @staticmethod
     def generate_reason(
-        incorrect_tool_calls_reasons: List[str],
+        incorrect_tool_calls_reasons: List[str],
+        input: str,
+        score: float,
+        multimodal: bool = False,
     ):
         return textwrap.dedent(
             f"""Given the argument correctness score, the list of reasons of incorrect tool calls, and the input, provide a CONCISE reason for the score. Explain why it is not higher, but also why it is at its current score. You can mention tool calls or input, but do not mention an output or a response.
             If there is nothing incorrect, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).

+            {ArgumentCorrectnessTemplate.multimodal_rules if multimodal else ""}
+
             **
             IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason. Ensure all strings are closed appropriately. Repair any invalid JSON before you output it.
