deepeval 3.7.5__py3-none-any.whl → 3.7.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- deepeval/_version.py +1 -1
- deepeval/config/settings.py +35 -1
- deepeval/dataset/api.py +23 -1
- deepeval/dataset/golden.py +106 -21
- deepeval/evaluate/evaluate.py +0 -3
- deepeval/evaluate/execute.py +10 -222
- deepeval/evaluate/utils.py +6 -30
- deepeval/key_handler.py +3 -0
- deepeval/metrics/__init__.py +0 -4
- deepeval/metrics/answer_relevancy/answer_relevancy.py +89 -132
- deepeval/metrics/answer_relevancy/template.py +102 -179
- deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
- deepeval/metrics/arena_g_eval/template.py +17 -1
- deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
- deepeval/metrics/argument_correctness/template.py +19 -2
- deepeval/metrics/base_metric.py +13 -41
- deepeval/metrics/bias/bias.py +102 -108
- deepeval/metrics/bias/template.py +14 -2
- deepeval/metrics/contextual_precision/contextual_precision.py +56 -92
- deepeval/metrics/contextual_recall/contextual_recall.py +58 -85
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +53 -83
- deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
- deepeval/metrics/conversation_completeness/template.py +23 -3
- deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
- deepeval/metrics/conversational_dag/nodes.py +66 -123
- deepeval/metrics/conversational_dag/templates.py +16 -0
- deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
- deepeval/metrics/dag/dag.py +10 -0
- deepeval/metrics/dag/nodes.py +63 -126
- deepeval/metrics/dag/templates.py +14 -0
- deepeval/metrics/exact_match/exact_match.py +9 -1
- deepeval/metrics/faithfulness/faithfulness.py +82 -136
- deepeval/metrics/g_eval/g_eval.py +87 -78
- deepeval/metrics/g_eval/template.py +18 -1
- deepeval/metrics/g_eval/utils.py +7 -6
- deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
- deepeval/metrics/goal_accuracy/template.py +21 -3
- deepeval/metrics/hallucination/hallucination.py +60 -75
- deepeval/metrics/hallucination/template.py +13 -0
- deepeval/metrics/indicator.py +3 -6
- deepeval/metrics/json_correctness/json_correctness.py +40 -38
- deepeval/metrics/json_correctness/template.py +10 -0
- deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
- deepeval/metrics/knowledge_retention/schema.py +9 -3
- deepeval/metrics/knowledge_retention/template.py +12 -0
- deepeval/metrics/mcp/mcp_task_completion.py +68 -38
- deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
- deepeval/metrics/mcp/template.py +52 -0
- deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
- deepeval/metrics/mcp_use_metric/template.py +12 -0
- deepeval/metrics/misuse/misuse.py +77 -97
- deepeval/metrics/misuse/template.py +15 -0
- deepeval/metrics/multimodal_metrics/__init__.py +0 -1
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +37 -38
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +55 -76
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +37 -38
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +37 -38
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +57 -76
- deepeval/metrics/non_advice/non_advice.py +79 -105
- deepeval/metrics/non_advice/template.py +12 -0
- deepeval/metrics/pattern_match/pattern_match.py +12 -4
- deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
- deepeval/metrics/pii_leakage/template.py +14 -0
- deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
- deepeval/metrics/plan_adherence/template.py +11 -0
- deepeval/metrics/plan_quality/plan_quality.py +63 -87
- deepeval/metrics/plan_quality/template.py +9 -0
- deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
- deepeval/metrics/prompt_alignment/template.py +12 -0
- deepeval/metrics/role_adherence/role_adherence.py +48 -71
- deepeval/metrics/role_adherence/template.py +14 -0
- deepeval/metrics/role_violation/role_violation.py +75 -108
- deepeval/metrics/role_violation/template.py +12 -0
- deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
- deepeval/metrics/step_efficiency/template.py +11 -0
- deepeval/metrics/summarization/summarization.py +115 -183
- deepeval/metrics/summarization/template.py +19 -0
- deepeval/metrics/task_completion/task_completion.py +67 -73
- deepeval/metrics/tool_correctness/tool_correctness.py +43 -42
- deepeval/metrics/tool_use/tool_use.py +42 -66
- deepeval/metrics/topic_adherence/template.py +13 -0
- deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
- deepeval/metrics/toxicity/template.py +13 -0
- deepeval/metrics/toxicity/toxicity.py +80 -99
- deepeval/metrics/turn_contextual_precision/schema.py +3 -3
- deepeval/metrics/turn_contextual_precision/template.py +1 -1
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +110 -68
- deepeval/metrics/turn_contextual_recall/schema.py +3 -3
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +104 -61
- deepeval/metrics/turn_contextual_relevancy/schema.py +2 -2
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +106 -65
- deepeval/metrics/turn_faithfulness/schema.py +1 -1
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +104 -73
- deepeval/metrics/turn_relevancy/template.py +14 -0
- deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
- deepeval/metrics/utils.py +145 -90
- deepeval/models/base_model.py +44 -6
- deepeval/models/embedding_models/azure_embedding_model.py +34 -12
- deepeval/models/embedding_models/local_embedding_model.py +22 -7
- deepeval/models/embedding_models/ollama_embedding_model.py +17 -6
- deepeval/models/embedding_models/openai_embedding_model.py +3 -2
- deepeval/models/llms/amazon_bedrock_model.py +226 -71
- deepeval/models/llms/anthropic_model.py +141 -47
- deepeval/models/llms/azure_model.py +167 -94
- deepeval/models/llms/constants.py +2032 -0
- deepeval/models/llms/deepseek_model.py +79 -29
- deepeval/models/llms/gemini_model.py +126 -67
- deepeval/models/llms/grok_model.py +125 -59
- deepeval/models/llms/kimi_model.py +126 -81
- deepeval/models/llms/litellm_model.py +92 -18
- deepeval/models/llms/local_model.py +114 -15
- deepeval/models/llms/ollama_model.py +97 -76
- deepeval/models/llms/openai_model.py +167 -310
- deepeval/models/llms/portkey_model.py +58 -16
- deepeval/models/llms/utils.py +5 -2
- deepeval/models/utils.py +60 -4
- deepeval/simulator/conversation_simulator.py +43 -0
- deepeval/simulator/template.py +13 -0
- deepeval/test_case/api.py +24 -45
- deepeval/test_case/arena_test_case.py +7 -2
- deepeval/test_case/conversational_test_case.py +55 -6
- deepeval/test_case/llm_test_case.py +60 -6
- deepeval/test_run/api.py +3 -0
- {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -1
- {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/RECORD +128 -132
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -133
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
- {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
- {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0
deepeval/metrics/contextual_recall/contextual_recall.py

@@ -3,14 +3,13 @@ from typing import Optional, List, Type, Union
 from deepeval.utils import (
     get_or_create_event_loop,
     prettify_list,
-    convert_to_multi_modal_array,
 )
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
-    check_mllm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.test_case import (
     LLMTestCase,
@@ -20,7 +19,11 @@ from deepeval.metrics import BaseMetric
 from deepeval.models import DeepEvalBaseLLM
 from deepeval.metrics.contextual_recall.template import ContextualRecallTemplate
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.contextual_recall.schema import
+from deepeval.metrics.contextual_recall.schema import (
+    ContextualRecallVerdict,
+    Verdicts,
+    ContextualRecallScoreReason,
+)
 from deepeval.metrics.api import metric_data_manager
 
 
@@ -62,12 +65,15 @@ class ContextualRecallMetric(BaseMetric):
     ) -> float:
         multimodal = test_case.multimodal
 
-
-
-
-
-
-
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -118,12 +124,15 @@ class ContextualRecallMetric(BaseMetric):
 
         multimodal = test_case.multimodal
 
-
-
-
-
-
-
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -178,22 +187,13 @@ class ContextualRecallMetric(BaseMetric):
             multimodal=multimodal,
         )
 
-
-
-
-
-
-
-
-        try:
-            res: ContextualRecallScoreReason = await self.model.a_generate(
-                prompt, schema=ContextualRecallScoreReason
-            )
-            return res.reason
-        except TypeError:
-            res = await self.model.a_generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return data["reason"]
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRecallScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
     def _generate_reason(self, expected_output: str, multimodal: bool):
         if self.include_reason is False:
@@ -215,22 +215,13 @@ class ContextualRecallMetric(BaseMetric):
             multimodal=multimodal,
         )
 
-
-
-
-
-
-
-
-        try:
-            res: ContextualRecallScoreReason = self.model.generate(
-                prompt, schema=ContextualRecallScoreReason
-            )
-            return res.reason
-        except TypeError:
-            res = self.model.generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRecallScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
     def _calculate_score(self):
         number_of_verdicts = len(self.verdicts)
@@ -256,25 +247,15 @@ class ContextualRecallMetric(BaseMetric):
             retrieval_context=retrieval_context,
             multimodal=multimodal,
         )
-
-
-
-
-
-
-
-
-
-            )
-            verdicts: Verdicts = [item for item in res.verdicts]
-            return verdicts
-        except TypeError:
-            res = await self.model.a_generate(prompt)
-            data = trimAndLoadJson(res, self)
-            verdicts = [
-                ContextualRecallVerdict(**item) for item in data["verdicts"]
-            ]
-            return verdicts
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ContextualRecallVerdict(**item) for item in data["verdicts"]
+            ],
+        )
 
     def _generate_verdicts(
         self,
@@ -287,23 +268,15 @@ class ContextualRecallMetric(BaseMetric):
             retrieval_context=retrieval_context,
             multimodal=multimodal,
        )
-
-
-
-
-
-
-
-
-
-            return verdicts
-        except TypeError:
-            res = self.model.generate(prompt)
-            data = trimAndLoadJson(res, self)
-            verdicts = [
-                ContextualRecallVerdict(**item) for item in data["verdicts"]
-            ]
-            return verdicts
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ContextualRecallVerdict(**item) for item in data["verdicts"]
+            ],
+        )
 
     def is_successful(self) -> bool:
         if self.error is not None:
@@ -311,7 +284,7 @@ class ContextualRecallMetric(BaseMetric):
         else:
             try:
                 self.success = self.score >= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success
 
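The dominant pattern in this release is visible above: every metric's inline `try: model.generate(prompt, schema=...)` / `except TypeError: trimAndLoadJson(...)` fallback is replaced by a pair of shared helpers imported from `deepeval.metrics.utils`. The sketch below reconstructs what those helpers plausibly do, inferred only from the call sites in this diff and the blocks they replace. The parameter names match the call sites, but the bodies are assumptions, not the actual implementation; the real helpers presumably also handle the native-model cost tracking that the removed (uncaptured) lines around each try/except performed.

```python
# Hypothetical reconstruction of the new shared helpers, inferred from the
# call sites in this diff. Not the actual deepeval implementation.
from typing import Any, Callable, Type

from pydantic import BaseModel

# trimAndLoadJson is the pre-existing JSON parser these metrics used inline.
from deepeval.metrics.utils import trimAndLoadJson


def generate_with_schema_and_extract(
    metric: Any,  # the calling BaseMetric instance, providing .model
    prompt: str,
    schema_cls: Type[BaseModel],
    extract_schema: Callable[[Any], Any],
    extract_json: Callable[[dict], Any],
) -> Any:
    try:
        # Models with structured-output support return a schema_cls instance.
        res = metric.model.generate(prompt, schema=schema_cls)
        return extract_schema(res)
    except TypeError:
        # Fallback: free-form generation plus JSON parsing, the exact path
        # each metric previously open-coded.
        res = metric.model.generate(prompt)
        data = trimAndLoadJson(res, metric)
        return extract_json(data)


async def a_generate_with_schema_and_extract(
    metric: Any,
    prompt: str,
    schema_cls: Type[BaseModel],
    extract_schema: Callable[[Any], Any],
    extract_json: Callable[[dict], Any],
) -> Any:
    # Async twin of the helper above, mirroring it via model.a_generate.
    try:
        res = await metric.model.a_generate(prompt, schema=schema_cls)
        return extract_schema(res)
    except TypeError:
        res = await metric.model.a_generate(prompt)
        data = trimAndLoadJson(res, metric)
        return extract_json(data)
```

Under this reading, each metric now declares only what to extract from a schema instance versus a parsed dict (the two lambdas), and the generate-or-fallback plumbing lives in one place. The same refactor is applied to `contextual_relevancy.py` below.

deepeval/metrics/contextual_relevancy/contextual_relevancy.py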
@@ -4,14 +4,13 @@ import asyncio
 from deepeval.utils import (
     get_or_create_event_loop,
     prettify_list,
-    convert_to_multi_modal_array,
 )
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
-    check_mllm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.test_case import (
     LLMTestCase,
@@ -23,7 +22,10 @@ from deepeval.metrics.contextual_relevancy.template import (
     ContextualRelevancyTemplate,
 )
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.contextual_relevancy.schema import
+from deepeval.metrics.contextual_relevancy.schema import (
+    ContextualRelevancyVerdicts,
+    ContextualRelevancyScoreReason,
+)
 from deepeval.metrics.api import metric_data_manager
 
 
@@ -64,12 +66,15 @@ class ContextualRelevancyMetric(BaseMetric):
 
         multimodal = test_case.multimodal
 
-
-
-
-
-
-
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -121,12 +126,15 @@ class ContextualRelevancyMetric(BaseMetric):
 
         multimodal = test_case.multimodal
 
-
-
-
-
-
-
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )
 
         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -183,24 +191,13 @@ class ContextualRelevancyMetric(BaseMetric):
             multimodal=multimodal,
         )
 
-
-
-
-
-
-
-
-        try:
-            res: ContextualRelevancyScoreReason = (
-                await self.model.a_generate(
-                    prompt, schema=ContextualRelevancyScoreReason
-                )
-            )
-            return res.reason
-        except TypeError:
-            res = await self.model.a_generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return data["reason"]
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
     def _generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
@@ -223,22 +220,13 @@ class ContextualRelevancyMetric(BaseMetric):
             multimodal=multimodal,
         )
 
-
-
-
-
-
-
-
-        try:
-            res: ContextualRelevancyScoreReason = self.model.generate(
-                prompt, schema=ContextualRelevancyScoreReason
-            )
-            return res.reason
-        except TypeError:
-            res = self.model.generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )
 
     def _calculate_score(self):
         total_verdicts = 0
@@ -262,22 +250,13 @@ class ContextualRelevancyMetric(BaseMetric):
             input=input, context=context, multimodal=multimodal
         )
 
-
-
-
-
-
-
-
-        try:
-            res = await self.model.a_generate(
-                prompt, schema=ContextualRelevancyVerdicts
-            )
-            return res
-        except TypeError:
-            res = await self.model.a_generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return ContextualRelevancyVerdicts(**data)
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyVerdicts,
+            extract_schema=lambda r: r,
+            extract_json=lambda data: ContextualRelevancyVerdicts(**data),
+        )
 
     def _generate_verdicts(
         self, input: str, context: str, multimodal: bool
@@ -286,22 +265,13 @@ class ContextualRelevancyMetric(BaseMetric):
             input=input, context=context, multimodal=multimodal
         )
 
-
-
-
-
-
-
-
-        try:
-            res = self.model.generate(
-                prompt, schema=ContextualRelevancyVerdicts
-            )
-            return res
-        except TypeError:
-            res = self.model.generate(prompt)
-            data = trimAndLoadJson(res, self)
-            return ContextualRelevancyVerdicts(**data)
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyVerdicts,
+            extract_schema=lambda r: r,
+            extract_json=lambda data: ContextualRelevancyVerdicts(**data),
+        )
 
     def is_successful(self) -> bool:
         if self.error is not None:
@@ -309,7 +279,7 @@ class ContextualRelevancyMetric(BaseMetric):
         else:
             try:
                 self.success = self.score >= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success
 
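A small correctness fix rides along in both files' `is_successful`: the bare `except:` around the score/threshold comparison becomes `except TypeError:`. The bare form would also swallow `KeyboardInterrupt` and `SystemExit`; the narrowed form catches only the failure that can actually occur there, namely comparing a still-`None` score. A minimal standalone illustration (the values are hypothetical, not deepeval code):

```python
# In Python 3, comparing None with a number raises TypeError -- the one
# failure mode is_successful needs to absorb when a metric errored out
# before producing a score.
score = None
threshold = 0.5

try:
    success = score >= threshold
except TypeError:
    success = False

assert success is False
```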