deepeval 3.7.5__py3-none-any.whl → 3.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. deepeval/_version.py +1 -1
  2. deepeval/config/settings.py +35 -1
  3. deepeval/dataset/api.py +23 -1
  4. deepeval/dataset/golden.py +106 -21
  5. deepeval/evaluate/evaluate.py +0 -3
  6. deepeval/evaluate/execute.py +10 -222
  7. deepeval/evaluate/utils.py +6 -30
  8. deepeval/key_handler.py +3 -0
  9. deepeval/metrics/__init__.py +0 -4
  10. deepeval/metrics/answer_relevancy/answer_relevancy.py +89 -132
  11. deepeval/metrics/answer_relevancy/template.py +102 -179
  12. deepeval/metrics/arena_g_eval/arena_g_eval.py +98 -96
  13. deepeval/metrics/arena_g_eval/template.py +17 -1
  14. deepeval/metrics/argument_correctness/argument_correctness.py +81 -87
  15. deepeval/metrics/argument_correctness/template.py +19 -2
  16. deepeval/metrics/base_metric.py +13 -41
  17. deepeval/metrics/bias/bias.py +102 -108
  18. deepeval/metrics/bias/template.py +14 -2
  19. deepeval/metrics/contextual_precision/contextual_precision.py +56 -92
  20. deepeval/metrics/contextual_recall/contextual_recall.py +58 -85
  21. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +53 -83
  22. deepeval/metrics/conversation_completeness/conversation_completeness.py +101 -119
  23. deepeval/metrics/conversation_completeness/template.py +23 -3
  24. deepeval/metrics/conversational_dag/conversational_dag.py +12 -8
  25. deepeval/metrics/conversational_dag/nodes.py +66 -123
  26. deepeval/metrics/conversational_dag/templates.py +16 -0
  27. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +47 -66
  28. deepeval/metrics/dag/dag.py +10 -0
  29. deepeval/metrics/dag/nodes.py +63 -126
  30. deepeval/metrics/dag/templates.py +14 -0
  31. deepeval/metrics/exact_match/exact_match.py +9 -1
  32. deepeval/metrics/faithfulness/faithfulness.py +82 -136
  33. deepeval/metrics/g_eval/g_eval.py +87 -78
  34. deepeval/metrics/g_eval/template.py +18 -1
  35. deepeval/metrics/g_eval/utils.py +7 -6
  36. deepeval/metrics/goal_accuracy/goal_accuracy.py +91 -76
  37. deepeval/metrics/goal_accuracy/template.py +21 -3
  38. deepeval/metrics/hallucination/hallucination.py +60 -75
  39. deepeval/metrics/hallucination/template.py +13 -0
  40. deepeval/metrics/indicator.py +3 -6
  41. deepeval/metrics/json_correctness/json_correctness.py +40 -38
  42. deepeval/metrics/json_correctness/template.py +10 -0
  43. deepeval/metrics/knowledge_retention/knowledge_retention.py +60 -97
  44. deepeval/metrics/knowledge_retention/schema.py +9 -3
  45. deepeval/metrics/knowledge_retention/template.py +12 -0
  46. deepeval/metrics/mcp/mcp_task_completion.py +68 -38
  47. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +92 -74
  48. deepeval/metrics/mcp/template.py +52 -0
  49. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +58 -64
  50. deepeval/metrics/mcp_use_metric/template.py +12 -0
  51. deepeval/metrics/misuse/misuse.py +77 -97
  52. deepeval/metrics/misuse/template.py +15 -0
  53. deepeval/metrics/multimodal_metrics/__init__.py +0 -1
  54. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +37 -38
  55. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +55 -76
  56. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +37 -38
  57. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +37 -38
  58. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +57 -76
  59. deepeval/metrics/non_advice/non_advice.py +79 -105
  60. deepeval/metrics/non_advice/template.py +12 -0
  61. deepeval/metrics/pattern_match/pattern_match.py +12 -4
  62. deepeval/metrics/pii_leakage/pii_leakage.py +75 -106
  63. deepeval/metrics/pii_leakage/template.py +14 -0
  64. deepeval/metrics/plan_adherence/plan_adherence.py +63 -89
  65. deepeval/metrics/plan_adherence/template.py +11 -0
  66. deepeval/metrics/plan_quality/plan_quality.py +63 -87
  67. deepeval/metrics/plan_quality/template.py +9 -0
  68. deepeval/metrics/prompt_alignment/prompt_alignment.py +72 -83
  69. deepeval/metrics/prompt_alignment/template.py +12 -0
  70. deepeval/metrics/role_adherence/role_adherence.py +48 -71
  71. deepeval/metrics/role_adherence/template.py +14 -0
  72. deepeval/metrics/role_violation/role_violation.py +75 -108
  73. deepeval/metrics/role_violation/template.py +12 -0
  74. deepeval/metrics/step_efficiency/step_efficiency.py +55 -65
  75. deepeval/metrics/step_efficiency/template.py +11 -0
  76. deepeval/metrics/summarization/summarization.py +115 -183
  77. deepeval/metrics/summarization/template.py +19 -0
  78. deepeval/metrics/task_completion/task_completion.py +67 -73
  79. deepeval/metrics/tool_correctness/tool_correctness.py +43 -42
  80. deepeval/metrics/tool_use/tool_use.py +42 -66
  81. deepeval/metrics/topic_adherence/template.py +13 -0
  82. deepeval/metrics/topic_adherence/topic_adherence.py +53 -67
  83. deepeval/metrics/toxicity/template.py +13 -0
  84. deepeval/metrics/toxicity/toxicity.py +80 -99
  85. deepeval/metrics/turn_contextual_precision/schema.py +3 -3
  86. deepeval/metrics/turn_contextual_precision/template.py +1 -1
  87. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +110 -68
  88. deepeval/metrics/turn_contextual_recall/schema.py +3 -3
  89. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +104 -61
  90. deepeval/metrics/turn_contextual_relevancy/schema.py +2 -2
  91. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +106 -65
  92. deepeval/metrics/turn_faithfulness/schema.py +1 -1
  93. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +104 -73
  94. deepeval/metrics/turn_relevancy/template.py +14 -0
  95. deepeval/metrics/turn_relevancy/turn_relevancy.py +56 -69
  96. deepeval/metrics/utils.py +145 -90
  97. deepeval/models/base_model.py +44 -6
  98. deepeval/models/embedding_models/azure_embedding_model.py +34 -12
  99. deepeval/models/embedding_models/local_embedding_model.py +22 -7
  100. deepeval/models/embedding_models/ollama_embedding_model.py +17 -6
  101. deepeval/models/embedding_models/openai_embedding_model.py +3 -2
  102. deepeval/models/llms/amazon_bedrock_model.py +226 -71
  103. deepeval/models/llms/anthropic_model.py +141 -47
  104. deepeval/models/llms/azure_model.py +167 -94
  105. deepeval/models/llms/constants.py +2032 -0
  106. deepeval/models/llms/deepseek_model.py +79 -29
  107. deepeval/models/llms/gemini_model.py +126 -67
  108. deepeval/models/llms/grok_model.py +125 -59
  109. deepeval/models/llms/kimi_model.py +126 -81
  110. deepeval/models/llms/litellm_model.py +92 -18
  111. deepeval/models/llms/local_model.py +114 -15
  112. deepeval/models/llms/ollama_model.py +97 -76
  113. deepeval/models/llms/openai_model.py +167 -310
  114. deepeval/models/llms/portkey_model.py +58 -16
  115. deepeval/models/llms/utils.py +5 -2
  116. deepeval/models/utils.py +60 -4
  117. deepeval/simulator/conversation_simulator.py +43 -0
  118. deepeval/simulator/template.py +13 -0
  119. deepeval/test_case/api.py +24 -45
  120. deepeval/test_case/arena_test_case.py +7 -2
  121. deepeval/test_case/conversational_test_case.py +55 -6
  122. deepeval/test_case/llm_test_case.py +60 -6
  123. deepeval/test_run/api.py +3 -0
  124. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/METADATA +1 -1
  125. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/RECORD +128 -132
  126. deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py +0 -0
  127. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +0 -386
  128. deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py +0 -11
  129. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +0 -133
  130. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +0 -68
  131. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/LICENSE.md +0 -0
  132. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/WHEEL +0 -0
  133. {deepeval-3.7.5.dist-info → deepeval-3.7.6.dist-info}/entry_points.txt +0 -0
deepeval/metrics/contextual_recall/contextual_recall.py

@@ -3,14 +3,13 @@ from typing import Optional, List, Type, Union
 from deepeval.utils import (
     get_or_create_event_loop,
     prettify_list,
-    convert_to_multi_modal_array,
 )
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
-    check_mllm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.test_case import (
     LLMTestCase,
@@ -20,7 +19,11 @@ from deepeval.metrics import BaseMetric
 from deepeval.models import DeepEvalBaseLLM
 from deepeval.metrics.contextual_recall.template import ContextualRecallTemplate
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.contextual_recall.schema import *
+from deepeval.metrics.contextual_recall.schema import (
+    ContextualRecallVerdict,
+    Verdicts,
+    ContextualRecallScoreReason,
+)
 from deepeval.metrics.api import metric_data_manager
@@ -62,12 +65,15 @@ class ContextualRecallMetric(BaseMetric):
     ) -> float:
         multimodal = test_case.multimodal

-        if multimodal:
-            check_mllm_test_case_params(
-                test_case, self._required_params, None, None, self, self.model
-            )
-        else:
-            check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -118,12 +124,15 @@ class ContextualRecallMetric(BaseMetric):

         multimodal = test_case.multimodal

-        if multimodal:
-            check_mllm_test_case_params(
-                test_case, self._required_params, None, None, self, self.model
-            )
-        else:
-            check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
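
Both hunks above replace the old multimodal branch into check_mllm_test_case_params with a single check_llm_test_case_params call that now also receives the model and a multimodal flag. The unified signature lives in deepeval/metrics/utils.py and is not part of the hunks shown here; the sketch below is reconstructed purely from the positional arguments at the call sites, so the parameter names and body are assumptions:

# Hypothetical sketch of the unified validator; parameter names are
# guesses inferred from the call sites above, not the actual source.
from typing import Any, List, Optional

def check_llm_test_case_params(
    test_case: Any,
    required_params: List[Any],
    input_image_count: Optional[int],
    output_image_count: Optional[int],
    metric: Any,
    model: Any = None,
    multimodal: bool = False,
) -> None:
    # Ensure every required attribute is populated on the test case.
    for param in required_params:
        if getattr(test_case, param.value, None) is None:
            raise ValueError(
                f"{param.value} is required for {type(metric).__name__}"
            )
    # With multimodal=True the real helper presumably also applies the
    # image-count bounds and verifies that `model` accepts image inputs.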
@@ -178,22 +187,13 @@ class ContextualRecallMetric(BaseMetric):
             multimodal=multimodal,
         )

-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=ContextualRecallScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ContextualRecallScoreReason = await self.model.a_generate(
-                    prompt, schema=ContextualRecallScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRecallScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )

     def _generate_reason(self, expected_output: str, multimodal: bool):
         if self.include_reason is False:
@@ -215,22 +215,13 @@ class ContextualRecallMetric(BaseMetric):
             multimodal=multimodal,
         )

-        if self.using_native_model:
-            res, cost = self.model.generate(
-                prompt, schema=ContextualRecallScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ContextualRecallScoreReason = self.model.generate(
-                    prompt, schema=ContextualRecallScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRecallScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )

     def _calculate_score(self):
         number_of_verdicts = len(self.verdicts)
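
These two hunks show the refactor that recurs across every metric in this release: the duplicated if self.using_native_model / try-except TypeError boilerplate collapses into a pair of shared helpers, generate_with_schema_and_extract and its async twin a_generate_with_schema_and_extract. Their implementation sits in the deepeval/metrics/utils.py diff (+145 -90), not shown here; the following is a minimal sketch, assuming the helpers reproduce the old inline logic exactly:

# Minimal sketch under that assumption: native models return a
# (result, cost) tuple and always honor `schema=`; custom models may
# not accept a `schema` kwarg, raising TypeError, in which case the raw
# completion is parsed as JSON and handed to `extract_json`.
import json
from typing import Any, Callable, Type

def generate_with_schema_and_extract(
    metric: Any,
    prompt: str,
    schema_cls: Type[Any],
    extract_schema: Callable[[Any], Any],
    extract_json: Callable[[dict], Any],
) -> Any:
    if metric.using_native_model:
        res, cost = metric.model.generate(prompt, schema=schema_cls)
        metric.evaluation_cost += cost
        return extract_schema(res)
    try:
        res = metric.model.generate(prompt, schema=schema_cls)
        return extract_schema(res)
    except TypeError:
        # Custom model without a `schema` kwarg: parse the raw completion.
        # (The real helper presumably uses deepeval's trimAndLoadJson here.)
        data = json.loads(metric.model.generate(prompt))
        return extract_json(data)

# a_generate_with_schema_and_extract would be the `async def` twin that
# awaits metric.model.a_generate(...) at each of the three call points.

Centralizing this pattern is why nearly every metric file in the list above shrinks by dozens of near-identical lines.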
@@ -256,25 +247,15 @@ class ContextualRecallMetric(BaseMetric):
             retrieval_context=retrieval_context,
             multimodal=multimodal,
         )
-        if self.using_native_model:
-            res, cost = await self.model.a_generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            verdicts = [item for item in res.verdicts]
-            return verdicts
-        else:
-            try:
-                res: Verdicts = await self.model.a_generate(
-                    prompt, schema=Verdicts
-                )
-                verdicts: Verdicts = [item for item in res.verdicts]
-                return verdicts
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                verdicts = [
-                    ContextualRecallVerdict(**item) for item in data["verdicts"]
-                ]
-                return verdicts
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ContextualRecallVerdict(**item) for item in data["verdicts"]
+            ],
+        )

     def _generate_verdicts(
         self,
@@ -287,23 +268,15 @@ class ContextualRecallMetric(BaseMetric):
             retrieval_context=retrieval_context,
             multimodal=multimodal,
         )
-        if self.using_native_model:
-            res, cost = self.model.generate(prompt, schema=Verdicts)
-            self.evaluation_cost += cost
-            verdicts = [item for item in res.verdicts]
-            return verdicts
-        else:
-            try:
-                res: Verdicts = self.model.generate(prompt, schema=Verdicts)
-                verdicts: Verdicts = [item for item in res.verdicts]
-                return verdicts
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                verdicts = [
-                    ContextualRecallVerdict(**item) for item in data["verdicts"]
-                ]
-                return verdicts
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=Verdicts,
+            extract_schema=lambda r: list(r.verdicts),
+            extract_json=lambda data: [
+                ContextualRecallVerdict(**item) for item in data["verdicts"]
+            ],
+        )

     def is_successful(self) -> bool:
         if self.error is not None:
@@ -311,7 +284,7 @@ class ContextualRecallMetric(BaseMetric):
         else:
             try:
                 self.success = self.score >= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success
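
The last hunk in this file narrows a bare except: to except TypeError:. TypeError is what Python actually raises here when the metric errored out earlier and self.score is still None, since None does not support ordered comparison against a float:

# Why TypeError is the right exception to catch: ordering None against
# a float raises TypeError, which is exactly the state after a failed
# run in which no score was computed.
score = None       # self.score after a failed run
threshold = 0.5    # self.threshold
try:
    success = score >= threshold
except TypeError:  # '>=' not supported between 'NoneType' and 'float'
    success = False
assert success is False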
 
deepeval/metrics/contextual_relevancy/contextual_relevancy.py

@@ -4,14 +4,13 @@ import asyncio
 from deepeval.utils import (
     get_or_create_event_loop,
     prettify_list,
-    convert_to_multi_modal_array,
 )
 from deepeval.metrics.utils import (
     construct_verbose_logs,
-    trimAndLoadJson,
     check_llm_test_case_params,
-    check_mllm_test_case_params,
     initialize_model,
+    a_generate_with_schema_and_extract,
+    generate_with_schema_and_extract,
 )
 from deepeval.test_case import (
     LLMTestCase,
@@ -23,7 +22,10 @@ from deepeval.metrics.contextual_relevancy.template import (
     ContextualRelevancyTemplate,
 )
 from deepeval.metrics.indicator import metric_progress_indicator
-from deepeval.metrics.contextual_relevancy.schema import *
+from deepeval.metrics.contextual_relevancy.schema import (
+    ContextualRelevancyVerdicts,
+    ContextualRelevancyScoreReason,
+)
 from deepeval.metrics.api import metric_data_manager
@@ -64,12 +66,15 @@ class ContextualRelevancyMetric(BaseMetric):

         multimodal = test_case.multimodal

-        if multimodal:
-            check_mllm_test_case_params(
-                test_case, self._required_params, None, None, self, self.model
-            )
-        else:
-            check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -121,12 +126,15 @@ class ContextualRelevancyMetric(BaseMetric):

         multimodal = test_case.multimodal

-        if multimodal:
-            check_mllm_test_case_params(
-                test_case, self._required_params, None, None, self, self.model
-            )
-        else:
-            check_llm_test_case_params(test_case, self._required_params, self)
+        check_llm_test_case_params(
+            test_case,
+            self._required_params,
+            None,
+            None,
+            self,
+            self.model,
+            test_case.multimodal,
+        )

         self.evaluation_cost = 0 if self.using_native_model else None
         with metric_progress_indicator(
@@ -183,24 +191,13 @@ class ContextualRelevancyMetric(BaseMetric):
             multimodal=multimodal,
         )

-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=ContextualRelevancyScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ContextualRelevancyScoreReason = (
-                    await self.model.a_generate(
-                        prompt, schema=ContextualRelevancyScoreReason
-                    )
-                )
-                return res.reason
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )

     def _generate_reason(self, input: str, multimodal: bool):
         if self.include_reason is False:
@@ -223,22 +220,13 @@ class ContextualRelevancyMetric(BaseMetric):
             multimodal=multimodal,
         )

-        if self.using_native_model:
-            res, cost = self.model.generate(
-                prompt, schema=ContextualRelevancyScoreReason
-            )
-            self.evaluation_cost += cost
-            return res.reason
-        else:
-            try:
-                res: ContextualRelevancyScoreReason = self.model.generate(
-                    prompt, schema=ContextualRelevancyScoreReason
-                )
-                return res.reason
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return data["reason"]
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyScoreReason,
+            extract_schema=lambda score_reason: score_reason.reason,
+            extract_json=lambda data: data["reason"],
+        )

     def _calculate_score(self):
         total_verdicts = 0
@@ -262,22 +250,13 @@ class ContextualRelevancyMetric(BaseMetric):
             input=input, context=context, multimodal=multimodal
         )

-        if self.using_native_model:
-            res, cost = await self.model.a_generate(
-                prompt, schema=ContextualRelevancyVerdicts
-            )
-            self.evaluation_cost += cost
-            return res
-        else:
-            try:
-                res = await self.model.a_generate(
-                    prompt, schema=ContextualRelevancyVerdicts
-                )
-                return res
-            except TypeError:
-                res = await self.model.a_generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return ContextualRelevancyVerdicts(**data)
+        return await a_generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyVerdicts,
+            extract_schema=lambda r: r,
+            extract_json=lambda data: ContextualRelevancyVerdicts(**data),
+        )

     def _generate_verdicts(
         self, input: str, context: str, multimodal: bool
@@ -286,22 +265,13 @@ class ContextualRelevancyMetric(BaseMetric):
             input=input, context=context, multimodal=multimodal
         )

-        if self.using_native_model:
-            res, cost = self.model.generate(
-                prompt, schema=ContextualRelevancyVerdicts
-            )
-            self.evaluation_cost += cost
-            return res
-        else:
-            try:
-                res = self.model.generate(
-                    prompt, schema=ContextualRelevancyVerdicts
-                )
-                return res
-            except TypeError:
-                res = self.model.generate(prompt)
-                data = trimAndLoadJson(res, self)
-                return ContextualRelevancyVerdicts(**data)
+        return generate_with_schema_and_extract(
+            metric=self,
+            prompt=prompt,
+            schema_cls=ContextualRelevancyVerdicts,
+            extract_schema=lambda r: r,
+            extract_json=lambda data: ContextualRelevancyVerdicts(**data),
+        )

     def is_successful(self) -> bool:
         if self.error is not None:
@@ -309,7 +279,7 @@ class ContextualRelevancyMetric(BaseMetric):
         else:
             try:
                 self.success = self.score >= self.threshold
-            except:
+            except TypeError:
                 self.success = False
         return self.success
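
The contextual_relevancy diff mirrors the contextual_recall changes point for point: explicit schema imports instead of import *, one check_llm_test_case_params call carrying the multimodal flag, the shared schema-extract helpers (here with extract_schema=lambda r: r, because the whole ContextualRelevancyVerdicts object is kept), and the narrowed except TypeError:. None of this touches the metric's public interface, so existing callers should be unaffected; a usage sketch with illustrative values:

# Illustrative usage; the public interface is unchanged by this
# refactor. Input, output, and context values are made up.
from deepeval.metrics import ContextualRelevancyMetric
from deepeval.test_case import LLMTestCase

metric = ContextualRelevancyMetric(threshold=0.7)
test_case = LLMTestCase(
    input="What is the capital of France?",
    actual_output="The capital of France is Paris.",
    retrieval_context=["Paris is the capital and largest city of France."],
)
metric.measure(test_case)  # internally routes through the new shared helpers
print(metric.score, metric.reason)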