deepeval-3.7.4-py3-none-any.whl → deepeval-3.7.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (155)
  1. deepeval/_version.py +1 -1
  2. deepeval/dataset/golden.py +54 -2
  3. deepeval/evaluate/evaluate.py +16 -8
  4. deepeval/evaluate/execute.py +70 -26
  5. deepeval/evaluate/utils.py +26 -22
  6. deepeval/integrations/pydantic_ai/agent.py +19 -2
  7. deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
  8. deepeval/metrics/__init__.py +14 -12
  9. deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
  10. deepeval/metrics/answer_relevancy/template.py +188 -92
  11. deepeval/metrics/base_metric.py +2 -5
  12. deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
  13. deepeval/metrics/contextual_precision/template.py +115 -66
  14. deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
  15. deepeval/metrics/contextual_recall/template.py +106 -55
  16. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
  17. deepeval/metrics/contextual_relevancy/template.py +87 -58
  18. deepeval/metrics/dag/templates.py +2 -2
  19. deepeval/metrics/faithfulness/faithfulness.py +70 -27
  20. deepeval/metrics/faithfulness/schema.py +1 -1
  21. deepeval/metrics/faithfulness/template.py +200 -115
  22. deepeval/metrics/g_eval/utils.py +2 -2
  23. deepeval/metrics/indicator.py +4 -4
  24. deepeval/metrics/multimodal_metrics/__init__.py +0 -18
  25. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
  26. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
  27. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
  28. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
  29. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
  30. deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
  31. deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
  32. deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
  33. deepeval/metrics/ragas.py +3 -3
  34. deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
  35. deepeval/metrics/turn_contextual_precision/schema.py +21 -0
  36. deepeval/metrics/turn_contextual_precision/template.py +187 -0
  37. deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
  38. deepeval/metrics/turn_contextual_recall/schema.py +21 -0
  39. deepeval/metrics/turn_contextual_recall/template.py +178 -0
  40. deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
  41. deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
  42. deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
  43. deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
  44. deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
  45. deepeval/metrics/turn_faithfulness/template.py +218 -0
  46. deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
  47. deepeval/metrics/utils.py +39 -58
  48. deepeval/models/__init__.py +0 -12
  49. deepeval/models/base_model.py +16 -38
  50. deepeval/models/embedding_models/__init__.py +7 -0
  51. deepeval/models/embedding_models/azure_embedding_model.py +52 -28
  52. deepeval/models/embedding_models/local_embedding_model.py +18 -14
  53. deepeval/models/embedding_models/ollama_embedding_model.py +38 -16
  54. deepeval/models/embedding_models/openai_embedding_model.py +40 -21
  55. deepeval/models/llms/amazon_bedrock_model.py +1 -2
  56. deepeval/models/llms/anthropic_model.py +44 -23
  57. deepeval/models/llms/azure_model.py +121 -36
  58. deepeval/models/llms/deepseek_model.py +18 -13
  59. deepeval/models/llms/gemini_model.py +129 -43
  60. deepeval/models/llms/grok_model.py +18 -13
  61. deepeval/models/llms/kimi_model.py +18 -13
  62. deepeval/models/llms/litellm_model.py +42 -22
  63. deepeval/models/llms/local_model.py +12 -7
  64. deepeval/models/llms/ollama_model.py +114 -12
  65. deepeval/models/llms/openai_model.py +137 -41
  66. deepeval/models/llms/portkey_model.py +24 -7
  67. deepeval/models/llms/utils.py +5 -3
  68. deepeval/models/retry_policy.py +17 -14
  69. deepeval/models/utils.py +46 -1
  70. deepeval/optimizer/__init__.py +5 -0
  71. deepeval/optimizer/algorithms/__init__.py +6 -0
  72. deepeval/optimizer/algorithms/base.py +29 -0
  73. deepeval/optimizer/algorithms/configs.py +18 -0
  74. deepeval/optimizer/algorithms/copro/__init__.py +5 -0
  75. deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
  76. deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
  77. deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
  78. deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
  79. deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
  80. deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
  81. deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
  82. deepeval/optimizer/algorithms/simba/__init__.py +5 -0
  83. deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
  84. deepeval/{optimization → optimizer}/configs.py +5 -8
  85. deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
  86. deepeval/optimizer/prompt_optimizer.py +263 -0
  87. deepeval/optimizer/rewriter/__init__.py +5 -0
  88. deepeval/optimizer/rewriter/rewriter.py +124 -0
  89. deepeval/optimizer/rewriter/utils.py +214 -0
  90. deepeval/optimizer/scorer/__init__.py +5 -0
  91. deepeval/optimizer/scorer/base.py +86 -0
  92. deepeval/optimizer/scorer/scorer.py +316 -0
  93. deepeval/optimizer/scorer/utils.py +30 -0
  94. deepeval/optimizer/types.py +148 -0
  95. deepeval/{optimization → optimizer}/utils.py +47 -165
  96. deepeval/prompt/prompt.py +5 -9
  97. deepeval/test_case/__init__.py +1 -3
  98. deepeval/test_case/api.py +12 -10
  99. deepeval/test_case/conversational_test_case.py +19 -1
  100. deepeval/test_case/llm_test_case.py +152 -1
  101. deepeval/test_case/utils.py +4 -8
  102. deepeval/test_run/api.py +15 -14
  103. deepeval/test_run/test_run.py +3 -3
  104. deepeval/tracing/patchers.py +9 -4
  105. deepeval/tracing/tracing.py +2 -2
  106. deepeval/utils.py +65 -0
  107. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
  108. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/RECORD +116 -125
  109. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
  110. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
  111. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
  112. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
  113. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
  114. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
  115. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
  116. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
  117. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
  118. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
  119. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
  120. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
  121. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
  122. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
  123. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
  124. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
  125. deepeval/models/mlllms/__init__.py +0 -4
  126. deepeval/models/mlllms/azure_model.py +0 -343
  127. deepeval/models/mlllms/gemini_model.py +0 -313
  128. deepeval/models/mlllms/ollama_model.py +0 -175
  129. deepeval/models/mlllms/openai_model.py +0 -309
  130. deepeval/optimization/__init__.py +0 -13
  131. deepeval/optimization/adapters/__init__.py +0 -2
  132. deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
  133. deepeval/optimization/aggregates.py +0 -14
  134. deepeval/optimization/copro/configs.py +0 -31
  135. deepeval/optimization/gepa/__init__.py +0 -7
  136. deepeval/optimization/gepa/configs.py +0 -115
  137. deepeval/optimization/miprov2/configs.py +0 -134
  138. deepeval/optimization/miprov2/loop.py +0 -785
  139. deepeval/optimization/mutations/__init__.py +0 -0
  140. deepeval/optimization/mutations/prompt_rewriter.py +0 -458
  141. deepeval/optimization/policies/__init__.py +0 -16
  142. deepeval/optimization/policies/tie_breaker.py +0 -67
  143. deepeval/optimization/prompt_optimizer.py +0 -462
  144. deepeval/optimization/simba/__init__.py +0 -0
  145. deepeval/optimization/simba/configs.py +0 -33
  146. deepeval/optimization/types.py +0 -361
  147. deepeval/test_case/mllm_test_case.py +0 -170
  148. /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
  149. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
  150. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
  151. /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
  152. /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
  153. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
  154. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
  155. {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
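
Read together, the file list gives the shape of this release: the standalone multimodal metrics (items 109–124), the multimodal model wrappers under deepeval/models/mlllms (items 125–129), and MLLMTestCase (item 147) are removed; turn-based conversational metrics are added under deepeval/metrics/turn_* (items 35–46); and deepeval/optimization is restructured into deepeval/optimizer (items 70–99 and 130–146). Below is a minimal pre-upgrade check sketched from the deleted paths alone; the check itself is illustrative, not part of deepeval's API.

import importlib.util

# Module paths taken directly from the deleted files listed above.
REMOVED_IN_3_7_5 = [
    "deepeval.metrics.multimodal_metrics.multimodal_answer_relevancy",
    "deepeval.models.mlllms",
    "deepeval.test_case.mllm_test_case",
    "deepeval.optimization",
]

for name in REMOVED_IN_3_7_5:
    found = importlib.util.find_spec(name) is not None
    print(f"{name}: {'present (<= 3.7.4)' if found else 'gone (3.7.5+)'}")
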
deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py
@@ -1,343 +0,0 @@
- from typing import Optional, List, Union
-
- from deepeval.metrics import BaseMultimodalMetric
- from deepeval.test_case import MLLMTestCaseParams, MLLMTestCase, MLLMImage
- from deepeval.metrics.multimodal_metrics.multimodal_answer_relevancy.template import (
-     MultimodalAnswerRelevancyTemplate,
- )
- from deepeval.utils import get_or_create_event_loop, prettify_list
- from deepeval.metrics.utils import (
-     construct_verbose_logs,
-     trimAndLoadJson,
-     check_mllm_test_case_params,
-     initialize_multimodal_model,
- )
- from deepeval.models import DeepEvalBaseMLLM
- from deepeval.metrics.multimodal_metrics.multimodal_answer_relevancy.schema import *
- from deepeval.metrics.indicator import metric_progress_indicator
-
-
- class MultimodalAnswerRelevancyMetric(BaseMultimodalMetric):
-
-     _required_params: List[MLLMTestCaseParams] = [
-         MLLMTestCaseParams.INPUT,
-         MLLMTestCaseParams.ACTUAL_OUTPUT,
-     ]
-
-     def __init__(
-         self,
-         threshold: float = 0.5,
-         model: Optional[Union[str, DeepEvalBaseMLLM]] = None,
-         include_reason: bool = True,
-         async_mode: bool = True,
-         strict_mode: bool = False,
-         verbose_mode: bool = False,
-     ):
-         self.threshold = 1 if strict_mode else threshold
-         self.model, self.using_native_model = initialize_multimodal_model(model)
-         self.evaluation_model = self.model.get_model_name()
-         self.include_reason = include_reason
-         self.async_mode = async_mode
-         self.strict_mode = strict_mode
-         self.verbose_mode = verbose_mode
-
-     def measure(
-         self,
-         test_case: MLLMTestCase,
-         _show_indicator: bool = True,
-         _in_component: bool = False,
-         _log_metric_to_confident: bool = True,
-     ) -> float:
-         check_mllm_test_case_params(
-             test_case, self._required_params, None, None, self
-         )
-         self.evaluation_cost = 0 if self.using_native_model else None
-         with metric_progress_indicator(
-             self,
-             _show_indicator=_show_indicator,
-             _in_component=_in_component,
-         ):
-             if self.async_mode:
-                 loop = get_or_create_event_loop()
-                 loop.run_until_complete(
-                     self.a_measure(
-                         test_case,
-                         _show_indicator=False,
-                         _in_component=_in_component,
-                         _log_metric_to_confident=_log_metric_to_confident,
-                     )
-                 )
-             else:
-                 self.statements: List[str] = self._generate_statements(
-                     test_case.actual_output
-                 )
-                 self.verdicts: List[AnswerRelevancyVerdict] = (
-                     self._generate_verdicts(test_case.input)
-                 )
-                 self.score = self._calculate_score()
-                 self.reason = self._generate_reason(test_case.input)
-                 self.success = self.score >= self.threshold
-                 self.verbose_logs = construct_verbose_logs(
-                     self,
-                     steps=[
-                         f"Statements:\n{prettify_list(self.statements)}",
-                         f"Verdicts:\n{prettify_list(self.verdicts)}",
-                         f"Score: {self.score}\nReason: {self.reason}",
-                     ],
-                 )
-
-             return self.score
-
-     async def a_measure(
-         self,
-         test_case: MLLMTestCase,
-         _show_indicator: bool = True,
-         _in_component: bool = False,
-         _log_metric_to_confident: bool = True,
-     ) -> float:
-         check_mllm_test_case_params(
-             test_case, self._required_params, None, None, self
-         )
-
-         self.evaluation_cost = 0 if self.using_native_model else None
-         with metric_progress_indicator(
-             self,
-             async_mode=True,
-             _show_indicator=_show_indicator,
-             _in_component=_in_component,
-         ):
-             self.statements: List[str] = await self._a_generate_statements(
-                 test_case.actual_output
-             )
-             self.verdicts: List[AnswerRelevancyVerdict] = (
-                 await self._a_generate_verdicts(test_case.input)
-             )
-             self.score = self._calculate_score()
-             self.reason = await self._a_generate_reason(test_case.input)
-             self.success = self.score >= self.threshold
-             self.verbose_logs = construct_verbose_logs(
-                 self,
-                 steps=[
-                     f"Statements:\n{prettify_list(self.statements)}",
-                     f"Verdicts:\n{prettify_list(self.verdicts)}",
-                     f"Score: {self.score}\nReason: {self.reason}",
-                 ],
-             )
-
-             return self.score
-
-     async def _a_generate_reason(
-         self,
-         input: List[Union[str, MLLMImage]],
-     ) -> str:
-         if self.include_reason is False:
-             return None
-
-         irrelevant_statements = []
-         for verdict in self.verdicts:
-             if verdict.verdict.strip().lower() == "no":
-                 irrelevant_statements.append(verdict.reason)
-
-         prompt = MultimodalAnswerRelevancyTemplate.generate_reason(
-             irrelevant_statements=irrelevant_statements,
-             input=input,
-             score=format(self.score, ".2f"),
-         )
-         if self.using_native_model:
-             res, cost = await self.model.a_generate(
-                 prompt, schema=MultimodelAnswerRelevancyScoreReason
-             )
-             self.evaluation_cost += cost
-             return res.reason
-         else:
-             try:
-                 res: MultimodelAnswerRelevancyScoreReason = (
-                     await self.model.a_generate(
-                         prompt=prompt,
-                         schema=MultimodelAnswerRelevancyScoreReason,
-                     )
-                 )
-                 return res.reason
-             except TypeError:
-                 res = await self.model.a_generate(prompt)
-                 data = trimAndLoadJson(res, self)
-                 return data["reason"]
-
-     def _generate_reason(
-         self,
-         input: List[Union[str, MLLMImage]],
-     ) -> str:
-         if self.include_reason is False:
-             return None
-
-         irrelevant_statements = []
-         for verdict in self.verdicts:
-             if verdict.verdict.strip().lower() == "no":
-                 irrelevant_statements.append(verdict.reason)
-
-         prompt = MultimodalAnswerRelevancyTemplate.generate_reason(
-             irrelevant_statements=irrelevant_statements,
-             input=input,
-             score=format(self.score, ".2f"),
-         )
-
-         if self.using_native_model:
-             res, cost = self.model.generate(
-                 prompt, schema=MultimodelAnswerRelevancyScoreReason
-             )
-             self.evaluation_cost += cost
-             return res.reason
-         else:
-             try:
-                 res: MultimodelAnswerRelevancyScoreReason = self.model.generate(
-                     prompt=prompt, schema=MultimodelAnswerRelevancyScoreReason
-                 )
-                 return res.reason
-             except TypeError:
-                 res = self.model.generate(prompt)
-                 data = trimAndLoadJson(res, self)
-                 return data["reason"]
-
-     async def _a_generate_verdicts(
-         self,
-         input: List[Union[str, MLLMImage]],
-     ) -> List[AnswerRelevancyVerdict]:
-         if len(self.statements) == 0:
-             return []
-
-         prompt = MultimodalAnswerRelevancyTemplate.generate_verdicts(
-             input=input,
-             actual_output=self.statements,
-         )
-         if self.using_native_model:
-             res, cost = await self.model.a_generate(prompt, schema=Verdicts)
-             self.evaluation_cost += cost
-             return [item for item in res.verdicts]
-         else:
-             try:
-                 res: Verdicts = await self.model.a_generate(
-                     prompt, schema=Verdicts
-                 )
-                 return [item for item in res.verdicts]
-             except TypeError:
-                 res = await self.model.a_generate(prompt)
-                 data = trimAndLoadJson(res, self)
-                 return [
-                     AnswerRelevancyVerdict(**item) for item in data["verdicts"]
-                 ]
-
-     def _generate_verdicts(
-         self, input: List[Union[str, MLLMImage]]
-     ) -> List[AnswerRelevancyVerdict]:
-         if len(self.statements) == 0:
-             return []
-
-         prompt = MultimodalAnswerRelevancyTemplate.generate_verdicts(
-             input=input,
-             actual_output=self.statements,
-         )
-         if self.using_native_model:
-             res, cost = self.model.generate(prompt, schema=Verdicts)
-             self.evaluation_cost += cost
-             return [item for item in res.verdicts]
-         else:
-             try:
-                 res: Verdicts = self.model.generate(prompt, schema=Verdicts)
-                 return [item for item in res.verdicts]
-             except TypeError:
-                 res = self.model.generate(prompt)
-                 data = trimAndLoadJson(res, self)
-                 return [
-                     AnswerRelevancyVerdict(**item) for item in data["verdicts"]
-                 ]
-
-     async def _a_generate_statements(
-         self,
-         actual_output: List[Union[str, MLLMImage]],
-     ) -> List[str]:
-         prompt = MultimodalAnswerRelevancyTemplate.generate_statements(
-             actual_output=[
-                 ele for ele in actual_output if isinstance(ele, str)
-             ],
-         )
-         if self.using_native_model:
-             res, cost = await self.model.a_generate(prompt, schema=Statements)
-             self.evaluation_cost += cost
-             statements: List[str] = res.statements + [
-                 ele for ele in actual_output if isinstance(ele, MLLMImage)
-             ]
-             return statements
-         else:
-             try:
-                 res: Statements = await self.model.a_generate(
-                     prompt, schema=Statements
-                 )
-                 statements: List[str] = res.statements + [
-                     ele for ele in actual_output if isinstance(ele, MLLMImage)
-                 ]
-                 return statements
-             except TypeError:
-                 res = await self.model.a_generate(prompt)
-                 data = trimAndLoadJson(res, self)
-                 statements = data["statements"] + [
-                     ele for ele in actual_output if isinstance(ele, MLLMImage)
-                 ]
-                 return statements
-
-     def _generate_statements(
-         self,
-         actual_output: List[Union[str, MLLMImage]],
-     ) -> List[str]:
-         prompt = MultimodalAnswerRelevancyTemplate.generate_statements(
-             actual_output=[
-                 ele for ele in actual_output if isinstance(ele, str)
-             ],
-         )
-         if self.using_native_model:
-             res, cost = self.model.generate(prompt, schema=Statements)
-             self.evaluation_cost += cost
-             statements = res.statements + [
-                 ele for ele in actual_output if isinstance(ele, MLLMImage)
-             ]
-             return statements
-         else:
-             try:
-                 res: Statements = self.model.generate(prompt, schema=Statements)
-                 statements = res.statements + [
-                     ele for ele in actual_output if isinstance(ele, MLLMImage)
-                 ]
-                 return statements
-             except TypeError:
-                 res = self.model.generate(prompt)
-                 data = trimAndLoadJson(res, self)
-                 statements = data["statements"] + [
-                     ele for ele in actual_output if isinstance(ele, MLLMImage)
-                 ]
-                 return statements
-
-     def _calculate_score(self):
-         number_of_verdicts = len(self.verdicts)
-         if number_of_verdicts == 0:
-             return 1
-
-         relevant_count = 0
-         for verdict in self.verdicts:
-             if verdict.verdict.strip().lower() != "no":
-                 relevant_count += 1
-
-         score = relevant_count / number_of_verdicts
-         return 0 if self.strict_mode and score < self.threshold else score
-
-     def is_successful(self) -> bool:
-         if self.error is not None:
-             self.success = False
-         else:
-             try:
-                 self.success = self.score >= self.threshold
-             except:
-                 self.success = False
-         return self.success
-
-     @property
-     def __name__(self):
-         return "Multimodal Answer Relevancy"
deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py
@@ -1,19 +0,0 @@
- from typing import List, Optional
- from pydantic import BaseModel, Field
-
-
- class Statements(BaseModel):
-     statements: List[str]
-
-
- class AnswerRelevancyVerdict(BaseModel):
-     verdict: str
-     reason: Optional[str] = Field(default=None)
-
-
- class Verdicts(BaseModel):
-     verdicts: List[AnswerRelevancyVerdict]
-
-
- class MultimodelAnswerRelevancyScoreReason(BaseModel):
-     reason: str
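
These schema classes were what the metric above passed as schema= to model.generate / a_generate: native models return an already-parsed object plus cost, while custom models that reject the schema argument raise TypeError and the metric falls back to trimAndLoadJson on raw text. A standalone sketch of just the validation step, redeclaring the deleted classes against a made-up JSON payload:

import json
from typing import List, Optional
from pydantic import BaseModel, Field

# Redeclaration of the deleted classes for a self-contained demo.
class AnswerRelevancyVerdict(BaseModel):
    verdict: str
    reason: Optional[str] = Field(default=None)

class Verdicts(BaseModel):
    verdicts: List[AnswerRelevancyVerdict]

# Made-up model output in the shape the verdict prompt asks for.
raw = '{"verdicts": [{"verdict": "no", "reason": "Irrelevant."}, {"verdict": "yes"}]}'
parsed = Verdicts(**json.loads(raw))  # same shape the trimAndLoadJson fallback feeds back
print([v.verdict for v in parsed.verdicts])  # ['no', 'yes']
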
deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py
@@ -1,122 +0,0 @@
- from typing import Union, List
- import textwrap
-
- from deepeval.test_case import MLLMImage
-
-
- class MultimodalAnswerRelevancyTemplate:
-     @staticmethod
-     def generate_statements(actual_output: List[str]):
-
-         return textwrap.dedent(
-             f"""Given the text, breakdown and generate a list of statements presented. Ambiguous statements and single words can also be considered as statements.
-
-             Example:
-             Example text: Shoes. The shoes can be refunded at no extra cost. Thanks for asking the question!
-
-             {{
-                 "statements": ["Shoes.", "Shoes can be refunded at no extra cost", "Thanks for asking the question!"]
-             }}
-             ===== END OF EXAMPLE ======
-
-             **
-             IMPORTANT: Please make sure to only return in JSON format, with the "statements" key mapping to a list of strings. No words or explanation is needed.
-             **
-
-             Text:
-             {actual_output}
-
-             JSON:
-             """
-         )
-
-     @staticmethod
-     def generate_verdicts(input, actual_output):
-         return (
-             [
-                 textwrap.dedent(
-                     f"""For the provided list of statements (which can contain images), determine whether each statement or image is relevant to address the input.
-                     Please generate a list of JSON with two keys: `verdict` and `reason`.
-                     The 'verdict' key should STRICTLY be either a 'yes', 'idk' or 'no'. Answer 'yes' if the statement or image is relevant to addressing the original input, 'no' if the statement or image is irrelevant, and 'idk' if it is ambiguous (eg., not directly relevant but could be used as a supporting point to address the input).
-                     The 'reason' is the reason for the verdict.
-                     Provide a 'reason' ONLY if the answer is 'no' or 'idk'.
-                     The provided statements are statements and images generated in the actual output.
-
-                     **
-                     IMPORTANT: Please make sure to only return in JSON format, with the 'verdicts' key mapping to a list of JSON objects.
-                     Example input: What should I do if there is an earthquake?
-                     Example statements: ["Shoes.", "Thanks for asking the question!", "Is there anything else I can help you with?", "Duck and hide"]
-                     Example JSON:
-                     {{
-                         "verdicts": [
-                             {{
-                                 "reason": "The 'Shoes.' statement made in the actual output is completely irrelevant to the input, which asks about what to do in the event of an earthquake.",
-                                 "verdict": "no"
-                             }},
-                             {{
-                                 "reason": "The statement thanking the user for asking the question is not directly relevant to the input, but is not entirely irrelevant.",
-                                 "verdict": "idk"
-                             }},
-                             {{
-                                 "reason": "The question about whether there is anything else the user can help with is not directly relevant to the input, but is not entirely irrelevant.",
-                                 "verdict": "idk"
-                             }},
-                             {{
-                                 "verdict": "yes"
-                             }}
-                         ]
-                     }}
-
-                     Since you are going to generate a verdict for each statement and image, the number of 'verdicts' SHOULD BE STRICTLY EQUAL to the number of `statements`.
-                     **
-
-                     Input:
-                     """
-                 )
-             ]
-             + input
-             + [
-                 textwrap.dedent(
-                     """
-                     Statements:
-                     """
-                 )
-             ]
-             + actual_output
-             + [
-                 textwrap.dedent(
-                     """
-                     JSON:
-                     """
-                 )
-             ]
-         )
-
-     @staticmethod
-     def generate_reason(irrelevant_statements, input, score):
-         return textwrap.dedent(
-             f"""Given the answer relevancy score, the list of reasons of irrelevant statements made in the actual output, and the input, provide a CONCISE reason for the score. Explain why it is not higher, but also why it is at its current score.
-             The irrelevant statements represent things in the actual output that is irrelevant to addressing whatever is asked/talked about in the input.
-             If there is nothing irrelevant, just say something positive with an upbeat encouraging tone (but don't overdo it otherwise it gets annoying).
-
-
-             **
-             IMPORTANT: Please make sure to only return in JSON format, with the 'reason' key providing the reason.
-             Example JSON:
-             {{
-                 "reason": "The score is <answer_relevancy_score> because <your_reason>."
-             }}
-             **
-
-             Answer Relevancy Score:
-             {score}
-
-             Reasons why the score can't be higher based on irrelevant statements in the actual output:
-             {irrelevant_statements}
-
-             Input:
-             {input}
-
-             JSON:
-             """
-         )
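
One detail worth noting in the deleted template: generate_statements and generate_reason return single f-strings, but generate_verdicts returns a Python list, so MLLMImage elements in the input and statements pass through to the multimodal model as separate parts instead of being stringified. A sketch of that interleaving with stand-in values (Image here is a hypothetical substitute for the removed MLLMImage):

from dataclasses import dataclass

@dataclass
class Image:
    # Hypothetical stand-in for the removed deepeval MLLMImage class.
    url: str

def build_multimodal_prompt(instructions, input_parts, statements):
    # Same interleaving as the deleted generate_verdicts: text segments and
    # image objects remain separate list elements, so images reach the
    # multimodal model without being flattened into a string.
    return [instructions] + input_parts + ["Statements:"] + statements + ["JSON:"]

parts = build_multimodal_prompt(
    "For each statement or image, give a verdict... Input:",
    ["What should I do if there is an earthquake?", Image(url="https://example.com/map.png")],
    ["Duck and hide"],
)
print(parts)
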