deepeval 3.6.7__py3-none-any.whl → 3.6.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. deepeval/_version.py +1 -1
  2. deepeval/config/settings.py +104 -36
  3. deepeval/config/utils.py +5 -0
  4. deepeval/dataset/dataset.py +162 -30
  5. deepeval/dataset/utils.py +41 -13
  6. deepeval/errors.py +20 -2
  7. deepeval/evaluate/execute.py +1662 -688
  8. deepeval/evaluate/types.py +1 -0
  9. deepeval/evaluate/utils.py +13 -3
  10. deepeval/integrations/crewai/__init__.py +2 -1
  11. deepeval/integrations/crewai/tool.py +71 -0
  12. deepeval/integrations/llama_index/__init__.py +0 -4
  13. deepeval/integrations/llama_index/handler.py +20 -21
  14. deepeval/integrations/pydantic_ai/instrumentator.py +125 -76
  15. deepeval/metrics/__init__.py +13 -0
  16. deepeval/metrics/base_metric.py +1 -0
  17. deepeval/metrics/contextual_precision/contextual_precision.py +27 -21
  18. deepeval/metrics/conversational_g_eval/__init__.py +3 -0
  19. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +11 -7
  20. deepeval/metrics/dag/schema.py +1 -1
  21. deepeval/metrics/dag/templates.py +2 -2
  22. deepeval/metrics/goal_accuracy/__init__.py +1 -0
  23. deepeval/metrics/goal_accuracy/goal_accuracy.py +349 -0
  24. deepeval/metrics/goal_accuracy/schema.py +17 -0
  25. deepeval/metrics/goal_accuracy/template.py +235 -0
  26. deepeval/metrics/hallucination/hallucination.py +8 -8
  27. deepeval/metrics/indicator.py +21 -1
  28. deepeval/metrics/mcp/mcp_task_completion.py +7 -2
  29. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +16 -6
  30. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +2 -1
  31. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +32 -24
  32. deepeval/metrics/plan_adherence/__init__.py +1 -0
  33. deepeval/metrics/plan_adherence/plan_adherence.py +292 -0
  34. deepeval/metrics/plan_adherence/schema.py +11 -0
  35. deepeval/metrics/plan_adherence/template.py +170 -0
  36. deepeval/metrics/plan_quality/__init__.py +1 -0
  37. deepeval/metrics/plan_quality/plan_quality.py +292 -0
  38. deepeval/metrics/plan_quality/schema.py +11 -0
  39. deepeval/metrics/plan_quality/template.py +101 -0
  40. deepeval/metrics/step_efficiency/__init__.py +1 -0
  41. deepeval/metrics/step_efficiency/schema.py +11 -0
  42. deepeval/metrics/step_efficiency/step_efficiency.py +234 -0
  43. deepeval/metrics/step_efficiency/template.py +256 -0
  44. deepeval/metrics/task_completion/task_completion.py +1 -0
  45. deepeval/metrics/tool_correctness/schema.py +6 -0
  46. deepeval/metrics/tool_correctness/template.py +88 -0
  47. deepeval/metrics/tool_correctness/tool_correctness.py +226 -22
  48. deepeval/metrics/tool_use/__init__.py +1 -0
  49. deepeval/metrics/tool_use/schema.py +19 -0
  50. deepeval/metrics/tool_use/template.py +220 -0
  51. deepeval/metrics/tool_use/tool_use.py +458 -0
  52. deepeval/metrics/topic_adherence/__init__.py +1 -0
  53. deepeval/metrics/topic_adherence/schema.py +16 -0
  54. deepeval/metrics/topic_adherence/template.py +162 -0
  55. deepeval/metrics/topic_adherence/topic_adherence.py +355 -0
  56. deepeval/models/embedding_models/azure_embedding_model.py +37 -36
  57. deepeval/models/embedding_models/local_embedding_model.py +30 -32
  58. deepeval/models/embedding_models/ollama_embedding_model.py +18 -20
  59. deepeval/models/embedding_models/openai_embedding_model.py +22 -31
  60. deepeval/models/llms/amazon_bedrock_model.py +20 -17
  61. deepeval/models/llms/openai_model.py +10 -1
  62. deepeval/models/retry_policy.py +103 -20
  63. deepeval/openai/extractors.py +61 -16
  64. deepeval/openai/patch.py +8 -12
  65. deepeval/openai/types.py +1 -1
  66. deepeval/openai/utils.py +108 -1
  67. deepeval/prompt/prompt.py +1 -0
  68. deepeval/prompt/utils.py +43 -14
  69. deepeval/simulator/conversation_simulator.py +25 -18
  70. deepeval/synthesizer/chunking/context_generator.py +9 -1
  71. deepeval/synthesizer/synthesizer.py +11 -10
  72. deepeval/test_case/llm_test_case.py +6 -2
  73. deepeval/test_run/test_run.py +190 -207
  74. deepeval/tracing/__init__.py +2 -1
  75. deepeval/tracing/otel/exporter.py +3 -4
  76. deepeval/tracing/otel/utils.py +23 -4
  77. deepeval/tracing/trace_context.py +53 -38
  78. deepeval/tracing/tracing.py +23 -0
  79. deepeval/tracing/types.py +16 -14
  80. deepeval/utils.py +21 -0
  81. {deepeval-3.6.7.dist-info → deepeval-3.6.9.dist-info}/METADATA +1 -1
  82. {deepeval-3.6.7.dist-info → deepeval-3.6.9.dist-info}/RECORD +85 -63
  83. deepeval/integrations/llama_index/agent/patched.py +0 -68
  84. deepeval/tracing/message_types/__init__.py +0 -10
  85. deepeval/tracing/message_types/base.py +0 -6
  86. deepeval/tracing/message_types/messages.py +0 -14
  87. deepeval/tracing/message_types/tools.py +0 -18
  88. {deepeval-3.6.7.dist-info → deepeval-3.6.9.dist-info}/LICENSE.md +0 -0
  89. {deepeval-3.6.7.dist-info → deepeval-3.6.9.dist-info}/WHEEL +0 -0
  90. {deepeval-3.6.7.dist-info → deepeval-3.6.9.dist-info}/entry_points.txt +0 -0
deepeval/metrics/topic_adherence/schema.py
@@ -0,0 +1,16 @@
+ from pydantic import BaseModel
+ from typing import List, Dict, Literal
+
+
+ class QAPair(BaseModel):
+     question: str
+     response: str
+
+
+ class QAPairs(BaseModel):
+     qa_pairs: List[QAPair]
+
+
+ class RelevancyVerdict(BaseModel):
+     verdict: Literal["TP", "TN", "FP", "FN"]
+     reason: str
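These Pydantic models are what the judge model's JSON gets parsed into when structured output is not available. A minimal, self-contained sketch (not part of the diff) of how the `Literal` field rejects any verdict outside the TP/TN/FP/FN truth table:

```python
# Illustrative only: redefines RelevancyVerdict locally so the snippet runs standalone.
from typing import Literal
from pydantic import BaseModel, ValidationError


class RelevancyVerdict(BaseModel):
    verdict: Literal["TP", "TN", "FP", "FN"]
    reason: str


RelevancyVerdict(verdict="TP", reason="on-topic and answered")  # validates fine
try:
    RelevancyVerdict(verdict="MAYBE", reason="not a valid label")  # rejected
except ValidationError as err:
    print(err)
```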
deepeval/metrics/topic_adherence/template.py
@@ -0,0 +1,162 @@
+ from typing import List
+ import textwrap
+
+
+ class TopicAdherenceTemplate:
+
+     @staticmethod
+     def get_qa_pairs(
+         conversation: str,
+     ) -> str:
+         return textwrap.dedent(
+             f"""Your task is to extract question-answer (QA) pairs from a multi-turn conversation between a `user` and an `assistant`.
+
+ You must return only valid pairs where:
+ - The **question** comes from the `user`.
+ - The **response** comes from the `assistant`.
+ - Both question and response must appear **explicitly** in the conversation.
+
+ Do not infer information beyond what is stated. Ignore irrelevant or conversational turns (e.g. greetings, affirmations) that do not constitute clear QA pairs.
+ If there are multiple questions and multiple answers in a single sentence, break them into separate pairs. Each pair must be standalone and should not contain more than one question or response.
+
+ OUTPUT FORMAT:
+ Return a **JSON object** with a single key `"qa_pairs"`: a list of objects, each with 2 keys:
+ - `"question"`: the user's question
+ - `"response"`: the assistant's direct response
+
+ If no valid QA pairs are found, return:
+ ```json
+ {{
+     "qa_pairs": []
+ }}
+ ```
+
+ CHAIN OF THOUGHT:
+ - Read the full conversation sequentially.
+ - Identify user turns that clearly ask a question (explicit or strongly implied).
+ - Match each question with the immediate assistant response.
+ - Only include pairs where the assistant's reply directly addresses the user's question.
+ - Do not include incomplete, ambiguous, or out-of-context entries.
+
+ EXAMPLE:
+
+ Conversation:
+
+ user: Which food is best for diabetic patients?
+ assistant: Steel-cut oats are good for diabetic patients
+ user: Is it better if I eat muesli instead of oats?
+ assistant: While muesli is good for diabetic people, steel-cut oats are preferred. Refer to your nutritionist for better guidance.
+
+ Example JSON:
+ {{
+     "qa_pairs": [{{"question": "Which food is best for diabetic patients?", "response": "Steel-cut oats are good for diabetic patients"}},
+     {{"question": "Is it better if I eat muesli instead of oats?", "response": "While muesli is good for diabetic people, steel-cut oats are preferred. Refer to your nutritionist for better guidance."}}]
+ }}
+ ===== END OF EXAMPLE =====
+
+ **
+ IMPORTANT: Please make sure to only return in JSON format with one key: 'qa_pairs' and the value MUST be a list of dictionaries
+ **
+
+ Conversation:
+ {conversation}
+ JSON:
+ """
+         )
+
+     @staticmethod
+     def get_qa_pair_verdict(
+         relevant_topics: List[str],
+         question: str,
+         response: str,
+     ) -> str:
+         return textwrap.dedent(
+             f"""You are given:
+ - A list of **relevant topics**
+ - A **user question**
+ - An **assistant response**
+
+ Your task is to:
+ 1. Determine if the question is relevant to the list of topics.
+ 2. If it is relevant, evaluate whether the response properly answers the question.
+ 3. Based on both relevance and correctness, assign one of four possible verdicts.
+ 4. Give a simple, comprehensive reason explaining why this question-answer pair was assigned this verdict.
+
+ VERDICTS:
+ - `"TP"` (True Positive): Question is relevant and the response correctly answers it.
+ - `"FN"` (False Negative): Question is relevant, but the assistant refused to answer or gave an irrelevant response.
+ - `"FP"` (False Positive): Question is NOT relevant, but the assistant still gave an answer (based on general/training knowledge).
+ - `"TN"` (True Negative): Question is NOT relevant, and the assistant correctly refused to answer.
+
+ OUTPUT FORMAT:
+ Return only a **JSON object** with two keys:
+ ```json
+ {{
+     "verdict": "TP", // or "TN", "FP", "FN"
+     "reason": "Reason why the verdict is 'TP'"
+ }}
+ ```
+ CHAIN OF THOUGHT:
+ - Check if the question aligns with any of the relevant topics.
+ - If yes:
+     - Assess if the response is correct, complete, and directly answers the question.
+ - If no:
+     - Check if the assistant refused appropriately or gave an unwarranted answer.
+ - Choose the correct verdict using the definitions above.
+
+ EXAMPLE:
+
+ Relevant topics: ["health nutrition", "food and their benefits"]
+ Question: "Which food is best for diabetic patients?"
+ Response: "Steel-cut oats are good for diabetic patients"
+
+ Example JSON:
+ {{
+     "verdict": "TP",
+     "reason": "The question asks about food for diabetic patients and the response clearly answers that oats are good for diabetic patients. Both align with the relevant topics of health nutrition and food and their benefits."
+ }}
+
+ ===== END OF EXAMPLE =====
+
+ **
+ IMPORTANT: Please make sure to only return in JSON format with two keys: 'verdict' and 'reason'
+ **
+
+ Relevant topics: {relevant_topics}
+ Question: {question}
+ Response: {response}
+
+ JSON:
+ """
+         )
+
+     @staticmethod
+     def generate_reason(success, score, threshold, TP, TN, FP, FN) -> str:
+         return textwrap.dedent(
+             f"""You are given a score for a metric that calculates whether an agent has adhered to its topics.
+ You are also given a list of reasons for the truth table values that were used to calculate the final score.
+
+ Your task is to go through these reasons and give a single, final explanation that clearly explains why this metric has passed or failed.
+
+ Pass: {success}
+ Score: {score}
+ Threshold: {threshold}
+
+ Here are the reasons for all truth table entries:
+
+ True positive reasons: {TP[1]}
+ True negative reasons: {TN[1]}
+ False positive reasons: {FP[1]}
+ False negative reasons: {FN[1]}
+
+ Score calculation = (Number of True Positives + Number of True Negatives) / Total number of table entries
+
+ **
+ IMPORTANT: Now generate a comprehensive reason that explains why this metric passed or failed. You MUST output only the reason as a string and nothing else.
+ **
+
+ Output ONLY the reason, DON'T output anything else.
+
+ Reason:
+ """
+         )
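The `generate_reason` prompt above spells out the scoring rule the metric applies: plain truth-table accuracy over the QA-pair verdicts. A tiny illustration with hypothetical verdict counts:

```python
# Illustrative only: the adherence score is (TP + TN) / (TP + TN + FP + FN).
tp, tn, fp, fn = 3, 1, 1, 1                # hypothetical verdict counts
score = (tp + tn) / (tp + tn + fp + fn)
print(round(score, 3))                     # 0.667, which passes a 0.5 threshold
```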
deepeval/metrics/topic_adherence/topic_adherence.py
@@ -0,0 +1,355 @@
+ from typing import Optional, List, Union
+
+ from deepeval.utils import get_or_create_event_loop, prettify_list
+ from deepeval.metrics.utils import (
+     construct_verbose_logs,
+     trimAndLoadJson,
+     get_unit_interactions,
+     check_conversational_test_case_params,
+     initialize_model,
+ )
+ from deepeval.test_case import ConversationalTestCase, TurnParams
+ from deepeval.metrics import BaseConversationalMetric
+ from deepeval.models import DeepEvalBaseLLM
+ from deepeval.metrics.indicator import metric_progress_indicator
+ from deepeval.metrics.topic_adherence.template import TopicAdherenceTemplate
+ from deepeval.metrics.topic_adherence.schema import (
+     RelevancyVerdict,
+     QAPairs,
+     QAPair,
+ )
+ from deepeval.metrics.api import metric_data_manager
+
+
+ class TopicAdherenceMetric(BaseConversationalMetric):
+
+     _required_test_case_params = [
+         TurnParams.ROLE,
+         TurnParams.CONTENT,
+     ]
+
+     def __init__(
+         self,
+         relevant_topics: List[str],
+         threshold: float = 0.5,
+         model: Optional[Union[str, DeepEvalBaseLLM]] = None,
+         include_reason: bool = True,
+         async_mode: bool = True,
+         strict_mode: bool = False,
+         verbose_mode: bool = False,
+     ):
+         self.relevant_topics = relevant_topics
+         self.threshold = 1 if strict_mode else threshold
+         self.model, self.using_native_model = initialize_model(model)
+         self.evaluation_model = self.model.get_model_name()
+         self.include_reason = include_reason
+         self.async_mode = async_mode
+         self.strict_mode = strict_mode
+         self.verbose_mode = verbose_mode
+
+     def measure(
+         self,
+         test_case: ConversationalTestCase,
+         _show_indicator: bool = True,
+         _in_component: bool = False,
+         _log_metric_to_confident: bool = True,
+     ):
+         check_conversational_test_case_params(
+             test_case, self._required_test_case_params, self
+         )
+
+         self.evaluation_cost = 0 if self.using_native_model else None
+         with metric_progress_indicator(
+             self, _show_indicator=_show_indicator, _in_component=_in_component
+         ):
+             if self.async_mode:
+                 loop = get_or_create_event_loop()
+                 loop.run_until_complete(
+                     self.a_measure(
+                         test_case,
+                         _show_indicator=False,
+                         _in_component=_in_component,
+                         _log_metric_to_confident=_log_metric_to_confident,
+                     )
+                 )
+             else:
+                 unit_interactions = get_unit_interactions(test_case.turns)
+                 interaction_pairs = self._get_qa_pairs(unit_interactions)
+                 True_Positives = [0, []]
+                 True_Negatives = [0, []]
+                 False_Positives = [0, []]
+                 False_Negatives = [0, []]
+                 for interaction_pair in interaction_pairs:
+                     for qa_pair in interaction_pair.qa_pairs:
+                         qa_verdict: RelevancyVerdict = self._get_qa_verdict(
+                             qa_pair
+                         )
+                         if qa_verdict.verdict == "TP":
+                             True_Positives[0] += 1
+                             True_Positives[1].append(qa_verdict.reason)
+                         elif qa_verdict.verdict == "TN":
+                             True_Negatives[0] += 1
+                             True_Negatives[1].append(qa_verdict.reason)
+                         elif qa_verdict.verdict == "FP":
+                             False_Positives[0] += 1
+                             False_Positives[1].append(qa_verdict.reason)
+                         elif qa_verdict.verdict == "FN":
+                             False_Negatives[0] += 1
+                             False_Negatives[1].append(qa_verdict.reason)
+
+                 self.score = self._get_score(
+                     True_Positives,
+                     True_Negatives,
+                     False_Positives,
+                     False_Negatives,
+                 )
+                 self.success = self.score >= self.threshold
+                 self.reason = self._generate_reason(
+                     True_Positives,
+                     True_Negatives,
+                     False_Positives,
+                     False_Negatives,
+                 )
+
+                 self.verbose_logs = construct_verbose_logs(
+                     self,
+                     steps=[
+                         f"Interaction Pairs: \n{prettify_list(interaction_pairs)} \n",
+                         f"Truth Table:",
+                         f"\nTrue Positives:",
+                         f"Count: {True_Positives[0]}, Reasons: {prettify_list(True_Positives[1])} \n",
+                         f"\nTrue Negatives: ",
+                         f"Count: {True_Negatives[0]}, Reasons: {prettify_list(True_Negatives[1])} \n",
+                         f"\nFalse Positives: ",
+                         f"Count: {False_Positives[0]}, Reasons: {prettify_list(False_Positives[1])} \n",
+                         f"\nFalse Negatives: ",
+                         f"Count: {False_Negatives[0]}, Reasons: {prettify_list(False_Negatives[1])} \n",
+                         f"Final Score: {self.score}",
+                         f"Final Reason: {self.reason}",
+                     ],
+                 )
+
+                 if _log_metric_to_confident:
+                     metric_data_manager.post_metric_if_enabled(
+                         self, test_case=test_case
+                     )
+
+             return self.score
+
+     async def a_measure(
+         self,
+         test_case: ConversationalTestCase,
+         _show_indicator: bool = True,
+         _in_component: bool = False,
+         _log_metric_to_confident: bool = True,
+     ):
+         check_conversational_test_case_params(
+             test_case, self._required_test_case_params, self
+         )
+
+         self.evaluation_cost = 0 if self.using_native_model else None
+
+         with metric_progress_indicator(
+             self,
+             async_mode=True,
+             _show_indicator=_show_indicator,
+             _in_component=_in_component,
+         ):
+             unit_interactions = get_unit_interactions(test_case.turns)
+             interaction_pairs = await self._a_get_qa_pairs(unit_interactions)
+             True_Positives = [0, []]
+             True_Negatives = [0, []]
+             False_Positives = [0, []]
+             False_Negatives = [0, []]
+             for interaction_pair in interaction_pairs:
+                 for qa_pair in interaction_pair.qa_pairs:
+                     qa_verdict: RelevancyVerdict = await self._a_get_qa_verdict(qa_pair)
+                     if qa_verdict.verdict == "TP":
+                         True_Positives[0] += 1
+                         True_Positives[1].append(qa_verdict.reason)
+                     elif qa_verdict.verdict == "TN":
+                         True_Negatives[0] += 1
+                         True_Negatives[1].append(qa_verdict.reason)
+                     elif qa_verdict.verdict == "FP":
+                         False_Positives[0] += 1
+                         False_Positives[1].append(qa_verdict.reason)
+                     elif qa_verdict.verdict == "FN":
+                         False_Negatives[0] += 1
+                         False_Negatives[1].append(qa_verdict.reason)
+
+             self.score = self._get_score(
+                 True_Positives, True_Negatives, False_Positives, False_Negatives
+             )
+             self.success = self.score >= self.threshold
+             self.reason = await self._a_generate_reason(
+                 True_Positives, True_Negatives, False_Positives, False_Negatives
+             )
+
+             self.verbose_logs = construct_verbose_logs(
+                 self,
+                 steps=[
+                     f"Interaction Pairs: \n{prettify_list(interaction_pairs)} \n",
+                     f"Truth Table:",
+                     f"\nTrue Positives:",
+                     f"Count: {True_Positives[0]}, Reasons: {prettify_list(True_Positives[1])} \n",
+                     f"\nTrue Negatives: ",
+                     f"Count: {True_Negatives[0]}, Reasons: {prettify_list(True_Negatives[1])} \n",
+                     f"\nFalse Positives: ",
+                     f"Count: {False_Positives[0]}, Reasons: {prettify_list(False_Positives[1])} \n",
+                     f"\nFalse Negatives: ",
+                     f"Count: {False_Negatives[0]}, Reasons: {prettify_list(False_Negatives[1])} \n",
+                     f"Final Score: {self.score}",
+                     f"Final Reason: {self.reason}",
+                 ],
+             )
+
+             if _log_metric_to_confident:
+                 metric_data_manager.post_metric_if_enabled(
+                     self, test_case=test_case
+                 )
+
+             return self.score
+
+     def _generate_reason(self, TP, TN, FP, FN):
+         total = TP[0] + TN[0] + FP[0] + FN[0]
+         if total <= 0:
+             return "There were no question-answer pairs to evaluate. Please enable verbose logs to inspect the evaluation steps taken."
+         prompt = TopicAdherenceTemplate.generate_reason(
+             self.success, self.score, self.threshold, TP, TN, FP, FN
+         )
+         if self.using_native_model:
+             res, cost = self.model.generate(prompt)
+             self.evaluation_cost += cost
+             return res
+         else:
+             res = self.model.generate(prompt)
+             return res
+
+     async def _a_generate_reason(self, TP, TN, FP, FN):
+         prompt = TopicAdherenceTemplate.generate_reason(
+             self.success, self.score, self.threshold, TP, TN, FP, FN
+         )
+         if self.using_native_model:
+             res, cost = await self.model.a_generate(prompt)
+             self.evaluation_cost += cost
+             return res
+         else:
+             res = await self.model.a_generate(prompt)
+             return res
+
+     def _get_score(self, TP, TN, FP, FN) -> float:
+         true_values = TP[0] + TN[0]
+         total = TP[0] + TN[0] + FP[0] + FN[0]
+         if total <= 0:
+             score = 0
+         else:
+             score = true_values / total
+         return 0 if self.strict_mode and score < self.threshold else score
+
+     def _get_qa_verdict(self, qa_pair: QAPair) -> RelevancyVerdict:
+         prompt = TopicAdherenceTemplate.get_qa_pair_verdict(
+             self.relevant_topics, qa_pair.question, qa_pair.response
+         )
+         if self.using_native_model:
+             res, cost = self.model.generate(prompt, schema=RelevancyVerdict)
+             self.evaluation_cost += cost
+             return res
+         else:
+             try:
+                 res = self.model.generate(prompt, schema=RelevancyVerdict)
+                 return res
+             except TypeError:
+                 res = self.model.generate(prompt)
+                 data = trimAndLoadJson(res, self)
+                 return RelevancyVerdict(**data)
+
+     async def _a_get_qa_verdict(self, qa_pair: QAPair) -> RelevancyVerdict:
+         prompt = TopicAdherenceTemplate.get_qa_pair_verdict(
+             self.relevant_topics, qa_pair.question, qa_pair.response
+         )
+         if self.using_native_model:
+             res, cost = await self.model.a_generate(
+                 prompt, schema=RelevancyVerdict
+             )
+             self.evaluation_cost += cost
+             return res
+         else:
+             try:
+                 res = await self.model.a_generate(
+                     prompt, schema=RelevancyVerdict
+                 )
+                 return res
+             except TypeError:
+                 res = await self.model.a_generate(prompt)
+                 data = trimAndLoadJson(res, self)
+                 return RelevancyVerdict(**data)
+
+     def _get_qa_pairs(self, unit_interactions: List) -> List[QAPairs]:
+         qa_pairs = []
+         for unit_interaction in unit_interactions:
+             conversation = "Conversation: \n"
+             for turn in unit_interaction:
+                 conversation += f"{turn.role} \n"
+                 conversation += f"{turn.content} \n\n"
+             prompt = TopicAdherenceTemplate.get_qa_pairs(conversation)
+             new_pair = None
+
+             if self.using_native_model:
+                 res, cost = self.model.generate(prompt, schema=QAPairs)
+                 self.evaluation_cost += cost
+                 new_pair = res
+             else:
+                 try:
+                     res = self.model.generate(prompt, schema=QAPairs)
+                     new_pair = res
+                 except TypeError:
+                     res = self.model.generate(prompt)
+                     data = trimAndLoadJson(res, self)
+                     new_pair = QAPairs(**data)
+
+             if new_pair is not None:
+                 qa_pairs.append(new_pair)
+
+         return qa_pairs
+
+     async def _a_get_qa_pairs(self, unit_interactions: List) -> List[QAPairs]:
+         qa_pairs = []
+         for unit_interaction in unit_interactions:
+             conversation = "Conversation: \n"
+             for turn in unit_interaction:
+                 conversation += f"{turn.role} \n"
+                 conversation += f"{turn.content} \n\n"
+             prompt = TopicAdherenceTemplate.get_qa_pairs(conversation)
+             new_pair = None
+
+             if self.using_native_model:
+                 res, cost = await self.model.a_generate(prompt, schema=QAPairs)
+                 self.evaluation_cost += cost
+                 new_pair = res
+             else:
+                 try:
+                     res = await self.model.a_generate(prompt, schema=QAPairs)
+                     new_pair = res
+                 except TypeError:
+                     res = await self.model.a_generate(prompt)
+                     data = trimAndLoadJson(res, self)
+                     new_pair = QAPairs(**data)
+
+             if new_pair is not None:
+                 qa_pairs.append(new_pair)
+
+         return qa_pairs
+
+     def is_successful(self) -> bool:
+         if self.error is not None:
+             self.success = False
+         else:
+             try:
+                 self.success = self.score >= self.threshold
+             except:
+                 self.success = False
+         return self.success
+
+     @property
+     def __name__(self):
+         return "Topic Adherence"
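Taken together, the three new files add a conversational `TopicAdherenceMetric`. A minimal usage sketch, assuming deepeval's `Turn`/`ConversationalTestCase` API and that the metric is re-exported from `deepeval.metrics` (see the `deepeval/metrics/__init__.py` entry in the file list); not taken from the diff itself:

```python
# Hedged sketch: import paths assume the usual deepeval public API.
from deepeval.test_case import ConversationalTestCase, Turn
from deepeval.metrics import TopicAdherenceMetric

test_case = ConversationalTestCase(
    turns=[
        Turn(role="user", content="Which food is best for diabetic patients?"),
        Turn(role="assistant", content="Steel-cut oats are a good option."),
    ]
)

metric = TopicAdherenceMetric(
    relevant_topics=["health nutrition", "food and their benefits"],
    threshold=0.5,
    verbose_mode=True,
)
metric.measure(test_case)          # extracts QA pairs, assigns TP/TN/FP/FN verdicts
print(metric.score, metric.reason)
```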
deepeval/models/embedding_models/azure_embedding_model.py
@@ -1,4 +1,4 @@
- from typing import Dict, List
+ from typing import Dict, List, Optional
  from openai import AzureOpenAI, AsyncAzureOpenAI
  from deepeval.key_handler import (
      EmbeddingKeyValues,
@@ -17,28 +17,39 @@ retry_azure = create_retry_decorator(PS.AZURE)


  class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):
-     def __init__(self, **kwargs):
-         self.azure_openai_api_key = KEY_FILE_HANDLER.fetch_data(
+     def __init__(
+         self,
+         openai_api_key: Optional[str] = None,
+         openai_api_version: Optional[str] = None,
+         azure_endpoint: Optional[str] = None,
+         azure_deployment: Optional[str] = None,
+         model: Optional[str] = None,
+         generation_kwargs: Optional[Dict] = None,
+         **client_kwargs,
+     ):
+         self.openai_api_key = openai_api_key or KEY_FILE_HANDLER.fetch_data(
              ModelKeyValues.AZURE_OPENAI_API_KEY
          )
-         self.openai_api_version = KEY_FILE_HANDLER.fetch_data(
-             ModelKeyValues.OPENAI_API_VERSION
+         self.openai_api_version = (
+             openai_api_version
+             or KEY_FILE_HANDLER.fetch_data(ModelKeyValues.OPENAI_API_VERSION)
          )
-         self.azure_embedding_deployment = KEY_FILE_HANDLER.fetch_data(
-             EmbeddingKeyValues.AZURE_EMBEDDING_DEPLOYMENT_NAME
-         )
-         self.azure_endpoint = KEY_FILE_HANDLER.fetch_data(
+         self.azure_endpoint = azure_endpoint or KEY_FILE_HANDLER.fetch_data(
              ModelKeyValues.AZURE_OPENAI_ENDPOINT
          )
-         self.model_name = self.azure_embedding_deployment
-         self.kwargs = kwargs
+         self.azure_deployment = azure_deployment or KEY_FILE_HANDLER.fetch_data(
+             EmbeddingKeyValues.AZURE_EMBEDDING_DEPLOYMENT_NAME
+         )
+         self.client_kwargs = client_kwargs or {}
+         self.model_name = model or self.azure_deployment
+         self.generation_kwargs = generation_kwargs or {}
+         super().__init__(self.model_name)

      @retry_azure
      def embed_text(self, text: str) -> List[float]:
          client = self.load_model(async_mode=False)
          response = client.embeddings.create(
-             input=text,
-             model=self.azure_embedding_deployment,
+             input=text, model=self.model_name, **self.generation_kwargs
          )
          return response.data[0].embedding

@@ -46,8 +57,7 @@ class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):
      def embed_texts(self, texts: List[str]) -> List[List[float]]:
          client = self.load_model(async_mode=False)
          response = client.embeddings.create(
-             input=texts,
-             model=self.azure_embedding_deployment,
+             input=texts, model=self.model_name, **self.generation_kwargs
          )
          return [item.embedding for item in response.data]

@@ -55,8 +65,7 @@ class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):
      async def a_embed_text(self, text: str) -> List[float]:
          client = self.load_model(async_mode=True)
          response = await client.embeddings.create(
-             input=text,
-             model=self.azure_embedding_deployment,
+             input=text, model=self.model_name, **self.generation_kwargs
          )
          return response.data[0].embedding

@@ -64,8 +73,7 @@ class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):
      async def a_embed_texts(self, texts: List[str]) -> List[List[float]]:
          client = self.load_model(async_mode=True)
          response = await client.embeddings.create(
-             input=texts,
-             model=self.azure_embedding_deployment,
+             input=texts, model=self.model_name, **self.generation_kwargs
          )
          return [item.embedding for item in response.data]

@@ -77,30 +85,23 @@ class AzureOpenAIEmbeddingModel(DeepEvalBaseEmbeddingModel):
              return self._build_client(AzureOpenAI)
          return self._build_client(AsyncAzureOpenAI)

-     def _client_kwargs(self) -> Dict:
-         """
-         If Tenacity is managing retries, force OpenAI SDK retries off to avoid double retries.
-         If the user opts into SDK retries for 'azure' via DEEPEVAL_SDK_RETRY_PROVIDERS,
-         leave their retry settings as is.
-         """
-         kwargs = dict(self.kwargs or {})
+     def _build_client(self, cls):
+         client_kwargs = self.client_kwargs.copy()
          if not sdk_retries_for(PS.AZURE):
-             kwargs["max_retries"] = 0
-         return kwargs
+             client_kwargs["max_retries"] = 0

-     def _build_client(self, cls):
-         kw = dict(
-             api_key=self.azure_openai_api_key,
+         client_init_kwargs = dict(
+             api_key=self.openai_api_key,
              api_version=self.openai_api_version,
              azure_endpoint=self.azure_endpoint,
-             azure_deployment=self.azure_embedding_deployment,
-             **self._client_kwargs(),
+             azure_deployment=self.azure_deployment,
+             **client_kwargs,
          )
          try:
-             return cls(**kw)
+             return cls(**client_init_kwargs)
          except TypeError as e:
              # older OpenAI SDKs may not accept max_retries, in that case remove and retry once
              if "max_retries" in str(e):
-                 kw.pop("max_retries", None)
-                 return cls(**kw)
+                 client_init_kwargs.pop("max_retries", None)
+                 return cls(**client_init_kwargs)
              raise
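The constructor now accepts explicit arguments instead of reading everything from the key file. A hedged sketch of the new call surface (the argument names come from the diff; the import path and the example values are assumptions):

```python
# Hedged sketch: endpoint, deployment, API version and extra kwargs are placeholders.
from deepeval.models import AzureOpenAIEmbeddingModel

embedder = AzureOpenAIEmbeddingModel(
    openai_api_key="<azure-openai-key>",
    openai_api_version="2024-06-01",
    azure_endpoint="https://my-resource.openai.azure.com/",
    azure_deployment="text-embedding-3-small",
    model="text-embedding-3-small",
    generation_kwargs={"dimensions": 512},  # splatted into embeddings.create(...)
    timeout=30,                             # remaining kwargs go to the Azure client
)
vector = embedder.embed_text("hello world")
```

Anything not matched by a named parameter falls back to the values stored by `deepeval set-azure-openai`, so existing key-file configurations keep working unchanged.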