deepeval 3.5.6__py3-none-any.whl → 3.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepeval/_version.py CHANGED
@@ -1 +1 @@
- __version__: str = "3.5.6"
+ __version__: str = "3.5.8"
deepeval/dataset/dataset.py CHANGED
@@ -458,6 +458,8 @@ class EvaluationDataset:
  tools_called_col_delimiter: str = ";",
  expected_tools_col_name: Optional[str] = "expected_tools",
  expected_tools_col_delimiter: str = ";",
+ comments_key_name: str = "comments",
+ name_key_name: str = "name",
  source_file_col_name: Optional[str] = None,
  additional_metadata_col_name: Optional[str] = None,
  scenario_col_name: Optional[str] = "scenario",
@@ -526,6 +528,8 @@ class EvaluationDataset:
  df, expected_tools_col_name, default=""
  )
  ]
+ comments = get_column_data(df, comments_key_name)
+ name = get_column_data(df, name_key_name)
  source_files = get_column_data(df, source_file_col_name)
  additional_metadatas = [
  ast.literal_eval(metadata) if metadata else None
@@ -546,6 +550,8 @@ class EvaluationDataset:
  retrieval_context,
  tools_called,
  expected_tools,
+ comments,
+ name,
  source_file,
  additional_metadata,
  scenario,
@@ -560,6 +566,8 @@ class EvaluationDataset:
  retrieval_contexts,
  tools_called,
  expected_tools,
+ comments,
+ name,
  source_files,
  additional_metadatas,
  scenarios,
@@ -569,7 +577,7 @@ class EvaluationDataset:
  ):
  if scenario:
  self._multi_turn = True
- parsed_turns = parse_turns(turns)
+ parsed_turns = parse_turns(turns) if turns else []
  self.goldens.append(
  ConversationalGolden(
  scenario=scenario,
@@ -577,6 +585,8 @@ class EvaluationDataset:
  expected_outcome=expected_outcome,
  user_description=user_description,
  context=context,
+ comments=comments,
+ name=name,
  )
  )
  else:
@@ -592,6 +602,8 @@ class EvaluationDataset:
  expected_tools=expected_tools,
  additional_metadata=additional_metadata,
  source_file=source_file,
+ comments=comments,
+ name=name,
  )
  )
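The two new arguments above default to the "comments" and "name" columns, so goldens loaded from CSV can now carry a reviewer comment and a display name, and empty "turns" values no longer break conversational goldens. A minimal sketch of how this might be called; the method name add_goldens_from_csv_file, the input/expected-output column arguments, and the file path are assumptions not shown in this diff:

from deepeval.dataset import EvaluationDataset

dataset = EvaluationDataset()
# Hypothetical CSV with headers: input, expected_output, comments, name
dataset.add_goldens_from_csv_file(
    file_path="goldens.csv",                      # assumed example path
    input_col_name="input",                       # assumed parameter name
    expected_output_col_name="expected_output",   # assumed parameter name
    comments_key_name="comments",                 # new in 3.5.8 (default shown)
    name_key_name="name",                         # new in 3.5.8 (default shown)
)
print(dataset.goldens[0].name, dataset.goldens[0].comments)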
@@ -605,6 +617,8 @@ class EvaluationDataset:
  retrieval_context_key_name: Optional[str] = "retrieval_context",
  tools_called_key_name: Optional[str] = "tools_called",
  expected_tools_key_name: Optional[str] = "expected_tools",
+ comments_key_name: str = "comments",
+ name_key_name: str = "name",
  source_file_key_name: Optional[str] = "source_file",
  additional_metadata_key_name: Optional[str] = "additional_metadata",
  scenario_key_name: Optional[str] = "scenario",
@@ -628,7 +642,8 @@ class EvaluationDataset:
  expected_outcome = json_obj.get(expected_outcome_key_name)
  user_description = json_obj.get(user_description_key_name)
  context = json_obj.get(context_key_name)
-
+ comments = json_obj.get(comments_key_name)
+ name = json_obj.get(name_key_name)
  parsed_turns = parse_turns(turns) if turns else []

  self._multi_turn = True
@@ -639,6 +654,8 @@ class EvaluationDataset:
  expected_outcome=expected_outcome,
  user_description=user_description,
  context=context,
+ comments=comments,
+ name=name,
  )
  )
  else:
@@ -649,6 +666,8 @@ class EvaluationDataset:
  retrieval_context = json_obj.get(retrieval_context_key_name)
  tools_called = json_obj.get(tools_called_key_name)
  expected_tools = json_obj.get(expected_tools_key_name)
+ comments = json_obj.get(comments_key_name)
+ name = json_obj.get(name_key_name)
  source_file = json_obj.get(source_file_key_name)
  additional_metadata = json_obj.get(additional_metadata_key_name)

@@ -663,6 +682,8 @@ class EvaluationDataset:
  tools_called=tools_called,
  expected_tools=expected_tools,
  additional_metadata=additional_metadata,
+ comments=comments,
+ name=name,
  source_file=source_file,
  )
  )
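The JSON loader picks up the same two fields via comments_key_name and name_key_name, for both conversational and single-turn goldens. A short sketch, assuming the method is add_goldens_from_json_file and that it accepts a file_path argument:

import json
from deepeval.dataset import EvaluationDataset

# Hypothetical goldens file using the new default key names.
goldens = [
    {
        "input": "What is the capital of France?",
        "expected_output": "Paris",
        "comments": "verified by a reviewer",
        "name": "capital-question",
    }
]
with open("goldens.json", "w") as f:
    json.dump(goldens, f)

dataset = EvaluationDataset()
dataset.add_goldens_from_json_file(file_path="goldens.json")  # assumed method name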
@@ -928,6 +949,8 @@ class EvaluationDataset:
  expected_outcome=golden.expected_outcome,
  user_description=golden.user_description,
  context=golden.context,
+ name=golden.name,
+ comments=golden.comments,
  )
  for golden in self.goldens
  ]
@@ -939,6 +962,8 @@ class EvaluationDataset:
  actual_output=golden.actual_output,
  retrieval_context=golden.retrieval_context,
  context=golden.context,
+ name=golden.name,
+ comments=golden.comments,
  source_file=golden.source_file,
  )
  for golden in self.goldens
@@ -981,6 +1006,8 @@ class EvaluationDataset:
  "expected_outcome": golden.expected_outcome,
  "user_description": golden.user_description,
  "context": golden.context,
+ "name": golden.name,
+ "comments": golden.comments,
  }
  for golden in goldens
  ]
@@ -992,6 +1019,8 @@ class EvaluationDataset:
  "expected_output": golden.expected_output,
  "retrieval_context": golden.retrieval_context,
  "context": golden.context,
+ "name": golden.name,
+ "comments": golden.comments,
  "source_file": golden.source_file,
  }
  for golden in goldens
@@ -1010,6 +1039,8 @@ class EvaluationDataset:
  "expected_outcome",
  "user_description",
  "context",
+ "name",
+ "comments",
  ]
  )
  for golden in goldens:
@@ -1030,6 +1061,8 @@ class EvaluationDataset:
  golden.expected_outcome,
  golden.user_description,
  context,
+ golden.name,
+ golden.comments,
  ]
  )
  else:
@@ -1040,6 +1073,8 @@ class EvaluationDataset:
  "expected_output",
  "retrieval_context",
  "context",
+ "name",
+ "comments",
  "source_file",
  ]
  )
@@ -1061,6 +1096,8 @@ class EvaluationDataset:
  golden.expected_output,
  retrieval_context,
  context,
+ golden.name,
+ golden.comments,
  golden.source_file,
  ]
  )
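Both serialization paths (JSON objects and CSV rows) now include "name" and "comments" for every golden, so the fields survive a save/reload round trip. A sketch, assuming the saving method is save_as and that EvaluationDataset accepts goldens in its constructor:

from deepeval.dataset import EvaluationDataset, Golden

dataset = EvaluationDataset(
    goldens=[
        Golden(
            input="What is the capital of France?",
            expected_output="Paris",
            name="capital-question",          # now written to the output file
            comments="verified by a reviewer",
        )
    ]
)
dataset.save_as(file_type="json", directory="./saved_datasets")  # assumed call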
deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py CHANGED
@@ -1,6 +1,6 @@
  """LLM evaluated metric based on the GEval framework: https://arxiv.org/pdf/2303.16634.pdf"""

- from typing import Optional, List, Tuple, Union
+ from typing import Optional, List, Tuple, Type, Union
  from deepeval.models import DeepEvalBaseMLLM
  from deepeval.metrics import BaseMultimodalMetric
  from deepeval.test_case import (
@@ -10,7 +10,10 @@ from deepeval.test_case import (
  from deepeval.metrics.multimodal_metrics.multimodal_g_eval.template import (
  MultimodalGEvalTemplate,
  )
- from deepeval.metrics.multimodal_metrics.multimodal_g_eval.schema import *
+ from deepeval.metrics.multimodal_metrics.multimodal_g_eval.schema import (
+ Steps,
+ ReasonScore,
+ )
  from deepeval.utils import get_or_create_event_loop, prettify_list
  from deepeval.metrics.indicator import metric_progress_indicator
  from deepeval.metrics.utils import (
@@ -49,6 +52,9 @@ class MultimodalGEval(BaseMultimodalMetric):
  async_mode: bool = True,
  strict_mode: bool = False,
  verbose_mode: bool = False,
+ evaluation_template: Type[
+ MultimodalGEvalTemplate
+ ] = MultimodalGEvalTemplate,
  _include_g_eval_suffix: bool = True,
  ):
  validate_criteria_and_evaluation_steps(criteria, evaluation_steps)
@@ -65,6 +71,7 @@ class MultimodalGEval(BaseMultimodalMetric):
  self.async_mode = async_mode
  self.verbose_mode = verbose_mode
  self._include_g_eval_suffix = _include_g_eval_suffix
+ self.evaluation_template = evaluation_template

  def measure(
  self,
@@ -167,7 +174,7 @@ class MultimodalGEval(BaseMultimodalMetric):
  g_eval_params_str = construct_g_eval_params_string(
  self.evaluation_params
  )
- prompt = MultimodalGEvalTemplate.generate_evaluation_steps(
+ prompt = self.evaluation_template.generate_evaluation_steps(
  criteria=self.criteria, parameters=g_eval_params_str
  )
  if self.using_native_model:
@@ -190,7 +197,7 @@ class MultimodalGEval(BaseMultimodalMetric):
  g_eval_params_str = construct_g_eval_params_string(
  self.evaluation_params
  )
- prompt = MultimodalGEvalTemplate.generate_evaluation_steps(
+ prompt = self.evaluation_template.generate_evaluation_steps(
  criteria=self.criteria, parameters=g_eval_params_str
  )
  if self.using_native_model:
@@ -218,7 +225,7 @@ class MultimodalGEval(BaseMultimodalMetric):

  if not self.strict_mode:
  rubric_str = format_rubrics(self.rubric) if self.rubric else None
- prompt = MultimodalGEvalTemplate.generate_evaluation_results(
+ prompt = self.evaluation_template.generate_evaluation_results(
  evaluation_steps=number_evaluation_steps(self.evaluation_steps),
  test_case_list=test_case_list,
  parameters=g_eval_params_str,
@@ -227,11 +234,15 @@ class MultimodalGEval(BaseMultimodalMetric):
  _additional_context=_additional_context,
  )
  else:
- prompt = MultimodalGEvalTemplate.generate_strict_evaluation_results(
- evaluation_steps=number_evaluation_steps(self.evaluation_steps),
- test_case_list=test_case_list,
- parameters=g_eval_params_str,
- _additional_context=_additional_context,
+ prompt = (
+ self.evaluation_template.generate_strict_evaluation_results(
+ evaluation_steps=number_evaluation_steps(
+ self.evaluation_steps
+ ),
+ test_case_list=test_case_list,
+ parameters=g_eval_params_str,
+ _additional_context=_additional_context,
+ )
  )
  try:
  # don't use log probabilities for unsupported gpt models
@@ -256,7 +267,7 @@ class MultimodalGEval(BaseMultimodalMetric):
  score, res
  )
  return weighted_summed_score, reason
- except:
+ except Exception:
  return score, reason
  except (
  AttributeError
@@ -289,7 +300,7 @@ class MultimodalGEval(BaseMultimodalMetric):

  if not self.strict_mode:
  rubric_str = format_rubrics(self.rubric) if self.rubric else None
- prompt = MultimodalGEvalTemplate.generate_evaluation_results(
+ prompt = self.evaluation_template.generate_evaluation_results(
  evaluation_steps=number_evaluation_steps(self.evaluation_steps),
  test_case_list=test_case_list,
  parameters=g_eval_params_str,
@@ -298,11 +309,15 @@ class MultimodalGEval(BaseMultimodalMetric):
  _additional_context=_additional_context,
  )
  else:
- prompt = MultimodalGEvalTemplate.generate_strict_evaluation_results(
- evaluation_steps=number_evaluation_steps(self.evaluation_steps),
- test_case_list=test_case_list,
- parameters=g_eval_params_str,
- _additional_context=_additional_context,
+ prompt = (
+ self.evaluation_template.generate_strict_evaluation_results(
+ evaluation_steps=number_evaluation_steps(
+ self.evaluation_steps
+ ),
+ test_case_list=test_case_list,
+ parameters=g_eval_params_str,
+ _additional_context=_additional_context,
+ )
  )

  try:
@@ -326,7 +341,7 @@ class MultimodalGEval(BaseMultimodalMetric):
  score, res
  )
  return weighted_summed_score, reason
- except:
+ except Exception:
  return score, reason
  except AttributeError:
  # This catches the case where a_generate_raw_response doesn't exist.
@@ -352,7 +367,7 @@ class MultimodalGEval(BaseMultimodalMetric):
  else:
  try:
  self.success = self.score >= self.threshold
- except:
+ except Exception:
  self.success = False
  return self.success
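The new evaluation_template argument lets callers pass a subclass of MultimodalGEvalTemplate, and every prompt-building call in the hunks above now goes through self.evaluation_template instead of the class itself. A minimal sketch of a custom template; the overridden prompt wording and the metric configuration are illustrative:

from deepeval.metrics import MultimodalGEval
from deepeval.metrics.multimodal_metrics.multimodal_g_eval.template import (
    MultimodalGEvalTemplate,
)
from deepeval.test_case import MLLMTestCaseParams


class VisualDetailTemplate(MultimodalGEvalTemplate):
    @staticmethod
    def generate_evaluation_steps(parameters: str, criteria: str):
        # Prepend a domain-specific instruction, then fall back to the
        # stock template for the rest of the prompt.
        base = MultimodalGEvalTemplate.generate_evaluation_steps(
            parameters=parameters, criteria=criteria
        )
        return "Pay special attention to visual details.\n" + base


metric = MultimodalGEval(
    name="Image Correctness",
    criteria="Determine whether the actual output matches the image.",
    evaluation_params=[
        MLLMTestCaseParams.INPUT,
        MLLMTestCaseParams.ACTUAL_OUTPUT,
    ],
    evaluation_template=VisualDetailTemplate,  # new in 3.5.8
)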
deepeval/openai_agents/agent.py CHANGED
@@ -164,13 +164,12 @@ class _ObservedModel(Model):
  ):

  if isinstance(event, ResponseCompletedEvent):
- observer.result = (
- event.response.output_text
- ) # TODO: support other response types
+ observer.result = make_json_serializable(
+ event.response.output
+ )

  yield event

- observer.__exit__(None, None, None)
  except Exception as e:
  observer.__exit__(type(e), e, e.__traceback__)
  raise
deepeval/prompt/api.py CHANGED
@@ -8,6 +8,7 @@ class PromptInterpolationType(Enum):
  MUSTACHE_WITH_SPACE = "MUSTACHE_WITH_SPACE"
  FSTRING = "FSTRING"
  DOLLAR_BRACKETS = "DOLLAR_BRACKETS"
+ JINJA = "JINJA"


  class PromptMessage(BaseModel):
deepeval/prompt/prompt.py CHANGED
@@ -64,6 +64,10 @@ class Prompt:
  raise TypeError(
  "Unable to create Prompt where 'alias' and 'template' are both None. Please provide at least one to continue."
  )
+ if template and messages_template:
+ raise TypeError(
+ "Unable to create Prompt where 'template' and 'messages_template' are both provided. Please provide only one to continue."
+ )

  self.alias = alias
  self._text_template = template
@@ -71,6 +75,10 @@ class Prompt:
  self._version = None
  self._polling_tasks: Dict[str, asyncio.Task] = {}
  self._refresh_map: Dict[str, int] = {}
+ if template:
+ self._type = PromptType.TEXT
+ elif messages_template:
+ self._type = PromptType.LIST

  @property
  def version(self):
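A Prompt now rejects receiving both a text template and a messages template, and records which kind it holds in self._type. A short sketch of the resulting usage (the alias and template strings are illustrative, and Prompt is assumed to be importable from deepeval.prompt):

from deepeval.prompt import Prompt

# A text prompt: self._type is set to PromptType.TEXT.
greeting = Prompt(alias="greeting", template="Hello, {name}!")

# Passing both 'template' and 'messages_template' now raises a TypeError,
# so each Prompt holds exactly one kind of template.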
deepeval/prompt/utils.py CHANGED
@@ -1,5 +1,7 @@
- from deepeval.prompt.api import PromptInterpolationType
  import re
+ from jinja2 import Template
+
+ from deepeval.prompt.api import PromptInterpolationType


  def interpolate_mustache(text: str, **kwargs) -> str:
@@ -25,6 +27,11 @@ def interpolate_dollar_brackets(text: str, **kwargs) -> str:
  return formatted_template.format(**kwargs)


+ def interpolate_jinja(text: str, **kwargs) -> str:
+ template = Template(text)
+ return template.render(**kwargs)
+
+
  def interpolate_text(
  interpolation_type: PromptInterpolationType, text: str, **kwargs
  ) -> str:
@@ -37,5 +44,7 @@ def interpolate_text(
  return interpolate_fstring(text, **kwargs)
  elif interpolation_type == PromptInterpolationType.DOLLAR_BRACKETS:
  return interpolate_dollar_brackets(text, **kwargs)
+ elif interpolation_type == PromptInterpolationType.JINJA:
+ return interpolate_jinja(text, **kwargs)

  raise ValueError(f"Unsupported interpolation type: {interpolation_type}")
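Together with the new JINJA member of PromptInterpolationType, interpolate_text can now render Jinja2 templates. A small sketch of the behavior added by this hunk (the template string and variables are illustrative):

from deepeval.prompt.api import PromptInterpolationType
from deepeval.prompt.utils import interpolate_text

rendered = interpolate_text(
    PromptInterpolationType.JINJA,
    "Hello {{ name }}, you have {{ count }} new messages.",
    name="Ada",
    count=3,
)
print(rendered)  # Hello Ada, you have 3 new messages.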
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: deepeval
- Version: 3.5.6
+ Version: 3.5.8
  Summary: The LLM Evaluation Framework
  Home-page: https://github.com/confident-ai/deepeval
  License: Apache-2.0
@@ -17,6 +17,7 @@ Requires-Dist: anthropic
  Requires-Dist: click (>=8.0.0,<8.3.0)
  Requires-Dist: google-genai (>=1.9.0,<2.0.0)
  Requires-Dist: grpcio (>=1.67.1,<2.0.0)
+ Requires-Dist: jinja2
  Requires-Dist: nest_asyncio
  Requires-Dist: ollama
  Requires-Dist: openai
@@ -1,5 +1,5 @@
  deepeval/__init__.py,sha256=6fsb813LD_jNhqR-xZnSdE5E-KsBbC3tc4oIg5ZMgTw,2115
- deepeval/_version.py,sha256=SscZ47Pu5M9nj65Z84bZLD85xcLbKNzPGkXRhycsW-M,27
+ deepeval/_version.py,sha256=unWkmwnFycd1EkbcelGqbdnCdsoFCoHp1cgSea6zrS0,27
  deepeval/annotation/__init__.py,sha256=ZFhUVNNuH_YgQSZJ-m5E9iUb9TkAkEV33a6ouMDZ8EI,111
  deepeval/annotation/annotation.py,sha256=3j3-syeJepAcEj3u3e4T_BeRDzNr7yXGDIoNQGMKpwQ,2298
  deepeval/annotation/api.py,sha256=EYN33ACVzVxsFleRYm60KB4Exvff3rPJKt1VBuuX970,2147
@@ -147,7 +147,7 @@ deepeval/config/utils.py,sha256=gSOVv18Tx1R72GucbdQesbZLFL-Y9EzbS4p7qd2w_xE,3799
  deepeval/constants.py,sha256=Qe-es-WDPJndgBspEQXxddDCVanrAu03YWCpXsUkdo0,1368
  deepeval/dataset/__init__.py,sha256=rcum_VjBXu8eisCdr6sl84BgoZUs3x0tYbB2PnPtHGY,212
  deepeval/dataset/api.py,sha256=ZxkEqAF4nZH_Ys_1f5r9N2LFI_vBcAJxt8eJm7Mplpw,831
- deepeval/dataset/dataset.py,sha256=T2rzGGKeCjIkkhXY0ofnWh13W6gjjdjat9uVHCmhGFI,49493
+ deepeval/dataset/dataset.py,sha256=dDWTSPWN8i_mZBOAgZt0r5Id6q6aeDf8jAKxv81mP1o,51113
  deepeval/dataset/golden.py,sha256=T-rTk4Hw1tANx_Iimv977F6Y4QK3s5OIB4PecU5FJDM,2338
  deepeval/dataset/test_run_tracer.py,sha256=5CdpDvhzkEEBRyqWi6egocaxiN6IRS3XfbACxEQZQeM,2544
  deepeval/dataset/types.py,sha256=CWeOIBPK2WdmRUqjFa9gfN-w2da0r8Ilzl3ToDpJQoQ,558
@@ -308,7 +308,7 @@ deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfuln
  deepeval/metrics/multimodal_metrics/multimodal_faithfulness/schema.py,sha256=b-WtfA7zq4TgQiuqqNEMf7jmohnWBMW4opChHyg49Gc,414
  deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py,sha256=9EWRC-Wiyr_UEMPfpuTcX2tvsjPxSRY4n_lClcsK6vw,8389
  deepeval/metrics/multimodal_metrics/multimodal_g_eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py,sha256=gZ7Q4vF12PLGhbHhOUAl9LIFWDOc9-GKhu3ly_LOkQ0,13997
+ deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py,sha256=YR2SMmUwVPe8epth2PWtG6UB4vnInBZrTaeeVyF9VHA,14428
  deepeval/metrics/multimodal_metrics/multimodal_g_eval/schema.py,sha256=H_9-iA1BXJwbPKrGEZBqxDO_En4sjXI8_xKSNYc-hnk,167
  deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py,sha256=6kIC4vTtRxUBCyafjyWLZg5WhVHxsRy-m2Mv7OGbgV0,5235
  deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py,sha256=UgY46c1mudFoOglbrrJsXnSrdiJGGRFqpDvrAAZWwV0,2189
@@ -395,7 +395,7 @@ deepeval/openai/extractors.py,sha256=q062nlYKuPVwqfLFYCD1yWv7xHF1U_XrYdAp5ve2l_E
  deepeval/openai/patch.py,sha256=tPDqXaBScBJveM9P5xLT_mVwkubw0bOey-efvdjZIfg,7466
  deepeval/openai/utils.py,sha256=-84VZGUsnzRkYAFWc_DGaGuQTDCUItk0VtUTdjtSxg4,2748
  deepeval/openai_agents/__init__.py,sha256=u-e9laod3LyPfLcI5lr7Yhk8ArfWvlpr-D4_idWIt0A,321
- deepeval/openai_agents/agent.py,sha256=PYOhLELRXfGAP_fje70X3Ovm3WjF24mQYWdwrobwcr4,6173
+ deepeval/openai_agents/agent.py,sha256=gZcmfqTgrQaJV8g6ChmmdpyArEp6oDIqHSaYPDEd344,6100
  deepeval/openai_agents/callback_handler.py,sha256=jrV2Uv9FjfU1BQQe6V_ltT3QS8ZcalxMbqzJI2vvJXo,4713
  deepeval/openai_agents/extractors.py,sha256=0jZxwgY1NQ3mMxVWPpLcMpKlbj-aYV7rwuzRzG8hdZs,11529
  deepeval/openai_agents/patch.py,sha256=zSmRV5yOReHC6IylhT93SM1nQpmH3sEWfYcJqa_iM84,3684
@@ -404,9 +404,9 @@ deepeval/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  deepeval/plugins/plugin.py,sha256=_dwsdx4Dg9DbXxK3f7zJY4QWTJQWc7QE1HmIg2Zjjag,1515
  deepeval/progress_context.py,sha256=ZSKpxrE9sdgt9G3REKnVeXAv7GJXHHVGgLynpG1Pudw,3557
  deepeval/prompt/__init__.py,sha256=M99QTWdxOfiNeySGCSqN873Q80PPxqRvjLq4_Mw-X1w,49
- deepeval/prompt/api.py,sha256=ccrMT6_Otuef9zrRm9iQsmZ-Apkjj3nypvw3wc8-eW0,1708
- deepeval/prompt/prompt.py,sha256=DlUPib0EAVC2T54SQB1xxEQBNSpEirL5x-CtrKE1rek,15463
- deepeval/prompt/utils.py,sha256=Gk0zj_9BK8MQccs8GmiC8o-YVtkou6ZJEz8kWgW5Mog,1678
+ deepeval/prompt/api.py,sha256=kR3MkaHuU2wYILKVnvnXhQWxWp0XgtcWX-kIjpMJRl8,1728
+ deepeval/prompt/prompt.py,sha256=192W5zFBx08nELxRHHDQscMM3psj8OUFV_JR85BZv8Q,15823
+ deepeval/prompt/utils.py,sha256=Ermw9P-1-T5wQ5uYuj5yWgdj7pVB_JLw8D37Qvmh9ok,1938
  deepeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  deepeval/red_teaming/README.md,sha256=BY5rAdpp3-sMMToEKwq0Nsd9ivkGDzPE16DeDb8GY7U,154
  deepeval/scorer/__init__.py,sha256=hTvtoV3a4l0dSBjERm-jX7jveTtKZXK0c9JerQo0T_w,27
@@ -461,8 +461,8 @@ deepeval/tracing/tracing.py,sha256=b-0T3W6lAEOEGhODx0e-yIwBkm5V46EDNAWS9lcWkD0,4
  deepeval/tracing/types.py,sha256=l_utWKerNlE5H3mOKpeUJLsvpP3cMyjH7HRANNgTmSQ,5306
  deepeval/tracing/utils.py,sha256=w_kdhuyBCygllnbqLpDdKJqpJo42t3ZMlGhNicV2A8c,6467
  deepeval/utils.py,sha256=r8tV_NYJSi6ib-oQw6cLw3L7ZSe4KIJVJc1ng6-kDX4,17179
- deepeval-3.5.6.dist-info/LICENSE.md,sha256=0ATkuLv6QgsJTBODUHC5Rak_PArA6gv2t7inJzNTP38,11352
- deepeval-3.5.6.dist-info/METADATA,sha256=ZJkHCQuFE2QYEkvOyIY367qnhzreyUqvyipCoN3O4a8,18721
- deepeval-3.5.6.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
- deepeval-3.5.6.dist-info/entry_points.txt,sha256=fVr8UphXTfJe9I2rObmUtfU3gkSrYeM0pLy-NbJYg10,94
- deepeval-3.5.6.dist-info/RECORD,,
+ deepeval-3.5.8.dist-info/LICENSE.md,sha256=0ATkuLv6QgsJTBODUHC5Rak_PArA6gv2t7inJzNTP38,11352
+ deepeval-3.5.8.dist-info/METADATA,sha256=7yiM7djTQ2fLy8XfdyecBxMg3cgk3hDAsGLUjRamC44,18743
+ deepeval-3.5.8.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
+ deepeval-3.5.8.dist-info/entry_points.txt,sha256=fVr8UphXTfJe9I2rObmUtfU3gkSrYeM0pLy-NbJYg10,94
+ deepeval-3.5.8.dist-info/RECORD,,