deepeval 3.5.4__py3-none-any.whl → 3.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,100 +1,188 @@
  from __future__ import annotations

  from dataclasses import replace
+ from typing import List, Any, Union, Optional
+
+ try:
+     from agents import (
+         RunConfig,
+         RunResult,
+         RunResultStreaming,
+         Runner as AgentsRunner,
+     )
+     from agents.agent import Agent
+     from agents.models.interface import ModelProvider
+     from agents.items import TResponseInputItem
+     from agents.lifecycle import RunHooks
+     from agents.memory import Session
+     from agents.run import DEFAULT_MAX_TURNS
+     from agents.run import AgentRunner
+     from agents.run_context import TContext
+     from agents.models.interface import Model
+
+     agents_available = True
+ except:
+     agents_available = False
+
+
+ def is_agents_available():
+     if not agents_available:
+         raise ImportError(
+             "agents is required for this integration. Install it via your package manager"
+         )
+

- from agents import (
-     Runner as BaseRunner,
-     RunConfig,
-     RunResult,
-     RunResultStreaming,
- )
  from deepeval.tracing.tracing import Observer
  from deepeval.tracing.context import current_span_context, current_trace_context

  # Import observed provider/model helpers from our agent module
- from deepeval.openai_agents.agent import _ObservedProvider
+ from deepeval.metrics import BaseMetric
+ from deepeval.openai_agents.agent import _ObservedModel

+ _PATCHED_DEFAULT_GET_MODEL = False

- class Runner(BaseRunner):
-     """
-     Extends Runner to:
-     - capture metric_collection/metrics at run entry for tracing
-     - ensure RunConfig.model_provider is wrapped to return observed Models
-       so string-based model lookups are also instrumented.
-     """
+
+ def _patch_default_agent_runner_get_model():
+     global _PATCHED_DEFAULT_GET_MODEL
+     if _PATCHED_DEFAULT_GET_MODEL:
+         return
+
+     original_get_model = AgentRunner._get_model

      @classmethod
-     async def run(cls, *args, **kwargs) -> RunResult:
-         metric_collection = kwargs.pop("metric_collection", None)
-         metrics = kwargs.pop("metrics", None)
+     def patched_get_model(
+         cls, agent: Agent[Any], run_config: RunConfig
+     ) -> Model:
+         model = original_get_model(agent, run_config)

-         # Ensure the model provider is wrapped so _get_model(...) uses observed Models
-         starting_agent = (
-             args[0] if len(args) > 0 else kwargs.get("starting_agent")
+         # Extract attributes from agent if it's a DeepEvalAgent
+         llm_metrics = getattr(agent, "llm_metrics", None)
+         llm_metric_collection = getattr(agent, "llm_metric_collection", None)
+         confident_prompt = getattr(agent, "confident_prompt", None)
+         model = _ObservedModel(
+             inner=model,
+             llm_metric_collection=llm_metric_collection,
+             llm_metrics=llm_metrics,
+             confident_prompt=confident_prompt,
          )
-         run_config: RunConfig | None = kwargs.get("run_config")
-         if run_config is None:
-             run_config = RunConfig()
-             kwargs["run_config"] = run_config
-
-         if run_config.model_provider is not None:
-             run_config.model_provider = _ObservedProvider(
-                 run_config.model_provider,
-                 metrics=getattr(starting_agent, "metrics", None) or metrics,
-                 metric_collection=getattr(
-                     starting_agent, "metric_collection", None
-                 )
-                 or metric_collection,
-                 deepeval_prompt=getattr(
-                     starting_agent, "deepeval_prompt", None
-                 ),
-             )

-         input_val = args[1] if len(args) >= 2 else kwargs.get("input", None)
+         return model
+
+     # Replace the method
+     AgentRunner._get_model = patched_get_model
+     _PATCHED_DEFAULT_GET_MODEL = True
+
+
+ if agents_available:
+     _patch_default_agent_runner_get_model()
+
+
+ class Runner(AgentsRunner):
+
+     @classmethod
+     async def run(
+         cls,
+         starting_agent: Agent[TContext],
+         input: Union[str, list[TResponseInputItem]],
+         *,
+         context: Optional[TContext] = None,
+         max_turns: int = DEFAULT_MAX_TURNS,
+         hooks: Optional[RunHooks[TContext]] = None,
+         run_config: Optional[RunConfig] = None,
+         previous_response_id: Optional[str] = None,
+         conversation_id: Optional[str] = None,
+         session: Optional[Session] = None,
+         metrics: Optional[List[BaseMetric]] = None,
+         metric_collection: Optional[str] = None,
+         name: Optional[str] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[dict] = None,
+         thread_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+         **kwargs, # backwards compatibility
+     ) -> RunResult:
+         is_agents_available()
+         # _patch_default_agent_runner_get_model()
+
          with Observer(
              span_type="custom",
              metric_collection=metric_collection,
              metrics=metrics,
              func_name="run",
-             function_kwargs={"input": input_val},
+             function_kwargs={"input": input},
          ) as observer:
+             update_trace_attributes(
+                 input=input,
+                 name=name,
+                 tags=tags,
+                 metadata=metadata,
+                 thread_id=thread_id,
+                 user_id=user_id,
+                 metric_collection=metric_collection,
+                 metrics=metrics,
+             )
              current_span = current_span_context.get()
              current_trace = current_trace_context.get()
-             current_trace.input = input_val
+             current_trace.input = input
              if current_span:
-                 current_span.input = input_val
-             res = await super().run(*args, **kwargs)
-             current_trace.output = str(res)
-             observer.result = str(res)
+                 current_span.input = input
+             res = await super().run(
+                 starting_agent,
+                 input,
+                 context=context,
+                 max_turns=max_turns,
+                 hooks=hooks,
+                 run_config=run_config,
+                 previous_response_id=previous_response_id,
+                 conversation_id=conversation_id,
+                 session=session,
+                 **kwargs, # backwards compatibility
+             )
+             _output = None
+             if thread_id:
+                 _output = res.final_output
+             else:
+                 _output = str(res)
+             observer.result = _output
+             update_trace_attributes(output=_output)
              return res

      @classmethod
-     def run_sync(cls, *args, **kwargs) -> RunResult:
-         metric_collection = kwargs.pop("metric_collection", None)
-         metrics = kwargs.pop("metrics", None)
+     def run_sync(
+         cls,
+         starting_agent: Agent[TContext],
+         input: Union[str, list[TResponseInputItem]],
+         *,
+         context: Optional[TContext] = None,
+         max_turns: int = DEFAULT_MAX_TURNS,
+         hooks: Optional[RunHooks[TContext]] = None,
+         run_config: Optional[RunConfig] = None,
+         previous_response_id: Optional[str] = None,
+         conversation_id: Optional[str] = None,
+         session: Optional[Session] = None,
+         metrics: Optional[List[BaseMetric]] = None,
+         metric_collection: Optional[str] = None,
+         name: Optional[str] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[dict] = None,
+         thread_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+         **kwargs,
+     ) -> RunResult:
+         is_agents_available()
+         input_val = input

-         starting_agent = (
-             args[0] if len(args) > 0 else kwargs.get("starting_agent")
+         update_trace_attributes(
+             input=input_val,
+             name=name,
+             tags=tags,
+             metadata=metadata,
+             thread_id=thread_id,
+             user_id=user_id,
+             metric_collection=metric_collection,
+             metrics=metrics,
          )
-         run_config: RunConfig | None = kwargs.get("run_config")
-         if run_config is None:
-             run_config = RunConfig()
-             kwargs["run_config"] = run_config
-
-         if run_config.model_provider is not None:
-             run_config.model_provider = _ObservedProvider(
-                 run_config.model_provider,
-                 metrics=getattr(starting_agent, "metrics", None) or metrics,
-                 metric_collection=getattr(
-                     starting_agent, "metric_collection", None
-                 )
-                 or metric_collection,
-                 deepeval_prompt=getattr(
-                     starting_agent, "deepeval_prompt", None
-                 ),
-             )

-         input_val = args[1] if len(args) >= 2 else kwargs.get("input", None)
          with Observer(
              span_type="custom",
              metric_collection=metric_collection,
@@ -104,11 +192,140 @@ class Runner(BaseRunner):
          ) as observer:
              current_span = current_span_context.get()
              current_trace = current_trace_context.get()
-             current_trace.input = input_val
              if current_span:
                  current_span.input = input_val
-             res = super().run_sync(*args, **kwargs)
-             current_trace.output = str(res)
-             observer.result = str(res)
+             res = super().run_sync(
+                 starting_agent,
+                 input,
+                 context=context,
+                 max_turns=max_turns,
+                 hooks=hooks,
+                 run_config=run_config,
+                 previous_response_id=previous_response_id,
+                 conversation_id=conversation_id,
+                 session=session,
+                 **kwargs, # backwards compatibility
+             )
+             _output = None
+             if thread_id:
+                 _output = res.final_output
+             else:
+                 _output = str(res)
+             update_trace_attributes(output=_output)
+             observer.result = _output

              return res
+
+     @classmethod
+     def run_streamed(
+         cls,
+         starting_agent: Agent[TContext],
+         input: Union[str, list[TResponseInputItem]],
+         *,
+         context: Optional[TContext] = None,
+         max_turns: int = DEFAULT_MAX_TURNS,
+         hooks: Optional[RunHooks[TContext]] = None,
+         run_config: Optional[RunConfig] = None,
+         previous_response_id: Optional[str] = None,
+         conversation_id: Optional[str] = None,
+         session: Optional[Session] = None,
+         metrics: Optional[List[BaseMetric]] = None,
+         metric_collection: Optional[str] = None,
+         name: Optional[str] = None,
+         tags: Optional[List[str]] = None,
+         metadata: Optional[dict] = None,
+         thread_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+         **kwargs, # backwards compatibility
+     ) -> RunResultStreaming:
+         is_agents_available()
+         # Manually enter observer; we'll exit when streaming finishes
+         observer = Observer(
+             span_type="custom",
+             metric_collection=metric_collection,
+             metrics=metrics,
+             func_name="run_streamed",
+             function_kwargs={"input": input},
+         )
+         observer.__enter__()
+
+         update_trace_attributes(
+             input=input,
+             name=name,
+             tags=tags,
+             metadata=metadata,
+             thread_id=thread_id,
+             user_id=user_id,
+             metric_collection=metric_collection,
+             metrics=metrics,
+         )
+
+         current_span = current_span_context.get()
+         if current_span:
+             current_span.input = input
+
+         res = super().run_streamed(
+             starting_agent,
+             input,
+             context=context,
+             max_turns=max_turns,
+             hooks=hooks,
+             run_config=run_config,
+             previous_response_id=previous_response_id,
+             conversation_id=conversation_id,
+             session=session,
+             **kwargs, # backwards compatibility
+         )
+
+         # Runtime-patch stream_events so the observer closes only after streaming completes
+         orig_stream_events = res.stream_events
+
+         async def _patched_stream_events(self: RunResultStreaming):
+             try:
+                 async for event in orig_stream_events():
+                     yield event
+                 observer.result = self.final_output
+                 update_trace_attributes(output=self.final_output)
+             except Exception as e:
+                 observer.__exit__(type(e), e, e.__traceback__)
+                 raise
+             finally:
+                 observer.__exit__(None, None, None)
+
+         from types import MethodType as _MethodType
+
+         res.stream_events = _MethodType(_patched_stream_events, res)
+
+         return res
+
+
+ def update_trace_attributes(
+     input: Any = None,
+     output: Any = None,
+     name: str = None,
+     tags: List[str] = None,
+     metadata: dict = None,
+     thread_id: str = None,
+     user_id: str = None,
+     metric_collection: str = None,
+     metrics: List[BaseMetric] = None,
+ ):
+     current_trace = current_trace_context.get()
+     if input:
+         current_trace.input = input
+     if output:
+         current_trace.output = output
+     if name:
+         current_trace.name = name
+     if tags:
+         current_trace.tags = tags
+     if metadata:
+         current_trace.metadata = metadata
+     if thread_id:
+         current_trace.thread_id = thread_id
+     if user_id:
+         current_trace.user_id = user_id
+     if metric_collection:
+         current_trace.metric_collection = metric_collection
+     if metrics:
+         current_trace.metrics = metrics
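
With the explicit signatures above, the deepeval-specific keyword arguments (`metrics`, `metric_collection`, `name`, `tags`, `metadata`, `thread_id`, `user_id`) are declared on `run`, `run_sync`, and `run_streamed` rather than popped out of `**kwargs`. The following is a hedged usage sketch of the new entry point: the agent definition, collection name, and IDs are placeholders, and actually executing it assumes the `openai-agents` package plus a configured OpenAI API key.

```python
# Usage sketch, not taken from the package: all values below are placeholders.
from agents import Agent  # openai-agents SDK
from deepeval.openai_agents.runner import Runner  # module this diff appears to modify

agent = Agent(
    name="Support agent",
    instructions="Answer customer support questions briefly.",
)

result = Runner.run_sync(
    agent,
    "How do I reset my password?",
    metric_collection="my-collection",  # placeholder collection name
    name="support-run",                 # trace attributes new in 3.5.5
    tags=["support", "password-reset"],
    thread_id="thread-123",             # with a thread_id, the trace output becomes res.final_output
    user_id="user-456",
)
print(result.final_output)
```
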
@@ -8,9 +8,6 @@ import random
  import atexit
  import queue
  import uuid
- import os
- import json
- import time
  from openai import OpenAI
  from rich.console import Console
  from rich.progress import Progress
@@ -496,6 +493,7 @@ class TraceManager:
              asyncio.gather(*pending, return_exceptions=True)
          )
          self.flush_traces(remaining_trace_request_bodies)
+         loop.run_until_complete(loop.shutdown_asyncgens())
          loop.close()

      def flush_traces(
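
The only functional change in this hunk is running `loop.shutdown_asyncgens()` before `loop.close()`, which finalizes any async generators that were started on the loop but never exhausted. A small standalone example (not deepeval code) of the pattern:

```python
import asyncio


async def ticker():
    # An async generator whose cleanup lives in its finally block.
    try:
        while True:
            yield "tick"
            await asyncio.sleep(0.01)
    finally:
        print("ticker finalized")


def main():
    loop = asyncio.new_event_loop()
    gen = ticker()
    # Start the generator on the loop but never exhaust or close it.
    loop.run_until_complete(gen.__anext__())
    # Without this call, the generator is finalized later by garbage collection,
    # after the loop has closed, so its cleanup can be skipped or raise warnings.
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()


if __name__ == "__main__":
    main()
```
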
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: deepeval
- Version: 3.5.4
+ Version: 3.5.5
  Summary: The LLM Evaluation Framework
  Home-page: https://github.com/confident-ai/deepeval
  License: Apache-2.0
@@ -186,6 +186,8 @@ Let's pretend your LLM application is a RAG based customer support chatbot; here

  ## Installation

+ Deepeval works with **Python>=3.9+**.
+
  ```
  pip install -U deepeval
  ```
@@ -1,5 +1,5 @@
  deepeval/__init__.py,sha256=6fsb813LD_jNhqR-xZnSdE5E-KsBbC3tc4oIg5ZMgTw,2115
- deepeval/_version.py,sha256=Vy_DqdUIdzt42W7BKglfMO5ghp2Wa6OV5Tatx__sA2U,27
+ deepeval/_version.py,sha256=CJwAeAyMGnIxrkmBn8fpG6bwbVBsUZaTrtwbstM2LgA,27
  deepeval/annotation/__init__.py,sha256=ZFhUVNNuH_YgQSZJ-m5E9iUb9TkAkEV33a6ouMDZ8EI,111
  deepeval/annotation/annotation.py,sha256=3j3-syeJepAcEj3u3e4T_BeRDzNr7yXGDIoNQGMKpwQ,2298
  deepeval/annotation/api.py,sha256=EYN33ACVzVxsFleRYm60KB4Exvff3rPJKt1VBuuX970,2147
@@ -141,24 +141,24 @@ deepeval/confident/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVG
  deepeval/confident/api.py,sha256=bOC71TaVAEgoXFtJ9yMo0-atmUUdBuvaclMGczMcR6o,8455
  deepeval/confident/types.py,sha256=-slFhDof_1maMgpLxqDRZv6kz6ZVY2hP_0uj_aveJKU,533
  deepeval/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- deepeval/config/settings.py,sha256=h-hHrzTItfMkR1ZnMCF6xUeJ3DBwdOgO354QaEH-VaE,20861
+ deepeval/config/settings.py,sha256=gRRi6nXEUKse13xAShU9MA18zo14vpIgl_R0xJ_0vnM,21314
  deepeval/config/settings_manager.py,sha256=PsBS_5dRJASak2AUDwjhjLSiezNz1fje0R3onoFCKC0,4014
  deepeval/config/utils.py,sha256=gSOVv18Tx1R72GucbdQesbZLFL-Y9EzbS4p7qd2w_xE,3799
- deepeval/constants.py,sha256=ZXIA1Hsjy03SvfaIx2hbj_oxHp-gJ-kYIyktnFD1AGg,1337
+ deepeval/constants.py,sha256=Qe-es-WDPJndgBspEQXxddDCVanrAu03YWCpXsUkdo0,1368
  deepeval/dataset/__init__.py,sha256=rcum_VjBXu8eisCdr6sl84BgoZUs3x0tYbB2PnPtHGY,212
  deepeval/dataset/api.py,sha256=ZxkEqAF4nZH_Ys_1f5r9N2LFI_vBcAJxt8eJm7Mplpw,831
- deepeval/dataset/dataset.py,sha256=dkUDYtK1z9sDn6A-HOohoHHUobWuaCiuHEABLAfP4kQ,49396
+ deepeval/dataset/dataset.py,sha256=T2rzGGKeCjIkkhXY0ofnWh13W6gjjdjat9uVHCmhGFI,49493
  deepeval/dataset/golden.py,sha256=T-rTk4Hw1tANx_Iimv977F6Y4QK3s5OIB4PecU5FJDM,2338
  deepeval/dataset/test_run_tracer.py,sha256=5CdpDvhzkEEBRyqWi6egocaxiN6IRS3XfbACxEQZQeM,2544
- deepeval/dataset/types.py,sha256=cwsAXeaxvd511Uc7-zQ0OxsrPPduCfUSKRbuziAwWhA,309
- deepeval/dataset/utils.py,sha256=kqzzM1kTtAmlt0kv4_6lLQ-oVR20PtcxdKE4NmoZFpM,5735
+ deepeval/dataset/types.py,sha256=CWeOIBPK2WdmRUqjFa9gfN-w2da0r8Ilzl3ToDpJQoQ,558
+ deepeval/dataset/utils.py,sha256=fGHqUxqBRGLp3ck1QHLuAQeqtkEERfi4-G5pSvh6KII,6606
  deepeval/errors.py,sha256=_K5wywEw2gp0DDECyeY6UrSI9GgOtCBbfPxt5maIzSY,113
  deepeval/evaluate/__init__.py,sha256=315IaMiYEz7oJhZ4kPTBfeCNd1xF-wWVU6KOQnrKQpE,291
  deepeval/evaluate/api.py,sha256=rkblH0ZFAAdyuF0Ymh7JE1pIJPR9yFuPrn9SQaCEQp4,435
  deepeval/evaluate/compare.py,sha256=tdSJY4E7YJ_zO3dzvpwngZHLiUI2YQcTWJOLI83htsQ,9855
  deepeval/evaluate/configs.py,sha256=QfWjaWNxLsgEe8-5j4PIs5WcSyEckiWt0qdpXSpl57M,928
  deepeval/evaluate/evaluate.py,sha256=NPAJ2iJqJI_RurXKUIC0tft_ozYMIKwZf5iPfmnNhQc,10412
- deepeval/evaluate/execute.py,sha256=fKMbH5rdfySTDUkvu808dLZ0NCwHQXMolz4ggmdZq90,79382
+ deepeval/evaluate/execute.py,sha256=45m3w3QSAWVHRNTSqLZcpUI1bA_qRFWIGu292WKTjcA,87953
  deepeval/evaluate/types.py,sha256=IGZ3Xsj0UecPI3JNeTpJaK1gDvlepokfCmHwtItIW9M,831
  deepeval/evaluate/utils.py,sha256=kkliSGzuICeUsXDtlMMPfN95dUKlqarNhfciSffd4gI,23143
  deepeval/integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -395,11 +395,11 @@ deepeval/openai/extractors.py,sha256=q062nlYKuPVwqfLFYCD1yWv7xHF1U_XrYdAp5ve2l_E
  deepeval/openai/patch.py,sha256=tPDqXaBScBJveM9P5xLT_mVwkubw0bOey-efvdjZIfg,7466
  deepeval/openai/utils.py,sha256=-84VZGUsnzRkYAFWc_DGaGuQTDCUItk0VtUTdjtSxg4,2748
  deepeval/openai_agents/__init__.py,sha256=u-e9laod3LyPfLcI5lr7Yhk8ArfWvlpr-D4_idWIt0A,321
- deepeval/openai_agents/agent.py,sha256=lgwc9pXhJn1xbytl2sy58aZX-gIsFP87fhImXnV9EJU,5564
- deepeval/openai_agents/callback_handler.py,sha256=AXnL9teO8txUwK3cxBpqFTki5ZWOEy6kSglLjrnCNyA,3264
+ deepeval/openai_agents/agent.py,sha256=PYOhLELRXfGAP_fje70X3Ovm3WjF24mQYWdwrobwcr4,6173
+ deepeval/openai_agents/callback_handler.py,sha256=-tOXJ3SMKqH5u41cB_g7FBjaX5qAuqVAaAv7vQtiBVc,3025
  deepeval/openai_agents/extractors.py,sha256=0jZxwgY1NQ3mMxVWPpLcMpKlbj-aYV7rwuzRzG8hdZs,11529
  deepeval/openai_agents/patch.py,sha256=zSmRV5yOReHC6IylhT93SM1nQpmH3sEWfYcJqa_iM84,3684
- deepeval/openai_agents/runner.py,sha256=eWXPrjDE7mE0hB7D1RXFCv3mDtHjNrWJV7Ba5QOTJb8,4173
+ deepeval/openai_agents/runner.py,sha256=pRwe6DX6kpXia6btl4TAWlnXpk88MsQfM8yWkGufyk8,10608
  deepeval/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  deepeval/plugins/plugin.py,sha256=_dwsdx4Dg9DbXxK3f7zJY4QWTJQWc7QE1HmIg2Zjjag,1515
  deepeval/progress_context.py,sha256=ZSKpxrE9sdgt9G3REKnVeXAv7GJXHHVGgLynpG1Pudw,3557
@@ -457,12 +457,12 @@ deepeval/tracing/otel/exporter.py,sha256=dXQd834zm5rm1ss9pWkBBlk-JSdtiw7aFLso2hM
  deepeval/tracing/otel/utils.py,sha256=g8yAzhqbPh1fOKCWkfNekC6AVotLfu1SUcfNMo6zii8,9786
  deepeval/tracing/patchers.py,sha256=DAPNkhrDtoeyJIVeQDUMhTz-xGcXu00eqjQZmov8FiU,3096
  deepeval/tracing/perf_epoch_bridge.py,sha256=iyAPddB6Op7NpMtPHJ29lDm53Btz9yLaN6xSCfTRQm4,1825
- deepeval/tracing/tracing.py,sha256=vOVFdN6fVMW53XhyqTZSfp4vI7DCqRez4TKNhdhr-sg,42277
+ deepeval/tracing/tracing.py,sha256=b-0T3W6lAEOEGhODx0e-yIwBkm5V46EDNAWS9lcWkD0,42306
  deepeval/tracing/types.py,sha256=l_utWKerNlE5H3mOKpeUJLsvpP3cMyjH7HRANNgTmSQ,5306
  deepeval/tracing/utils.py,sha256=w_kdhuyBCygllnbqLpDdKJqpJo42t3ZMlGhNicV2A8c,6467
  deepeval/utils.py,sha256=r8tV_NYJSi6ib-oQw6cLw3L7ZSe4KIJVJc1ng6-kDX4,17179
- deepeval-3.5.4.dist-info/LICENSE.md,sha256=0ATkuLv6QgsJTBODUHC5Rak_PArA6gv2t7inJzNTP38,11352
- deepeval-3.5.4.dist-info/METADATA,sha256=fJ15yXxlzKTfOsoW5z7uxIJ4Qx6X-UTpKj7pabi5Tv8,18682
- deepeval-3.5.4.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
- deepeval-3.5.4.dist-info/entry_points.txt,sha256=fVr8UphXTfJe9I2rObmUtfU3gkSrYeM0pLy-NbJYg10,94
- deepeval-3.5.4.dist-info/RECORD,,
+ deepeval-3.5.5.dist-info/LICENSE.md,sha256=0ATkuLv6QgsJTBODUHC5Rak_PArA6gv2t7inJzNTP38,11352
+ deepeval-3.5.5.dist-info/METADATA,sha256=Js_9nOjXPh0YQOokcbPvquIW7lBRQMphrLHTYZ8-pAE,18721
+ deepeval-3.5.5.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
+ deepeval-3.5.5.dist-info/entry_points.txt,sha256=fVr8UphXTfJe9I2rObmUtfU3gkSrYeM0pLy-NbJYg10,94
+ deepeval-3.5.5.dist-info/RECORD,,