deepeval 3.7.9__py3-none-any.whl → 3.8.0__py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/cli/main.py +168 -0
- deepeval/confident/api.py +2 -0
- deepeval/config/settings.py +10 -0
- deepeval/constants.py +1 -0
- deepeval/integrations/langchain/callback.py +330 -158
- deepeval/integrations/langchain/utils.py +31 -8
- deepeval/key_handler.py +8 -1
- deepeval/metrics/conversational_g_eval/conversational_g_eval.py +35 -0
- deepeval/metrics/g_eval/g_eval.py +35 -1
- deepeval/metrics/g_eval/utils.py +65 -0
- deepeval/models/__init__.py +2 -0
- deepeval/models/llms/__init__.py +2 -0
- deepeval/models/llms/constants.py +23 -0
- deepeval/models/llms/openai_model.py +5 -4
- deepeval/models/llms/openrouter_model.py +398 -0
- deepeval/models/retry_policy.py +3 -0
- deepeval/prompt/api.py +1 -0
- deepeval/tracing/tracing.py +6 -1
- deepeval/tracing/types.py +1 -1
- {deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/METADATA +3 -3
- {deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/RECORD +25 -24
- {deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/WHEEL +0 -0
- {deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/entry_points.txt +0 -0
deepeval/models/llms/openrouter_model.py ADDED
@@ -0,0 +1,398 @@
+import warnings
+import inspect
+
+from typing import Optional, Tuple, Union, Dict, Type
+from pydantic import BaseModel, SecretStr
+from openai.types.chat.chat_completion import ChatCompletion
+from openai import (
+    OpenAI,
+    AsyncOpenAI,
+)
+
+from deepeval.config.settings import get_settings
+from deepeval.constants import ProviderSlug as PS
+from deepeval.errors import DeepEvalError
+from deepeval.models import DeepEvalBaseLLM
+from deepeval.models.llms.constants import DEFAULT_OPENROUTER_MODEL
+from deepeval.models.llms.utils import trim_and_load_json
+from deepeval.models.utils import require_secret_api_key
+from deepeval.models.retry_policy import (
+    create_retry_decorator,
+    sdk_retries_for,
+)
+
+
+retry_openrouter = create_retry_decorator(PS.OPENROUTER)
+
+
+def _request_timeout_seconds() -> float:
+    timeout = float(get_settings().DEEPEVAL_PER_ATTEMPT_TIMEOUT_SECONDS or 0)
+    return timeout if timeout > 0 else 30.0
+
+
+def _convert_schema_to_openrouter_format(
+    schema: Union[Type[BaseModel], BaseModel],
+) -> Dict:
+    """
+    Convert Pydantic BaseModel to OpenRouter's JSON Schema format.
+
+    OpenRouter expects:
+    {
+        "type": "json_schema",
+        "json_schema": {
+            "name": "schema_name",
+            "strict": true,
+            "schema": { ... JSON Schema ... }
+        }
+    }
+    """
+    json_schema = schema.model_json_schema()
+    schema_name = (
+        schema.__name__
+        if inspect.isclass(schema)
+        else schema.__class__.__name__
+    )
+
+    # OpenRouter requires additionalProperties: false when strict: true
+    # Ensure it's set at the root level of the schema
+    if "additionalProperties" not in json_schema:
+        json_schema["additionalProperties"] = False
+
+    return {
+        "type": "json_schema",
+        "json_schema": {
+            "name": schema_name,
+            "strict": True,
+            "schema": json_schema,
+        },
+    }
+
+
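For concreteness, this is what the helper yields for a small Pydantic model. The `Verdict` model below is illustrative only, not part of the package; the expected output follows from `model_json_schema()` plus the `additionalProperties` patch shown above.

    # Hypothetical example model, not part of this diff.
    from pydantic import BaseModel

    class Verdict(BaseModel):
        verdict: str
        reason: str

    # _convert_schema_to_openrouter_format(Verdict) would return:
    # {
    #     "type": "json_schema",
    #     "json_schema": {
    #         "name": "Verdict",
    #         "strict": True,
    #         "schema": {
    #             "properties": {
    #                 "verdict": {"title": "Verdict", "type": "string"},
    #                 "reason": {"title": "Reason", "type": "string"},
    #             },
    #             "required": ["verdict", "reason"],
    #             "title": "Verdict",
    #             "type": "object",
    #             "additionalProperties": False,
    #         },
    #     },
    # }
    # This dict is passed verbatim as response_format to chat.completions.create.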
+class OpenRouterModel(DeepEvalBaseLLM):
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        api_key: Optional[str] = None,
+        base_url: Optional[str] = None,
+        temperature: Optional[float] = None,
+        cost_per_input_token: Optional[float] = None,
+        cost_per_output_token: Optional[float] = None,
+        generation_kwargs: Optional[Dict] = None,
+        **kwargs,
+    ):
+        settings = get_settings()
+        model = model or settings.OPENROUTER_MODEL_NAME
+        if model is None:
+            model = DEFAULT_OPENROUTER_MODEL
+
+        if api_key is not None:
+            # keep it secret, keep it safe from serializings, logging and alike
+            self.api_key: Optional[SecretStr] = SecretStr(api_key)
+        else:
+            self.api_key = settings.OPENROUTER_API_KEY
+
+        if base_url is not None:
+            base_url = str(base_url).rstrip("/")
+        elif settings.OPENROUTER_BASE_URL is not None:
+            base_url = str(settings.OPENROUTER_BASE_URL).rstrip("/")
+        else:
+            base_url = "https://openrouter.ai/api/v1"
+
+        cost_per_input_token = (
+            cost_per_input_token
+            if cost_per_input_token is not None
+            else settings.OPENROUTER_COST_PER_INPUT_TOKEN
+        )
+        cost_per_output_token = (
+            cost_per_output_token
+            if cost_per_output_token is not None
+            else settings.OPENROUTER_COST_PER_OUTPUT_TOKEN
+        )
+
+        if temperature is not None:
+            temperature = float(temperature)
+        elif settings.TEMPERATURE is not None:
+            temperature = settings.TEMPERATURE
+        else:
+            temperature = 0.0
+
+        # validation
+        if temperature < 0:
+            raise DeepEvalError("Temperature must be >= 0.")
+
+        self.base_url = base_url
+        self.cost_per_input_token = cost_per_input_token
+        self.cost_per_output_token = cost_per_output_token
+        self.temperature = temperature
+
+        self.kwargs = dict(kwargs)
+        self.kwargs.pop("temperature", None)
+
+        self.generation_kwargs = dict(generation_kwargs or {})
+        self.generation_kwargs.pop("temperature", None)
+
+        super().__init__(model)
+
+    ###############################################
+    # Generate functions
+    ###############################################
+
+    async def _generate_with_client(
+        self,
+        client: AsyncOpenAI,
+        prompt: str,
+        schema: Optional[BaseModel] = None,
+    ) -> Tuple[Union[str, Dict], float]:
+        """
+        Core generation logic shared between generate() and a_generate().
+
+        Args:
+            client: AsyncOpenAI client
+            prompt: The prompt to send
+            schema: Optional Pydantic schema for structured outputs
+
+        Returns:
+            Tuple of (output, cost)
+        """
+        if schema:
+            # Try OpenRouter's native JSON Schema format
+            try:
+                openrouter_response_format = (
+                    _convert_schema_to_openrouter_format(schema)
+                )
+                completion = await client.chat.completions.create(
+                    model=self.name,
+                    messages=[{"role": "user", "content": prompt}],
+                    response_format=openrouter_response_format,
+                    temperature=self.temperature,
+                    **self.generation_kwargs,
+                )
+
+                # Parse the JSON response and validate against schema
+                json_output = trim_and_load_json(
+                    completion.choices[0].message.content
+                )
+                cost = self.calculate_cost(
+                    completion.usage.prompt_tokens,
+                    completion.usage.completion_tokens,
+                    response=completion,
+                )
+                return schema.model_validate(json_output), cost
+            except Exception as e:
+                # Warn if structured outputs fail
+                warnings.warn(
+                    f"Structured outputs not supported for model '{self.name}'. "
+                    f"Falling back to regular generation with JSON parsing. "
+                    f"Error: {str(e)}",
+                    UserWarning,
+                    stacklevel=3,
+                )
+                # Fall back to regular generation and parse JSON manually (like Bedrock)
+                # This works with any model that can generate JSON in text
+                pass
+
+        # Regular generation (or fallback if structured outputs failed)
+        completion = await client.chat.completions.create(
+            model=self.name,
+            messages=[{"role": "user", "content": prompt}],
+            temperature=self.temperature,
+            **self.generation_kwargs,
+        )
+
+        output = completion.choices[0].message.content
+        cost = self.calculate_cost(
+            completion.usage.prompt_tokens,
+            completion.usage.completion_tokens,
+            response=completion,
+        )
+        if schema:
+            # Parse JSON from text and validate against schema (like Bedrock)
+            json_output = trim_and_load_json(output)
+            return schema.model_validate(json_output), cost
+        else:
+            return output, cost
+
+    @retry_openrouter
+    def generate(
+        self, prompt: str, schema: Optional[BaseModel] = None
+    ) -> Tuple[Union[str, Dict], float]:
+        from deepeval.models.llms.utils import safe_asyncio_run
+
+        client = self.load_model(async_mode=True)
+        return safe_asyncio_run(
+            self._generate_with_client(client, prompt, schema)
+        )
+
+    @retry_openrouter
+    async def a_generate(
+        self, prompt: str, schema: Optional[BaseModel] = None
+    ) -> Tuple[Union[str, BaseModel], float]:
+        client = self.load_model(async_mode=True)
+        return await self._generate_with_client(client, prompt, schema)
+
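The public entry points above both return an `(output, cost)` tuple: `generate()` drives the shared async path through `safe_asyncio_run`, while `a_generate()` awaits it directly. A minimal usage sketch follows; the import path is an assumption based on the `+2 -0` changes to `deepeval/models/__init__.py` and `deepeval/models/llms/__init__.py` listed at the top of this diff, and the model slug and key are placeholders.

    # Usage sketch; slug and key are placeholders, import path is assumed.
    from deepeval.models import OpenRouterModel

    llm = OpenRouterModel(
        model="openai/gpt-4o-mini",  # falls back to OPENROUTER_MODEL_NAME, then DEFAULT_OPENROUTER_MODEL
        api_key="sk-or-...",         # falls back to OPENROUTER_API_KEY; held as a SecretStr
    )

    # Plain generation returns (str, cost).
    text, cost = llm.generate("Summarize G-Eval in one sentence.")

    # With a schema, the strict json_schema response_format is tried first,
    # then plain generation plus trim_and_load_json as the fallback.
    verdict, cost = llm.generate("Give a verdict as JSON.", schema=Verdict)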
+    ###############################################
+    # Other generate functions
+    ###############################################
+
+    @retry_openrouter
+    def generate_raw_response(
+        self,
+        prompt: str,
+        top_logprobs: int = 5,
+    ) -> Tuple[ChatCompletion, float]:
+        # Generate completion
+        client = self.load_model(async_mode=False)
+        completion = client.chat.completions.create(
+            model=self.name,
+            messages=[{"role": "user", "content": prompt}],
+            temperature=self.temperature,
+            logprobs=True,
+            top_logprobs=top_logprobs,
+            **self.generation_kwargs,
+        )
+        # Cost calculation
+        input_tokens = completion.usage.prompt_tokens
+        output_tokens = completion.usage.completion_tokens
+        cost = self.calculate_cost(
+            input_tokens, output_tokens, response=completion
+        )
+
+        return completion, cost
+
+    @retry_openrouter
+    async def a_generate_raw_response(
+        self,
+        prompt: str,
+        top_logprobs: int = 5,
+    ) -> Tuple[ChatCompletion, float]:
+        # Generate completion
+        client = self.load_model(async_mode=True)
+        completion = await client.chat.completions.create(
+            model=self.name,
+            messages=[{"role": "user", "content": prompt}],
+            temperature=self.temperature,
+            logprobs=True,
+            top_logprobs=top_logprobs,
+            **self.generation_kwargs,
+        )
+        # Cost calculation
+        input_tokens = completion.usage.prompt_tokens
+        output_tokens = completion.usage.completion_tokens
+        cost = self.calculate_cost(
+            input_tokens, output_tokens, response=completion
+        )
+
+        return completion, cost
+
+    @retry_openrouter
+    def generate_samples(
+        self, prompt: str, n: int, temperature: float
+    ) -> Tuple[list[str], float]:
+        client = self.load_model(async_mode=False)
+        response = client.chat.completions.create(
+            model=self.name,
+            messages=[{"role": "user", "content": prompt}],
+            n=n,
+            temperature=temperature,
+            **self.generation_kwargs,
+        )
+        completions = [choice.message.content for choice in response.choices]
+        cost = self.calculate_cost(
+            response.usage.prompt_tokens,
+            response.usage.completion_tokens,
+            response=response,
+        )
+        return completions, cost
+
+    ###############################################
+    # Utilities
+    ###############################################
+
+    def calculate_cost(
+        self, input_tokens: int, output_tokens: int, response=None
+    ) -> Optional[float]:
+        """
+        Calculate cost with priority:
+        1. User-provided pricing (highest priority)
+        2. Try to extract from API response (if OpenRouter includes pricing)
+        3. Return None if cost cannot be determined
+        """
+        # Priority 1: User-provided pricing
+        if (
+            self.cost_per_input_token is not None
+            and self.cost_per_output_token is not None
+        ):
+            return (
+                input_tokens * self.cost_per_input_token
+                + output_tokens * self.cost_per_output_token
+            )
+
+        # Priority 2: Try to extract from API response (if OpenRouter includes pricing)
+        # Note: OpenRouter may include pricing in response metadata
+        if response is not None:
+            # Check if response has cost information
+            usage_cost = getattr(getattr(response, "usage", None), "cost", None)
+            if usage_cost is not None:
+                try:
+                    return float(usage_cost)
+                except (ValueError, TypeError):
+                    pass
+            # Some responses might have cost at the top level
+            response_cost = getattr(response, "cost", None)
+            if response_cost is not None:
+                try:
+                    return float(response_cost)
+                except (ValueError, TypeError):
+                    pass
+
+        # Priority 3: Return None since cost is unknown
+        return None
+
+    ###############################################
+    # Model
+    ###############################################
+
+    def get_model_name(self):
+        return f"{self.name} (OpenRouter)"
+
+    def load_model(self, async_mode: bool = False):
+        if not async_mode:
+            return self._build_client(OpenAI)
+        return self._build_client(AsyncOpenAI)
+
+    def _client_kwargs(self) -> Dict:
+        """
+        If Tenacity is managing retries, force OpenAI SDK retries off to avoid double retries.
+        If the user opts into SDK retries for 'openrouter' via DEEPEVAL_SDK_RETRY_PROVIDERS,
+        leave their retry settings as is.
+        """
+        kwargs = dict(self.kwargs or {})
+        if not sdk_retries_for(PS.OPENROUTER):
+            kwargs["max_retries"] = 0
+
+        if not kwargs.get("timeout"):
+            kwargs["timeout"] = _request_timeout_seconds()
+
+        return kwargs
+
+    def _build_client(self, cls):
+        api_key = require_secret_api_key(
+            self.api_key,
+            provider_label="OpenRouter",
+            env_var_name="OPENROUTER_API_KEY",
+            param_hint="`api_key` to OpenRouterModel(...)",
+        )
+
+        kw = dict(
+            api_key=api_key,
+            base_url=self.base_url,
+            **self._client_kwargs(),
+        )
+        try:
+            return cls(**kw)
+        except TypeError as e:
+            # older OpenAI SDKs may not accept max_retries, in that case remove and retry once
+            if "max_retries" in str(e):
+                kw.pop("max_retries", None)
+                return cls(**kw)
+            raise
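Note the fallback chain in `calculate_cost` above: explicit per-token prices win, then any `usage.cost` or top-level `cost` field OpenRouter happens to return, then `None`. A worked example with made-up prices:

    # Made-up prices, for illustration only.
    llm = OpenRouterModel(
        model="openai/gpt-4o-mini",
        api_key="sk-or-...",
        cost_per_input_token=0.15 / 1_000_000,   # $0.15 per 1M input tokens
        cost_per_output_token=0.60 / 1_000_000,  # $0.60 per 1M output tokens
    )
    # For 2,000 prompt tokens and 500 completion tokens, Priority 1 applies:
    #   2000 * 0.15e-6 + 500 * 0.60e-6 = 0.0003 + 0.0003 = $0.0006
    cost = llm.calculate_cost(2000, 500)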
deepeval/models/retry_policy.py CHANGED
@@ -772,6 +772,7 @@ AZURE_OPENAI_ERROR_POLICY = OPENAI_ERROR_POLICY
 DEEPSEEK_ERROR_POLICY = OPENAI_ERROR_POLICY
 KIMI_ERROR_POLICY = OPENAI_ERROR_POLICY
 LOCAL_ERROR_POLICY = OPENAI_ERROR_POLICY
+OPENROUTER_ERROR_POLICY = OPENAI_ERROR_POLICY

 ######################
 # AWS Bedrock Policy #
@@ -998,6 +999,7 @@ _POLICY_BY_SLUG: dict[str, Optional[ErrorPolicy]] = {
     PS.LITELLM.value: LITELLM_ERROR_POLICY,
     PS.LOCAL.value: LOCAL_ERROR_POLICY,
     PS.OLLAMA.value: OLLAMA_ERROR_POLICY,
+    PS.OPENROUTER.value: OPENROUTER_ERROR_POLICY,
 }


@@ -1019,6 +1021,7 @@ _STATIC_PRED_BY_SLUG: dict[str, Optional[Callable[[Exception], bool]]] = {
     PS.LITELLM.value: _opt_pred(LITELLM_ERROR_POLICY),
     PS.LOCAL.value: _opt_pred(LOCAL_ERROR_POLICY),
     PS.OLLAMA.value: _opt_pred(OLLAMA_ERROR_POLICY),
+    PS.OPENROUTER.value: _opt_pred(OPENROUTER_ERROR_POLICY),
 }

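These three one-line additions register OpenRouter in the retry machinery by aliasing the existing OpenAI policy, so `retry_openrouter` treats the same exception classes as transient that the OpenAI integration already does. A hedged sketch of the opt-in path mentioned in the `_client_kwargs` docstring above; only the variable name is confirmed by the diff, and the comma-separated value format is an assumption:

    import os

    # Assumed value format; the diff only shows that sdk_retries_for(PS.OPENROUTER)
    # consults DEEPEVAL_SDK_RETRY_PROVIDERS.
    os.environ["DEEPEVAL_SDK_RETRY_PROVIDERS"] = "openrouter"

    # With the opt-in set, _client_kwargs() stops forcing max_retries=0, so the
    # OpenAI SDK's own retry settings are left untouched.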
deepeval/prompt/api.py CHANGED
deepeval/tracing/tracing.py CHANGED
@@ -847,7 +847,12 @@ class Observer:
             self.trace_uuid = parent_span.trace_uuid
         else:
             current_trace = current_trace_context.get()
-            if current_trace:
+            # IMPORTANT: Verify trace is still active, not just in context
+            # (a previous failed async operation might leave a dead trace in context)
+            if (
+                current_trace
+                and current_trace.uuid in trace_manager.active_traces
+            ):
                 self.trace_uuid = current_trace.uuid
             else:
                 trace = trace_manager.start_new_trace(
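The tracing fix guards against a stale contextvar: a trace left in `current_trace_context` by a failed async operation could otherwise be reused after `trace_manager` had already dropped it. A simplified stand-in for the pattern; names mirror the diff, but the surrounding classes are not reproduced:

    # Simplified stand-in, not deepeval's actual classes.
    active_traces: dict = {}  # mirrors trace_manager.active_traces (uuid -> trace)

    def resolve_trace_uuid(current_trace):
        # Old check: `if current_trace:` trusted the contextvar alone.
        # New check: the trace must also still be registered as active.
        if current_trace and current_trace.uuid in active_traces:
            return current_trace.uuid  # safe to attach this span
        return None                    # caller starts a new trace instead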
deepeval/tracing/types.py CHANGED
{deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepeval
-Version: 3.
+Version: 3.8.0
 Summary: The LLM Evaluation Framework
 Home-page: https://github.com/confident-ai/deepeval
 License: Apache-2.0
@@ -100,7 +100,7 @@ Description-Content-Type: text/markdown
 <a href="https://www.readme-i18n.com/confident-ai/deepeval?lang=zh">中文</a>
 </p>

-**DeepEval** is a simple-to-use, open-source LLM evaluation framework, for evaluating and testing large-language model systems. It is similar to Pytest but specialized for unit testing LLM outputs. DeepEval incorporates the latest research to evaluate LLM outputs based on metrics such as G-Eval, task completion, answer relevancy, hallucination, etc., which uses LLM-as-a-judge and other NLP models that
+**DeepEval** is a simple-to-use, open-source LLM evaluation framework, for evaluating and testing large-language model systems. It is similar to Pytest but specialized for unit testing LLM outputs. DeepEval incorporates the latest research to evaluate LLM outputs based on metrics such as G-Eval, task completion, answer relevancy, hallucination, etc., which uses LLM-as-a-judge and other NLP models that run **locally on your machine** for evaluation.

 Whether your LLM applications are AI agents, RAG pipelines, or chatbots, implemented via LangChain or OpenAI, DeepEval has you covered. With it, you can easily determine the optimal models, prompts, and architecture to improve your RAG pipeline, agentic workflows, prevent prompt drifting, or even transition from OpenAI to hosting your own Deepseek R1 with confidence.

@@ -118,7 +118,7 @@ Whether your LLM applications are AI agents, RAG pipelines, or chatbots, impleme
 > 🥳 You can now share DeepEval's test results on the cloud directly on [Confident AI](https://confident-ai.com?utm_source=GitHub)

 - Supports both end-to-end and component-level LLM evaluation.
-- Large variety of ready-to-use LLM evaluation metrics (all with explanations) powered by **ANY** LLM of your choice, statistical methods, or NLP models that
+- Large variety of ready-to-use LLM evaluation metrics (all with explanations) powered by **ANY** LLM of your choice, statistical methods, or NLP models that run **locally on your machine**:
 - G-Eval
 - DAG ([deep acyclic graph](https://deepeval.com/docs/metrics-dag))
 - **RAG metrics:**
{deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/RECORD CHANGED
@@ -1,5 +1,5 @@
 deepeval/__init__.py,sha256=tle4lT4FONApg3OeztGPEdrpGMEGLWajyGTu7bEd3s0,2976
-deepeval/_version.py,sha256=
+deepeval/_version.py,sha256=zzwaJLAxT4xICJZz0E72TnWZ0UtmJr0uD8CK8vnXu6I,27
 deepeval/annotation/__init__.py,sha256=ZFhUVNNuH_YgQSZJ-m5E9iUb9TkAkEV33a6ouMDZ8EI,111
 deepeval/annotation/annotation.py,sha256=3j3-syeJepAcEj3u3e4T_BeRDzNr7yXGDIoNQGMKpwQ,2298
 deepeval/annotation/api.py,sha256=EYN33ACVzVxsFleRYm60KB4Exvff3rPJKt1VBuuX970,2147
@@ -136,21 +136,21 @@ deepeval/benchmarks/winogrande/template.py,sha256=tDwH8NpNF9x7FbDmQw45XaW1LNqGBV
 deepeval/benchmarks/winogrande/winogrande.py,sha256=_4irJkRPw3c-Ufo-hM4cHpPKUoxozedFQpok9n0csTg,5644
 deepeval/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/cli/dotenv_handler.py,sha256=7PtVjCNUZKAXsVJQxznsLexad7y8x-gQ195xAxmv4gA,2468
-deepeval/cli/main.py,sha256=
+deepeval/cli/main.py,sha256=IsnAF8XVMA4j0dExMjlsD0ABPzFCwHkI4ai2S567qlA,100548
 deepeval/cli/server.py,sha256=cOm9xiYcPYB9GDeFQw9-Iawf9bNfOqftZs7q7mO_P7I,1979
 deepeval/cli/test.py,sha256=aoBPMfk0HTvOqb2xdvMykkx_s4SHst7lEnoUiSXo1lU,5483
 deepeval/cli/types.py,sha256=_7KdthstHNc-JKCWrfpDQCf_j8h9PMxh0qJCHmVXJr0,310
 deepeval/cli/utils.py,sha256=3fgH5WPTTe7Cz_QOLCHyflXB81kmFaSxXHJ2tnxvFLw,10649
 deepeval/confident/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-deepeval/confident/api.py,sha256=
+deepeval/confident/api.py,sha256=3TpuZm59xo1_APsAPppreCRepf8pfGWksMmgxnwp764,8773
 deepeval/confident/types.py,sha256=9bgePDaU31yY7JGwCLZcc7pev9VGtNDZLbjsVpCLVdc,574
 deepeval/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/config/dotenv_handler.py,sha256=lOosoC7fm9RljriY8EFl5ywSGfSiQsVf_vmYqzpbZ8s,588
 deepeval/config/logging.py,sha256=ivqmhOSB-oHOOU3MvnhImrZwkkxzxKJgoKxesnWfHjg,1314
-deepeval/config/settings.py,sha256=
+deepeval/config/settings.py,sha256=l5wvTEyG7KgWoY6F4Gc0j19oZef151r20Z8fZsiXR_U,57004
 deepeval/config/settings_manager.py,sha256=Ynebm2BKDrzajc6DEq2eYIwyRAAtUQOkTnl46albxLk,4187
 deepeval/config/utils.py,sha256=bJGljeAXoEYuUlYSvHSOsUnqINTwo6wOwfFHFpWxiaQ,4238
-deepeval/constants.py,sha256=
+deepeval/constants.py,sha256=MvwjLC1IHUY35FnnSsWVcHScmdbYBbPr8eTnsLWn40Y,1697
 deepeval/contextvars.py,sha256=oqXtuYiKd4Zvc1rNoR1gcRBxzZYCGTMVn7XostwvkRI,524
 deepeval/dataset/__init__.py,sha256=N2c-rkuxWYiiJSOZArw0H02Cwo7cnfzFuNYJlvsIBEg,249
 deepeval/dataset/api.py,sha256=bZ95HfIaxYB1IwTnp7x4AaKXWuII17T5uqVkhUXNc7I,1650
@@ -180,9 +180,9 @@ deepeval/integrations/hugging_face/rich_manager.py,sha256=WvFtPGpPmGeg2Ftsnojga6
 deepeval/integrations/hugging_face/tests/test_callbacks.py,sha256=88Wyg-aDaXujj9jHeGdFF3ITSl2-y7eaJGWgSyvvDi8,4607
 deepeval/integrations/hugging_face/utils.py,sha256=HUKdQcTIb76Ct69AS737oPxmlVxk5fw2UbT2pLn-o8k,1817
 deepeval/integrations/langchain/__init__.py,sha256=G1Qey5WkKou2-PA34KwWgmayQ_TbvXqPyotTbzmD8tw,84
-deepeval/integrations/langchain/callback.py,sha256=
+deepeval/integrations/langchain/callback.py,sha256=1K5KxpND6XEKCWnz-DWjhbO35AzmM3M8PIlk6bTYO2k,20360
 deepeval/integrations/langchain/patch.py,sha256=fCHfZXU9xX3IJ6SG8GEYzn3qrifyUkT0i_uUABTsmcs,1255
-deepeval/integrations/langchain/utils.py,sha256=
+deepeval/integrations/langchain/utils.py,sha256=oYsQYO3Ucbmd1d7gkb5ARd60gm6BHtLy_5OOqUw8HIQ,11311
 deepeval/integrations/llama_index/__init__.py,sha256=Ujs9ZBJFkuCWUDBJOF88UbM1Y-S6QFQhxSo0oQnEWNw,90
 deepeval/integrations/llama_index/handler.py,sha256=uTvNXmAF4xBh8t9bBm5sBFX6ETp8SrkOZlFlE_GWdmM,10771
 deepeval/integrations/llama_index/utils.py,sha256=onmmo1vpn6cpOY5EhfTc0Uui7X6l1M0HD3sq-KVAesg,3380
@@ -191,7 +191,7 @@ deepeval/integrations/pydantic_ai/agent.py,sha256=-NKvpTUw3AxRNhuxVFcx9mw5BWCujz
 deepeval/integrations/pydantic_ai/instrumentator.py,sha256=Us9LSYZWMfaeAc7PGXMDYWzjWKFVmhRvZrFhSvmk448,11922
 deepeval/integrations/pydantic_ai/otel.py,sha256=CCqwCJ5pHqCzHgujHQqZy7Jxo2PH1BT0kR7QxdtzutY,2060
 deepeval/integrations/pydantic_ai/test_instrumentator.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-deepeval/key_handler.py,sha256=
+deepeval/key_handler.py,sha256=lajMBgF2lCzbQpW4e6Y7cD9FOw0Qk5UOKS4_kIIHj6Y,9562
 deepeval/metrics/__init__.py,sha256=19Df323r8aAlx2sRfV9BHJLicORhTLpogR8M1deJetw,4680
 deepeval/metrics/answer_relevancy/__init__.py,sha256=WbZUpoSg2GQoqJ4VIRirVVQ1JDx5xwT-RskwqNKfWGM,46
 deepeval/metrics/answer_relevancy/answer_relevancy.py,sha256=2zRAuPq794S-rqMesMnuvSJxd1yhEUTq8nRwLxf42QE,10958
@@ -233,7 +233,7 @@ deepeval/metrics/conversational_dag/conversational_dag.py,sha256=GlhJC-BBtnfx3G2
 deepeval/metrics/conversational_dag/nodes.py,sha256=xMhBEQ87CMWd6DPF-hMuMp3rxVuw_SS7mHKHqGSfxLw,31348
 deepeval/metrics/conversational_dag/templates.py,sha256=zMF9rjRQCbAtMXXC03m1CwcYiYwsRCfoitf63QkvCmE,4243
 deepeval/metrics/conversational_g_eval/__init__.py,sha256=0whQUYv_qZx4nkz0V6TTjgg8gJVCmW8323WcXpj2EzI,93
-deepeval/metrics/conversational_g_eval/conversational_g_eval.py,sha256=
+deepeval/metrics/conversational_g_eval/conversational_g_eval.py,sha256=R6qt6yO8_k0ciXUz3F_imMxS9hFi0EtMgDkooOyE790,17190
 deepeval/metrics/conversational_g_eval/schema.py,sha256=H_9-iA1BXJwbPKrGEZBqxDO_En4sjXI8_xKSNYc-hnk,167
 deepeval/metrics/conversational_g_eval/template.py,sha256=JVKwZJBgHiP1cMuGTLAL_taKvRL-ppJjkiTOs0wzgYk,2931
 deepeval/metrics/dag/__init__.py,sha256=G5D9ngJ6nnbRBF2mfmNZymZId8gKD09QzTA1Y_bTrgM,157
@@ -250,10 +250,10 @@ deepeval/metrics/faithfulness/faithfulness.py,sha256=2mFXIh0U7Xf4Ybl6w-Lt74D4P13
 deepeval/metrics/faithfulness/schema.py,sha256=yPbe1CrW6PMOJjnWnUOxeb_Ul8sfDwvwgt4QTW-95RI,437
 deepeval/metrics/faithfulness/template.py,sha256=n9SvOM8iJ9Y4K0o8OHHc0uyw3E_v2BfZRd0fSUIX8XI,11126
 deepeval/metrics/g_eval/__init__.py,sha256=HAhsQFVq9LIpZXPN00Jc_WrMXrh47NIT86VnUpWM4_4,102
-deepeval/metrics/g_eval/g_eval.py,sha256=
+deepeval/metrics/g_eval/g_eval.py,sha256=VlQkYuWwWITB0wo8q1OVZEQjZ7V7gpDv4kdvrUP3ROA,16134
 deepeval/metrics/g_eval/schema.py,sha256=V629txuDrr_2IEKEsgJVYYZb_pkdfcltQV9ZjvxK5co,287
 deepeval/metrics/g_eval/template.py,sha256=v96BJFOH1rnME6b-OwJwcunvA4dd2GwraoXnjiZRu9Y,5182
-deepeval/metrics/g_eval/utils.py,sha256=
+deepeval/metrics/g_eval/utils.py,sha256=t8SvFt_2GRSOOKiYMHJWyhrzLCWOdFWGnlQ38PZA0Ls,10767
 deepeval/metrics/goal_accuracy/__init__.py,sha256=SVvA5Py1iNQoLujNUptvckoLoR6XMs-W2jQ7b89v-Tc,46
 deepeval/metrics/goal_accuracy/goal_accuracy.py,sha256=zlzxrAQQ4ASrjVo4-jDmn6uBw50sHOsAtgWez0CpveU,13034
 deepeval/metrics/goal_accuracy/schema.py,sha256=WmP1nw5ugOAKCFrpjZpF4jjeJzLB3Ecdp-2VWfBJLAE,257
@@ -388,7 +388,7 @@ deepeval/metrics/utils.py,sha256=RS8gsEh__DaKhXjdDfNcw1iOVvN40Z1mbQHM21Q30Iw,213
 deepeval/model_integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/model_integrations/types.py,sha256=rbVMhC_2yWwD6JqzkRO9D7aMVC_KtXN686G_S7de7S8,630
 deepeval/model_integrations/utils.py,sha256=Zt9SYPgTxlGsQFZgpZvh_a5fWuL8mmIFVSe6uoQywZ4,3562
-deepeval/models/__init__.py,sha256
+deepeval/models/__init__.py,sha256=-eHIhBn1a1fLKjPd5TCW8da8UuCKYdJvwic26swFtZM,1024
 deepeval/models/_summac_model.py,sha256=xflanxl_IBuzuainlYCVX7UvjHCnAckKSvNR2NwZI6k,19750
 deepeval/models/answer_relevancy_model.py,sha256=SLOA6uUImNOuxpPGfTg2AH7MIkf9QsotYixvI1jcVC8,2197
 deepeval/models/base_model.py,sha256=uja2bZcrTCIPMkIDgOLG2k2Ncw7uTX6vjvnrQtFlNlk,4891
@@ -399,11 +399,11 @@ deepeval/models/embedding_models/local_embedding_model.py,sha256=Io5dYNR8f-_iMmz
 deepeval/models/embedding_models/ollama_embedding_model.py,sha256=4uxrzdBlpWT-SM1HHHsZXhwg1ejkxUDFewxACLeYsG4,3747
 deepeval/models/embedding_models/openai_embedding_model.py,sha256=S8uvWODbiTF4EYfeID5yEF0YvYkDs1dP_Kiur4sb67M,4477
 deepeval/models/hallucination_model.py,sha256=ABi978VKLE_jNHbDzM96kJ08EsZ5ZlvOlJHA_ptSkfQ,1003
-deepeval/models/llms/__init__.py,sha256=
+deepeval/models/llms/__init__.py,sha256=Mlkvw9eIbxJXJjTB9Nj0LoL-kSRCmewrEihDvFyzvJA,799
 deepeval/models/llms/amazon_bedrock_model.py,sha256=mgBdGhyZo0SU0OMzqWAF6pUlQMUuCCdDiordfinDNpM,10898
 deepeval/models/llms/anthropic_model.py,sha256=08_nGK5EoGpf_F0I6JkhrEAswDc9DjLQqGYMX3emsoQ,10542
 deepeval/models/llms/azure_model.py,sha256=Nc_LgA8rEhkldvdhccNojERaviaBg6jyfBVL9bGdKek,16673
-deepeval/models/llms/constants.py,sha256=
+deepeval/models/llms/constants.py,sha256=H6_FyTNkfF0wr3R8qUlvT2LuZGT5lbXFh9Hcq5T8A8k,72008
 deepeval/models/llms/deepseek_model.py,sha256=OzEs0hnSixqICurVFo6T5GBAUeDrnWOlooEyJrgi5zE,8565
 deepeval/models/llms/gemini_model.py,sha256=h01bJnLBnc1xaqoVSBOL-PwllCsHRkA88bp21BA0Mws,15552
 deepeval/models/llms/grok_model.py,sha256=zGU1WzKADrgap5NQJTDb6BY4SZNNJqAZ6phnK_HFJqw,10703
@@ -411,10 +411,11 @@ deepeval/models/llms/kimi_model.py,sha256=n5w2MeeKSMS7HvSpiDSQueZ2EQSv3c6pDb-C-A
 deepeval/models/llms/litellm_model.py,sha256=lWfJvzWia7XCrLiRTNF0fUQXYOalsLV1y3Tq03loDP4,16533
 deepeval/models/llms/local_model.py,sha256=1KWuvgfdSE2XaNriIz-8gBIrbvmLOgomZxXI80Zt-8c,8287
 deepeval/models/llms/ollama_model.py,sha256=fk2GlQSFMYBe9oKrFouWAIf_PtSZZp8SGV2HXpE66no,7957
-deepeval/models/llms/openai_model.py,sha256=
+deepeval/models/llms/openai_model.py,sha256=3cp7fkpnh6suMu9lWTe0rYASALqWIZa2aUHVQhL8JHM,17243
+deepeval/models/llms/openrouter_model.py,sha256=hXDPQGa-HrvkXBL1E8ju1pduqsAnZg86RbotC0NcENU,13617
 deepeval/models/llms/portkey_model.py,sha256=EvUJDHhtKzetFakc1HzxYIpzUlgD3UnEvZr0q9hpIKU,6684
 deepeval/models/llms/utils.py,sha256=NsrZ4DjrVBnYbMZEY6G8U9On_B84kiDkB88yeyBUFfw,1433
-deepeval/models/retry_policy.py,sha256=
+deepeval/models/retry_policy.py,sha256=hNQRpP6SL5rpFTvLL-BRXkBLknmTwusJXuZnW9ba_as,34973
 deepeval/models/summac_model.py,sha256=wKeH7pWQRXrTlzlIw_r1YCb8b7jUhWq6jUz9FiNUCSg,1992
 deepeval/models/unbias_model.py,sha256=umOMhQLTmnD7uOuhiQufEl4Wlti4q2s3EtKOpds7zhs,597
 deepeval/models/utils.py,sha256=0_6_hmEwatWGoba-KNE38KvmDKMlhGff2lIzOCpRQgQ,5947
@@ -459,7 +460,7 @@ deepeval/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 deepeval/plugins/plugin.py,sha256=_dwsdx4Dg9DbXxK3f7zJY4QWTJQWc7QE1HmIg2Zjjag,1515
 deepeval/progress_context.py,sha256=ZSKpxrE9sdgt9G3REKnVeXAv7GJXHHVGgLynpG1Pudw,3557
 deepeval/prompt/__init__.py,sha256=rDU99KjydxDRKhuQJCBs_bpDJrWb2mpHtvyv6AEwFC8,367
-deepeval/prompt/api.py,sha256=
+deepeval/prompt/api.py,sha256=DNhKouq3ntEKmN_VegNh5X1gu_2RGJwzBp07rEEyg6s,6359
 deepeval/prompt/prompt.py,sha256=waaQDrTXQQUzOIJbOYtUpoa4qsuXgmzObUwFH-wRx2Y,31654
 deepeval/prompt/utils.py,sha256=knjgPU2066OtYWMb3NqMPChr9zQgKfXo_QTLTtSkmYg,7620
 deepeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -515,12 +516,12 @@ deepeval/tracing/patchers.py,sha256=Oi9wao3oDYhcviv7p0KoWBeS9ne7rHLa2gh9AR9EyiU,
 deepeval/tracing/perf_epoch_bridge.py,sha256=iyAPddB6Op7NpMtPHJ29lDm53Btz9yLaN6xSCfTRQm4,1825
 deepeval/tracing/trace_context.py,sha256=Z0n0Cu1A5g9dXiZnzTFO5TzeOYHKeNuO6v3_EU_Gi_c,3568
 deepeval/tracing/trace_test_manager.py,sha256=wt4y7EWTRc4Bw938-UFFtXHkdFFOrnx6JaIk7J5Iulw,555
-deepeval/tracing/tracing.py,sha256=
-deepeval/tracing/types.py,sha256=
+deepeval/tracing/tracing.py,sha256=AkbmgjWzSQ2k2qeN9i8LT17MsafuBenzzkP0r31I950,46728
+deepeval/tracing/types.py,sha256=3QkF0toQ6f0fEDARYOUV6Iv9UJFbg14kSpn3dL1H5CE,6040
 deepeval/tracing/utils.py,sha256=mdvhYAxDNsdnusaEXJd-c-_O2Jn6S3xSuzRvLO1Jz4U,5684
 deepeval/utils.py,sha256=Wsu95g6t1wdttxWIESVwuUxbml7C-9ZTsV7qHCQI3Xg,27259
-deepeval-3.
-deepeval-3.
-deepeval-3.
-deepeval-3.
-deepeval-3.
+deepeval-3.8.0.dist-info/LICENSE.md,sha256=0ATkuLv6QgsJTBODUHC5Rak_PArA6gv2t7inJzNTP38,11352
+deepeval-3.8.0.dist-info/METADATA,sha256=z7OpguZITPdv0S9jhvE6CEq-zVCSq9fvbKChLKD9gwc,18752
+deepeval-3.8.0.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
+deepeval-3.8.0.dist-info/entry_points.txt,sha256=NoismUQfwLOojSGZmBrdcpwfaoFRAzUhBvZD3UwOKog,95
+deepeval-3.8.0.dist-info/RECORD,,
{deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/LICENSE.md: File without changes
{deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/WHEEL: File without changes
{deepeval-3.7.9.dist-info → deepeval-3.8.0.dist-info}/entry_points.txt: File without changes