deepeval 3.8.3__py3-none-any.whl → 3.8.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/config/settings.py +12 -1
- deepeval/constants.py +2 -1
- deepeval/prompt/__init__.py +2 -0
- deepeval/prompt/api.py +48 -2
- deepeval/prompt/prompt.py +30 -0
- deepeval/prompt/utils.py +72 -2
- {deepeval-3.8.3.dist-info → deepeval-3.8.4.dist-info}/METADATA +1 -1
- {deepeval-3.8.3.dist-info → deepeval-3.8.4.dist-info}/RECORD +12 -12
- {deepeval-3.8.3.dist-info → deepeval-3.8.4.dist-info}/LICENSE.md +0 -0
- {deepeval-3.8.3.dist-info → deepeval-3.8.4.dist-info}/WHEEL +0 -0
- {deepeval-3.8.3.dist-info → deepeval-3.8.4.dist-info}/entry_points.txt +0 -0
deepeval/_version.py
CHANGED

@@ -1 +1 @@
-__version__: str = "3.8.3"
+__version__: str = "3.8.4"
deepeval/config/settings.py
CHANGED

@@ -316,6 +316,12 @@ class Settings(BaseSettings):
         description="If set, export a timestamped JSON of the latest test run into this folder (created if missing).",
     )
 
+    # When set, overrides the default DeepEval cache directory
+    DEEPEVAL_CACHE_FOLDER: Optional[Path] = Field(
+        ".deepeval",
+        description="Path to the directory used by DeepEval to store cache files. If set, this overrides the default cache location. The directory will be created if it does not exist.",
+    )
+
     # Display / Truncation
     DEEPEVAL_MAXLEN_TINY: Optional[int] = Field(
         40,
@@ -1015,7 +1021,12 @@ class Settings(BaseSettings):
     def _coerce_yes_no(cls, v):
         return None if v is None else parse_bool(v, default=False)
 
-    @field_validator(
+    @field_validator(
+        "DEEPEVAL_RESULTS_FOLDER",
+        "ENV_DIR_PATH",
+        "DEEPEVAL_CACHE_FOLDER",
+        mode="before",
+    )
     @classmethod
     def _coerce_path(cls, v):
         if v is None:
deepeval/constants.py
CHANGED

@@ -1,8 +1,9 @@
 from enum import Enum
 from typing import Union
+import os
 
 KEY_FILE: str = ".deepeval"
-HIDDEN_DIR: str = ".deepeval"
+HIDDEN_DIR: str = os.getenv("DEEPEVAL_CACHE_FOLDER", ".deepeval")
 PYTEST_RUN_TEST_NAME: str = "CONFIDENT_AI_RUN_TEST_NAME"
 LOGIN_PROMPT = "\n✨👀 Looking for a place for your LLM test data to live 🏡❤️ ? Use [rgb(106,0,255)]Confident AI[/rgb(106,0,255)] to get & share testing reports, experiment with models/prompts, and catch regressions for your LLM system. Just run [cyan]'deepeval login'[/cyan] in the CLI."
 
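Taken together, the settings.py and constants.py changes let the DeepEval cache location be overridden through the DEEPEVAL_CACHE_FOLDER environment variable. A minimal usage sketch (the target path is illustrative; the variable must be set before deepeval is imported, since constants.py resolves HIDDEN_DIR via os.getenv() at import time):

# Hypothetical sketch: redirect DeepEval's hidden cache directory.
import os

os.environ["DEEPEVAL_CACHE_FOLDER"] = "/tmp/deepeval-cache"  # illustrative path

from deepeval import constants  # import after setting the variable

print(constants.HIDDEN_DIR)  # -> /tmp/deepeval-cache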
deepeval/prompt/__init__.py
CHANGED
deepeval/prompt/api.py
CHANGED

@@ -1,6 +1,14 @@
-from pydantic import
+from pydantic import (
+    BaseModel,
+    Field,
+    AliasChoices,
+    ConfigDict,
+    model_validator,
+    model_serializer,
+)
 from enum import Enum
-
+import uuid
+from typing import List, Optional, Dict, Any, Union, Type
 from pydantic import TypeAdapter
 
 from deepeval.utils import make_model_config
@@ -33,6 +41,12 @@ class ModelProvider(Enum):
     OPENROUTER = "OPENROUTER"
 
 
+class ToolMode(str, Enum):
+    ALLOW_ADDITIONAL = "ALLOW_ADDITIONAL"
+    NO_ADDITIONAL = "NO_ADDITIONAL"
+    STRICT = "STRICT"
+
+
 class ModelSettings(BaseModel):
     provider: Optional[ModelProvider] = None
     name: Optional[str] = None
@@ -100,6 +114,7 @@ class OutputSchemaField(BaseModel):
     id: str
     type: SchemaDataType
     name: str
+    description: Optional[str] = None
     required: Optional[bool] = False
     parent_id: Optional[str] = Field(
         default=None,
@@ -109,8 +124,36 @@ class OutputSchemaField(BaseModel):
 
 
 class OutputSchema(BaseModel):
+    id: Optional[str] = None
     fields: Optional[List[OutputSchemaField]] = None
+    name: Optional[str] = None
+
+
+class Tool(BaseModel):
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
     name: str
+    description: Optional[str] = None
+    mode: ToolMode
+    structured_schema: Optional[Union[Type[BaseModel], OutputSchema]] = Field(
+        serialization_alias="structuredSchema",
+        validation_alias=AliasChoices("structured_schema", "structuredSchema"),
+    )
+
+    @model_validator(mode="after")
+    def update_schema(self):
+        if not isinstance(self.structured_schema, OutputSchema):
+            from deepeval.prompt.utils import construct_output_schema
+
+            self.structured_schema = construct_output_schema(
+                self.structured_schema
+            )
+        return self
+
+    @property
+    def input_schema(self) -> Dict[str, Any]:
+        from deepeval.prompt.utils import output_schema_to_json_schema
+
+        return output_schema_to_json_schema(self.structured_schema)
 
 
 ###################################
@@ -186,6 +229,7 @@ class PromptHttpResponse(BaseModel):
         serialization_alias="outputSchema",
         validation_alias=AliasChoices("output_schema", "outputSchema"),
     )
+    tools: Optional[List[Tool]] = None
 
 
 class PromptPushRequest(BaseModel):
@@ -196,6 +240,7 @@ class PromptPushRequest(BaseModel):
     alias: str
     text: Optional[str] = None
     messages: Optional[List[PromptMessage]] = None
+    tools: Optional[List[Tool]] = None
     interpolation_type: PromptInterpolationType = Field(
         serialization_alias="interpolationType"
     )
@@ -215,6 +260,7 @@ class PromptUpdateRequest(BaseModel):
 
     text: Optional[str] = None
     messages: Optional[List[PromptMessage]] = None
+    tools: Optional[List[Tool]] = None
     interpolation_type: PromptInterpolationType = Field(
         serialization_alias="interpolationType"
     )
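The new Tool and ToolMode models are the centerpiece of this release. A hedged sketch of how a tool definition might be built from a Pydantic model (the WeatherQuery class and its fields are made up for illustration; the conversion and input_schema behavior are inferred from the validator and property shown in the diff above, so the exact schema output may differ):

from pydantic import BaseModel

from deepeval.prompt.api import Tool, ToolMode


class WeatherQuery(BaseModel):  # illustrative tool-argument model
    city: str
    days: int


# update_schema() converts the Pydantic class into an OutputSchema via
# construct_output_schema(); input_schema then renders it as JSON Schema.
tool = Tool(
    name="get_weather",
    description="Look up a short-term forecast for a city",
    mode=ToolMode.STRICT,
    structured_schema=WeatherQuery,
)

print(tool.input_schema)
# -> {'type': 'object', 'properties': {'city': {...}, 'days': {...}}, ...}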
deepeval/prompt/prompt.py
CHANGED

@@ -25,6 +25,7 @@ from deepeval.prompt.api import (
     ModelSettings,
     OutputSchema,
     OutputType,
+    Tool,
 )
 from deepeval.prompt.utils import (
     interpolate_text,
@@ -101,6 +102,7 @@ class CachedPrompt(BaseModel):
     model_settings: Optional[ModelSettings]
     output_type: Optional[OutputType]
     output_schema: Optional[OutputSchema]
+    tools: Optional[List[Tool]] = None
 
 
 class Prompt:
@@ -131,6 +133,7 @@ class Prompt:
             interpolation_type or PromptInterpolationType.FSTRING
         )
         self.confident_api_key = confident_api_key
+        self.tools: Optional[List[Tool]] = None
 
         self._version = None
         self._prompt_version_id: Optional[str] = None
@@ -308,6 +311,7 @@ class Prompt:
         model_settings: Optional[ModelSettings] = None,
         output_type: Optional[OutputType] = None,
         output_schema: Optional[OutputSchema] = None,
+        tools: Optional[List[Tool]] = None,
     ):
         if portalocker is None or not self.alias:
             return
@@ -354,6 +358,7 @@ class Prompt:
             "model_settings": model_settings,
             "output_type": output_type,
             "output_schema": output_schema,
+            "tools": tools,
         }
 
         if cache_key == VERSION_CACHE_KEY:
@@ -415,6 +420,7 @@ class Prompt:
                 self.output_schema = construct_base_model(
                     cached_prompt.output_schema
                 )
+                self.tools = cached_prompt.tools
 
                 end_time = time.perf_counter()
                 time_taken = format(end_time - start_time, ".2f")
@@ -494,6 +500,7 @@ class Prompt:
                 self.output_schema = construct_base_model(
                     cached_prompt.output_schema
                 )
+                self.tools = cached_prompt.tools
                 return
             except Exception:
                 pass
@@ -547,6 +554,7 @@ class Prompt:
                 model_settings=data.get("modelSettings", None),
                 output_type=data.get("outputType", None),
                 output_schema=data.get("outputSchema", None),
+                tools=data.get("tools", None),
             )
         except Exception:
             if fallback_to_cache:
@@ -573,6 +581,7 @@ class Prompt:
             self.output_schema = construct_base_model(
                 response.output_schema
             )
+            self.tools = response.tools
 
             end_time = time.perf_counter()
             time_taken = format(end_time - start_time, ".2f")
@@ -594,6 +603,7 @@ class Prompt:
             model_settings=response.model_settings,
             output_type=response.output_type,
             output_schema=response.output_schema,
+            tools=response.tools,
         )
 
     def push(
@@ -606,6 +616,7 @@ class Prompt:
         model_settings: Optional[ModelSettings] = None,
         output_type: Optional[OutputType] = None,
         output_schema: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Tool]] = None,
         _verbose: Optional[bool] = True,
     ):
         if self.alias is None:
@@ -628,6 +639,7 @@ class Prompt:
             output_type=output_type or self.output_type,
             output_schema=construct_output_schema(output_schema)
             or construct_output_schema(self.output_schema),
+            tools=tools or self.tools,
         )
         try:
             body = body.model_dump(
@@ -655,6 +667,7 @@ class Prompt:
         self.model_settings = model_settings or self.model_settings
         self.output_type = output_type or self.output_type
         self.output_schema = output_schema or self.output_schema
+        self.tools = tools or self.tools
         self.type = PromptType.TEXT if text_template else PromptType.LIST
         if _verbose:
             console = Console()
@@ -674,6 +687,7 @@ class Prompt:
         model_settings: Optional[ModelSettings] = None,
         output_type: Optional[OutputType] = None,
         output_schema: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Tool]] = None,
     ):
         if self.alias is None:
             raise ValueError(
@@ -687,6 +701,7 @@ class Prompt:
             model_settings=model_settings,
             output_type=output_type,
             output_schema=construct_output_schema(output_schema),
+            tools=tools,
         )
         try:
             body = body.model_dump(
@@ -712,6 +727,7 @@ class Prompt:
         self.model_settings = model_settings
         self.output_type = output_type
         self.output_schema = output_schema
+        self.tools = tools
         self.type = PromptType.TEXT if text else PromptType.LIST
         console = Console()
         console.print("✅ Prompt successfully updated on Confident AI!")
@@ -796,6 +812,10 @@ class Prompt:
                 messages=data.get("messages", None),
                 type=data["type"],
                 interpolation_type=data["interpolationType"],
+                model_settings=data.get("modelSettings", None),
+                output_type=data.get("outputType", None),
+                output_schema=data.get("outputSchema", None),
+                tools=data.get("tools", None),
             )
 
             # Update the cache with fresh data from server
@@ -808,6 +828,10 @@ class Prompt:
                 prompt_version_id=response.id,
                 type=response.type,
                 interpolation_type=response.interpolation_type,
+                model_settings=response.model_settings,
+                output_type=response.output_type,
+                output_schema=response.output_schema,
+                tools=response.tools,
             )
 
             # Update in-memory properties with fresh data (thread-safe)
@@ -819,6 +843,12 @@ class Prompt:
             self._prompt_version_id = response.id
             self.type = response.type
             self.interpolation_type = response.interpolation_type
+            self.model_settings = response.model_settings
+            self.output_type = response.output_type
+            self.output_schema = construct_base_model(
+                response.output_schema
+            )
+            self.tools = response.tools
 
         except Exception:
             pass
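On the Prompt side, tool definitions now travel with every pull, push, update, and cache read/write. A hedged sketch of reading them back after a pull (the alias is hypothetical and a configured Confident AI API key is assumed; the attribute names follow the diff above):

from deepeval.prompt import Prompt

prompt = Prompt(alias="support-agent")  # hypothetical alias
prompt.pull()  # populates prompt.tools from the server response or the local cache

for tool in prompt.tools or []:
    print(tool.name, tool.mode, tool.input_schema)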
deepeval/prompt/utils.py
CHANGED

@@ -130,7 +130,7 @@ def construct_base_model(
     if not schema:
         return None
     if not schema.fields:
-        return create_model(schema.name)
+        return create_model(schema.name or "EmptySchema")
 
     parent_id_map: Dict[Optional[str], List[OutputSchemaField]] = {}
     for field in schema.fields:
@@ -153,7 +153,7 @@ def construct_base_model(
         default = ... if field.required else None
         root_fields[field.name] = (python_type, default)
 
-    return create_model(schema.name, **root_fields)
+    return create_model(schema.name or "Schema", **root_fields)
 
 
 ###################################
@@ -219,3 +219,73 @@ def construct_output_schema(
         return None
     all_fields = _process_model(base_model_class)
     return OutputSchema(fields=all_fields, name=base_model_class.__name__)
+
+
+def output_schema_to_json_schema(
+    schema: Optional[OutputSchema] = None,
+) -> Dict[str, Any]:
+    if not schema or not schema.fields:
+        return {
+            "type": "object",
+            "properties": {},
+            "additionalProperties": False,
+        }
+
+    # Build parent-child mapping
+    children_map: Dict[Optional[str], List[OutputSchemaField]] = {}
+    for field in schema.fields:
+        parent_id = field.parent_id
+        children_map.setdefault(parent_id, []).append(field)
+
+    # Map SchemaDataType to JSON Schema types
+    def map_type(dtype: SchemaDataType) -> str:
+        return {
+            SchemaDataType.STRING: "string",
+            SchemaDataType.INTEGER: "integer",
+            SchemaDataType.FLOAT: "number",
+            SchemaDataType.BOOLEAN: "boolean",
+            SchemaDataType.OBJECT: "object",
+            SchemaDataType.NULL: "null",
+        }.get(dtype, "string")
+
+    def build_node(field_list: List[OutputSchemaField]) -> Dict[str, Any]:
+        properties = {}
+        required_fields = []
+
+        for field in field_list:
+            field_type = (
+                field.type.value if hasattr(field.type, "value") else field.type
+            )
+            field_schema = {"type": map_type(field.type)}
+
+            # Add description if available
+            if field.description:
+                field_schema["description"] = field.description
+
+            # Handle nested objects
+            if field_type == SchemaDataType.OBJECT.value:
+                children = children_map.get(field.id, [])
+                if children:
+                    nested = build_node(children)
+                    field_schema.update(nested)
+                else:
+                    field_schema["properties"] = {}
+                    field_schema["additionalProperties"] = False
+
+            properties[field.name] = field_schema
+            if field.required:
+                required_fields.append(field.name)
+
+        schema_dict = {
+            "type": "object",
+            "properties": properties,
+            "additionalProperties": False,
+        }
+
+        if required_fields:
+            schema_dict["required"] = required_fields
+
+        return schema_dict
+
+    root_fields = children_map.get(None, [])
+    return build_node(root_fields)
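The new output_schema_to_json_schema() helper walks the flat field list via parent_id links and emits nested JSON Schema. A small sketch with illustrative field ids (the OutputSchemaField attribute names follow the api.py definitions in this diff; the expected output is inferred from the function body above):

from deepeval.prompt.api import OutputSchema, OutputSchemaField, SchemaDataType
from deepeval.prompt.utils import output_schema_to_json_schema

schema = OutputSchema(
    name="Order",
    fields=[
        # A top-level object with two children linked through parent_id.
        OutputSchemaField(id="f1", type=SchemaDataType.OBJECT, name="customer", required=True),
        OutputSchemaField(id="f2", type=SchemaDataType.STRING, name="email", parent_id="f1", required=True),
        OutputSchemaField(id="f3", type=SchemaDataType.INTEGER, name="age", parent_id="f1"),
    ],
)

print(output_schema_to_json_schema(schema))
# -> {'type': 'object',
#     'properties': {'customer': {'type': 'object',
#                                 'properties': {'email': {'type': 'string'},
#                                                'age': {'type': 'integer'}},
#                                 'additionalProperties': False,
#                                 'required': ['email']}},
#     'additionalProperties': False,
#     'required': ['customer']}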
{deepeval-3.8.3.dist-info → deepeval-3.8.4.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 deepeval/__init__.py,sha256=tle4lT4FONApg3OeztGPEdrpGMEGLWajyGTu7bEd3s0,2976
-deepeval/_version.py,sha256=
+deepeval/_version.py,sha256=qRG_MqE6nmuOs8veYUyEqKmn7uxJuYuzR-7hEXJnUEo,27
 deepeval/annotation/__init__.py,sha256=ZFhUVNNuH_YgQSZJ-m5E9iUb9TkAkEV33a6ouMDZ8EI,111
 deepeval/annotation/annotation.py,sha256=WLFZRkx6wRJcNzaOMMGXuTfw6Q1_1Mv5A4jpD7Ea4sU,2300
 deepeval/annotation/api.py,sha256=EYN33ACVzVxsFleRYm60KB4Exvff3rPJKt1VBuuX970,2147
@@ -147,10 +147,10 @@ deepeval/confident/types.py,sha256=9bgePDaU31yY7JGwCLZcc7pev9VGtNDZLbjsVpCLVdc,5
 deepeval/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/config/dotenv_handler.py,sha256=lOosoC7fm9RljriY8EFl5ywSGfSiQsVf_vmYqzpbZ8s,588
 deepeval/config/logging.py,sha256=ivqmhOSB-oHOOU3MvnhImrZwkkxzxKJgoKxesnWfHjg,1314
-deepeval/config/settings.py,sha256=
+deepeval/config/settings.py,sha256=m5VFmEIWOuNo6JniWPqrgIrh-BMCOXyDipAmbPAEsII,57510
 deepeval/config/settings_manager.py,sha256=Ynebm2BKDrzajc6DEq2eYIwyRAAtUQOkTnl46albxLk,4187
 deepeval/config/utils.py,sha256=bJGljeAXoEYuUlYSvHSOsUnqINTwo6wOwfFHFpWxiaQ,4238
-deepeval/constants.py,sha256=
+deepeval/constants.py,sha256=bb6GOTWJog2M992Y91ZwaQjGXChUTuznfx_TXpdaBYY,1743
 deepeval/contextvars.py,sha256=oqXtuYiKd4Zvc1rNoR1gcRBxzZYCGTMVn7XostwvkRI,524
 deepeval/dataset/__init__.py,sha256=N2c-rkuxWYiiJSOZArw0H02Cwo7cnfzFuNYJlvsIBEg,249
 deepeval/dataset/api.py,sha256=bZ95HfIaxYB1IwTnp7x4AaKXWuII17T5uqVkhUXNc7I,1650
@@ -459,10 +459,10 @@ deepeval/optimizer/utils.py,sha256=vOC7tFdWSqM62JQjtnEVjmxV8MIOlc83nuRT6ghhRGY,1
 deepeval/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/plugins/plugin.py,sha256=_dwsdx4Dg9DbXxK3f7zJY4QWTJQWc7QE1HmIg2Zjjag,1515
 deepeval/progress_context.py,sha256=ZSKpxrE9sdgt9G3REKnVeXAv7GJXHHVGgLynpG1Pudw,3557
-deepeval/prompt/__init__.py,sha256=
-deepeval/prompt/api.py,sha256=
-deepeval/prompt/prompt.py,sha256=
-deepeval/prompt/utils.py,sha256=
+deepeval/prompt/__init__.py,sha256=NLFORZoGWUV-I-UdzAsDlR_xikAP-uSl0mqngckTOK0,389
+deepeval/prompt/api.py,sha256=uwE0V_gPSuR5ShHdvPR1v7IAF3O4xa2B3xgkiBDt4mE,7698
+deepeval/prompt/prompt.py,sha256=2oPfCQBmgTXSJaIfxm_tp5X4dpD2578eeKPmuYDYyGw,33315
+deepeval/prompt/utils.py,sha256=nhjTHgJEnXlDcLmHb5qleMSCFHiKrYHC1y4gl6MEaLU,9937
 deepeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/red_teaming/README.md,sha256=BY5rAdpp3-sMMToEKwq0Nsd9ivkGDzPE16DeDb8GY7U,154
 deepeval/scorer/__init__.py,sha256=hTvtoV3a4l0dSBjERm-jX7jveTtKZXK0c9JerQo0T_w,27
@@ -520,8 +520,8 @@ deepeval/tracing/tracing.py,sha256=RS3mBV-63_vDyz-WxYPir34u0BF3mPnAJWee1aCc1sc,4
 deepeval/tracing/types.py,sha256=PUXDC1JZDaAalPc3uUHywkt2GE2hZ-2ocGP0Fe4sB2E,6120
 deepeval/tracing/utils.py,sha256=mdvhYAxDNsdnusaEXJd-c-_O2Jn6S3xSuzRvLO1Jz4U,5684
 deepeval/utils.py,sha256=Wsu95g6t1wdttxWIESVwuUxbml7C-9ZTsV7qHCQI3Xg,27259
-deepeval-3.8.
-deepeval-3.8.
-deepeval-3.8.
-deepeval-3.8.
-deepeval-3.8.
+deepeval-3.8.4.dist-info/LICENSE.md,sha256=0ATkuLv6QgsJTBODUHC5Rak_PArA6gv2t7inJzNTP38,11352
+deepeval-3.8.4.dist-info/METADATA,sha256=F1ZXkRUVi1Ntr1SLLXe-sife_qdnHofOJmSiTFVzIsk,18752
+deepeval-3.8.4.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
+deepeval-3.8.4.dist-info/entry_points.txt,sha256=NoismUQfwLOojSGZmBrdcpwfaoFRAzUhBvZD3UwOKog,95
+deepeval-3.8.4.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|