deepeval 3.7.8__py3-none-any.whl → 3.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/benchmarks/drop/drop.py +5 -2
- deepeval/benchmarks/mmlu/mmlu.py +6 -4
- deepeval/cli/main.py +168 -0
- deepeval/cli/utils.py +2 -2
- deepeval/confident/api.py +2 -0
- deepeval/config/settings.py +10 -0
- deepeval/constants.py +1 -0
- deepeval/integrations/langchain/callback.py +330 -158
- deepeval/integrations/langchain/utils.py +31 -8
- deepeval/key_handler.py +8 -1
- deepeval/metrics/conversational_g_eval/conversational_g_eval.py +35 -0
- deepeval/metrics/g_eval/g_eval.py +35 -1
- deepeval/metrics/g_eval/utils.py +65 -0
- deepeval/models/__init__.py +2 -0
- deepeval/models/llms/__init__.py +2 -0
- deepeval/models/llms/constants.py +23 -0
- deepeval/models/llms/gemini_model.py +27 -29
- deepeval/models/llms/openai_model.py +5 -4
- deepeval/models/llms/openrouter_model.py +398 -0
- deepeval/models/retry_policy.py +3 -0
- deepeval/prompt/api.py +1 -0
- deepeval/synthesizer/synthesizer.py +190 -82
- deepeval/tracing/tracing.py +6 -1
- deepeval/tracing/types.py +1 -1
- deepeval/utils.py +21 -6
- {deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/METADATA +7 -7
- {deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/RECORD +31 -30
- {deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/WHEEL +0 -0
- {deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/entry_points.txt +0 -0
deepeval/synthesizer/synthesizer.py
CHANGED

```diff
@@ -1383,53 +1383,99 @@ class Synthesizer:
         # Prepare data for the DataFrame
         data = []

-        …
+        if (
+            self.synthetic_goldens is not None
+            and len(self.synthetic_goldens) > 0
+        ):
+            for golden in self.synthetic_goldens:
+                # Extract basic fields
+                input_text = golden.input
+                expected_output = golden.expected_output
+                context = golden.context
+                actual_output = golden.actual_output
+                retrieval_context = golden.retrieval_context
+                metadata = golden.additional_metadata
+                source_file = golden.source_file
+
+                # Calculate num_context and context_length
+                if context is not None:
+                    num_context = len(context)
+                    context_length = sum(len(c) for c in context)
+                else:
+                    num_context = None
+                    context_length = None
+
+                # Handle metadata
+                if metadata is not None:
+                    evolutions = metadata.get("evolutions", None)
+                    synthetic_input_quality = metadata.get(
+                        "synthetic_input_quality", None
+                    )
+                    context_quality = metadata.get("context_quality", None)
+                else:
+                    evolutions = None
+                    synthetic_input_quality = None
+                    context_quality = None
+
+                # Prepare a row for the DataFrame
+                row = {
+                    "input": input_text,
+                    "actual_output": actual_output,
+                    "expected_output": expected_output,
+                    "context": context,
+                    "retrieval_context": retrieval_context,
+                    "n_chunks_per_context": num_context,
+                    "context_length": context_length,
+                    "evolutions": evolutions,
+                    "context_quality": context_quality,
+                    "synthetic_input_quality": synthetic_input_quality,
+                    "source_file": source_file,
+                }
+
+                # Append the row to the data list
+                data.append(row)
+        else:
+            for golden in self.synthetic_conversational_goldens:
+                # Extract basic fields
+                scenario = golden.scenario
+                expected_outcome = golden.expected_outcome
+                context = golden.context
+                metadata = golden.additional_metadata
+
+                # Calculate num_context and context_length
+                if context is not None:
+                    num_context = len(context)
+                    context_length = sum(len(c) for c in context)
+                else:
+                    num_context = None
+                    context_length = None
+
+                # Handle metadata
+                if metadata is not None:
+                    evolutions = metadata.get("evolutions", None)
+                    synthetic_scenario_quality = metadata.get(
+                        "synthetic_scenario_quality", None
+                    )
+                    source_files = metadata.get("source_files", None)
+                else:
+                    evolutions = None
+                    synthetic_scenario_quality = None
+                    source_files = None
+
+                # Prepare a row for the DataFrame
+                row = {
+                    "scenario": scenario,
+                    "expected_outcome": expected_outcome,
+                    "context": context,
+                    "n_chunks_per_context": num_context,
+                    "context_length": context_length,
+                    "evolutions": evolutions,
+                    "synthetic_scenario_quality": synthetic_scenario_quality,
+                    "source_files": source_files,
+                }
+
+                # Append the row to the data list
+                data.append(row)

         # Create the pandas DataFrame
         df = pd.DataFrame(data)
```
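The hunk above branches on which golden list is populated: single-turn goldens keep the original columns, while the new conversational path produces `scenario`/`expected_outcome` rows with the same derived context statistics. Below is a minimal standalone sketch of the conversational branch; the stand-in dataclass is hypothetical (only the field names read in the diff are assumed), it is not deepeval's own `ConversationalGolden` type.

```python
from dataclasses import dataclass
from typing import Dict, List, Optional

import pandas as pd


@dataclass
class FakeConversationalGolden:
    # Hypothetical stand-in mirroring the fields the diff reads from a conversational golden.
    scenario: str
    expected_outcome: Optional[str] = None
    context: Optional[List[str]] = None
    additional_metadata: Optional[Dict] = None


def conversational_goldens_to_df(goldens: List[FakeConversationalGolden]) -> pd.DataFrame:
    rows = []
    for golden in goldens:
        context = golden.context
        metadata = golden.additional_metadata or {}
        rows.append(
            {
                "scenario": golden.scenario,
                "expected_outcome": golden.expected_outcome,
                "context": context,
                # Same derived columns as the diff: chunk count and total character length.
                "n_chunks_per_context": len(context) if context is not None else None,
                "context_length": sum(len(c) for c in context) if context is not None else None,
                "evolutions": metadata.get("evolutions"),
                "synthetic_scenario_quality": metadata.get("synthetic_scenario_quality"),
                "source_files": metadata.get("source_files"),
            }
        )
    return pd.DataFrame(rows)


if __name__ == "__main__":
    df = conversational_goldens_to_df(
        [FakeConversationalGolden(scenario="User asks for a refund", context=["policy.md chunk"])]
    )
    print(df.columns.tolist())
```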
```diff
@@ -1479,7 +1525,10 @@ class Synthesizer:
                 "parameter."
             )

-        if …
+        if (
+            len(self.synthetic_goldens) == 0
+            and len(self.synthetic_conversational_goldens) == 0
+        ):
             raise ValueError(
                 "No synthetic goldens found. Please generate goldens before saving goldens."
             )
```
```diff
@@ -1494,52 +1543,111 @@ class Synthesizer:
         full_file_path = os.path.join(directory, new_filename)
         if file_type == "json":
             with open(full_file_path, "w", encoding="utf-8") as file:
-                …
+                if (
+                    self.synthetic_goldens is not None
+                    and len(self.synthetic_goldens) > 0
+                ):
+                    json_data = [
+                        {
+                            "input": golden.input,
+                            "actual_output": golden.actual_output,
+                            "expected_output": golden.expected_output,
+                            "context": golden.context,
+                            "source_file": golden.source_file,
+                        }
+                        for golden in self.synthetic_goldens
+                    ]
+                else:
+                    json_data = [
+                        {
+                            "scenario": golden.scenario,
+                            "expected_outcome": golden.expected_outcome,
+                            "context": golden.context,
+                            "source_files": golden.additional_metadata.get(
+                                "source_files", None
+                            ),
+                        }
+                        for golden in self.synthetic_conversational_goldens
+                    ]
                 json.dump(json_data, file, indent=4, ensure_ascii=False)
         elif file_type == "csv":
             with open(
                 full_file_path, "w", newline="", encoding="utf-8"
             ) as file:
                 writer = csv.writer(file)
-                …
+                if (
+                    self.synthetic_goldens is not None
+                    and len(self.synthetic_goldens) > 0
+                ):
+                    writer.writerow(
+                        [
+                            "input",
+                            "actual_output",
+                            "expected_output",
+                            "context",
+                            "source_file",
+                        ]
+                    )
+                    for golden in self.synthetic_goldens:
+                        writer.writerow(
+                            [
+                                golden.input,
+                                golden.actual_output,
+                                golden.expected_output,
+                                "|".join(golden.context),
+                                golden.source_file,
+                            ]
+                        )
+                else:
                     writer.writerow(
                         [
-                            …
-                            "…
-                            golden.source_file,
+                            "scenario",
+                            "expected_outcome",
+                            "context",
+                            "source_files",
                         ]
                     )
+                    for golden in self.synthetic_conversational_goldens:
+                        writer.writerow(
+                            [
+                                golden.scenario,
+                                golden.expected_outcome,
+                                "|".join(golden.context),
+                                golden.additional_metadata.get(
+                                    "source_files", None
+                                ),
+                            ]
+                        )
         elif file_type == "jsonl":
             with open(full_file_path, "w", encoding="utf-8") as file:
-                …
+                if (
+                    self.synthetic_goldens is not None
+                    and len(self.synthetic_goldens) > 0
+                ):
+                    for golden in self.synthetic_goldens:
+                        record = {
+                            "input": golden.input,
+                            "actual_output": golden.actual_output,
+                            "expected_output": golden.expected_output,
+                            "context": golden.context,
+                            "source_file": golden.source_file,
+                        }
+                        file.write(
+                            json.dumps(record, ensure_ascii=False) + "\n"
+                        )
+                else:
+                    for golden in self.synthetic_conversational_goldens:
+                        record = {
+                            "scenario": golden.scenario,
+                            "expected_outcome": golden.expected_outcome,
+                            "context": golden.context,
+                            "source_files": golden.additional_metadata.get(
+                                "source_files", None
+                            ),
+                        }
+                        file.write(
+                            json.dumps(record, ensure_ascii=False) + "\n"
+                        )
         if not quiet:
             print(f"Synthetic goldens saved at {full_file_path}!")

```
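Each `save_as` format above now applies the same either/or branching between single-turn and conversational goldens. For the `jsonl` branch the underlying pattern is simply one JSON object per line; here is a small self-contained sketch of that pattern using generic records rather than deepeval objects.

```python
import json
from pathlib import Path
from typing import Dict, Iterable


def write_jsonl(records: Iterable[Dict], path: Path) -> None:
    # One JSON object per line, keeping non-ASCII characters readable,
    # mirroring the json.dumps(..., ensure_ascii=False) + "\n" pattern in the diff.
    with open(path, "w", encoding="utf-8") as file:
        for record in records:
            file.write(json.dumps(record, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    write_jsonl(
        [
            {"scenario": "User asks for a refund", "expected_outcome": "Agent issues refund"},
            {"scenario": "Usuario pregunta en español", "expected_outcome": None},
        ],
        Path("goldens.jsonl"),
    )
    print(Path("goldens.jsonl").read_text(encoding="utf-8"))
```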
deepeval/tracing/tracing.py
CHANGED

```diff
@@ -847,7 +847,12 @@ class Observer:
             self.trace_uuid = parent_span.trace_uuid
         else:
             current_trace = current_trace_context.get()
-            …
+            # IMPORTANT: Verify trace is still active, not just in context
+            # (a previous failed async operation might leave a dead trace in context)
+            if (
+                current_trace
+                and current_trace.uuid in trace_manager.active_traces
+            ):
                 self.trace_uuid = current_trace.uuid
             else:
                 trace = trace_manager.start_new_trace(
```
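The guard added above only reuses the trace found in the context variable when its UUID is still registered with the trace manager, so a trace that already ended (for example after a failed async operation) no longer gets resurrected. A minimal, generic sketch of that defensive pattern follows; the names (`TraceManager`, `get_or_start_trace`, etc.) are hypothetical stand-ins, not deepeval's actual classes.

```python
import uuid
from contextvars import ContextVar
from dataclasses import dataclass, field
from typing import Dict, Optional


@dataclass
class Trace:
    uuid: str = field(default_factory=lambda: str(uuid.uuid4()))


class TraceManager:
    def __init__(self) -> None:
        self.active_traces: Dict[str, Trace] = {}

    def start_new_trace(self) -> Trace:
        trace = Trace()
        self.active_traces[trace.uuid] = trace
        return trace

    def end_trace(self, trace: Trace) -> None:
        self.active_traces.pop(trace.uuid, None)


current_trace_context: ContextVar[Optional[Trace]] = ContextVar("current_trace", default=None)
manager = TraceManager()


def get_or_start_trace() -> Trace:
    current_trace = current_trace_context.get()
    # Reuse the trace only if it is still active; a finished or failed trace
    # may still be sitting in the context variable.
    if current_trace and current_trace.uuid in manager.active_traces:
        return current_trace
    trace = manager.start_new_trace()
    current_trace_context.set(trace)
    return trace


if __name__ == "__main__":
    first = get_or_start_trace()
    manager.end_trace(first)          # simulate a trace that already ended
    second = get_or_start_trace()     # stale context var -> a fresh trace is started
    print(first.uuid != second.uuid)  # True
```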
deepeval/tracing/types.py
CHANGED
deepeval/utils.py
CHANGED

```diff
@@ -739,14 +739,29 @@ def update_pbar(
    if progress is None or pbar_id is None:
        return
    # Get amount to advance
-    current_task = next(t for t in progress.tasks if t.id == pbar_id)
+    current_task = next((t for t in progress.tasks if t.id == pbar_id), None)
+    if current_task is None:
+        return
+
    if advance_to_end:
-        …
+        remaining = current_task.remaining
+        if remaining is not None:
+            advance = remaining
+
    # Advance
-    …
-    progress.…
+    try:
+        progress.update(pbar_id, advance=advance, total=total)
+    except KeyError:
+        # progress task may be removed concurrently via callbacks which can race with teardown.
+        return
+
+    # Remove if finished and refetch before remove to avoid acting on a stale object
+    updated_task = next((t for t in progress.tasks if t.id == pbar_id), None)
+    if updated_task is not None and updated_task.finished and remove:
+        try:
+            progress.remove_task(pbar_id)
+        except KeyError:
+            pass


def add_pbar(progress: Optional[Progress], description: str, total: int = 1):
```
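`update_pbar` now tolerates tasks that were never created or that another callback has already removed. A small standalone sketch of the same defensive pattern against rich's `Progress` API is shown below; `safe_advance` is a hypothetical helper for illustration, not deepeval's own function.

```python
from typing import Optional

from rich.progress import Progress, TaskID


def safe_advance(progress: Optional[Progress], task_id: Optional[TaskID], advance: int = 1) -> None:
    if progress is None or task_id is None:
        return
    # Look the task up defensively instead of assuming it still exists.
    task = next((t for t in progress.tasks if t.id == task_id), None)
    if task is None:
        return
    try:
        progress.update(task_id, advance=advance)
    except KeyError:
        # The task can be removed by another callback between the lookup and the update.
        return
    # Re-fetch before removing so we do not act on a stale Task object.
    task = next((t for t in progress.tasks if t.id == task_id), None)
    if task is not None and task.finished:
        try:
            progress.remove_task(task_id)
        except KeyError:
            pass


if __name__ == "__main__":
    with Progress() as progress:
        tid = progress.add_task("demo", total=2)
        safe_advance(progress, tid)
        safe_advance(progress, tid)   # finishes and removes the task
        safe_advance(progress, tid)   # no-op: the task is already gone
```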
{deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepeval
-Version: 3.7.8
+Version: 3.8.0
 Summary: The LLM Evaluation Framework
 Home-page: https://github.com/confident-ai/deepeval
 License: Apache-2.0
@@ -100,7 +100,7 @@ Description-Content-Type: text/markdown
   <a href="https://www.readme-i18n.com/confident-ai/deepeval?lang=zh">中文</a>
 </p>

-**DeepEval** is a simple-to-use, open-source LLM evaluation framework, for evaluating and testing large-language model systems. It is similar to Pytest but specialized for unit testing LLM outputs. DeepEval incorporates the latest research to evaluate LLM outputs based on metrics such as G-Eval, task completion, answer relevancy, hallucination, etc., which uses LLM-as-a-judge and other NLP models that …
+**DeepEval** is a simple-to-use, open-source LLM evaluation framework, for evaluating and testing large-language model systems. It is similar to Pytest but specialized for unit testing LLM outputs. DeepEval incorporates the latest research to evaluate LLM outputs based on metrics such as G-Eval, task completion, answer relevancy, hallucination, etc., which uses LLM-as-a-judge and other NLP models that run **locally on your machine** for evaluation.

 Whether your LLM applications are AI agents, RAG pipelines, or chatbots, implemented via LangChain or OpenAI, DeepEval has you covered. With it, you can easily determine the optimal models, prompts, and architecture to improve your RAG pipeline, agentic workflows, prevent prompt drifting, or even transition from OpenAI to hosting your own Deepseek R1 with confidence.

@@ -115,10 +115,10 @@ Whether your LLM applications are AI agents, RAG pipelines, or chatbots, impleme

 # 🔥 Metrics and Features

-> 🥳 You can now share DeepEval's test results on the cloud directly on [Confident AI](https://confident-ai.com?utm_source=GitHub)
+> 🥳 You can now share DeepEval's test results on the cloud directly on [Confident AI](https://confident-ai.com?utm_source=GitHub)

 - Supports both end-to-end and component-level LLM evaluation.
-- Large variety of ready-to-use LLM evaluation metrics (all with explanations) powered by **ANY** LLM of your choice, statistical methods, or NLP models that …
+- Large variety of ready-to-use LLM evaluation metrics (all with explanations) powered by **ANY** LLM of your choice, statistical methods, or NLP models that run **locally on your machine**:
   - G-Eval
   - DAG ([deep acyclic graph](https://deepeval.com/docs/metrics-dag))
   - **RAG metrics:**
@@ -158,7 +158,7 @@ Whether your LLM applications are AI agents, RAG pipelines, or chatbots, impleme
   - TruthfulQA
   - HumanEval
   - GSM8K
-- [100% integrated with Confident AI](https://confident-ai.com?utm_source=GitHub) for the full evaluation lifecycle:
+- [100% integrated with Confident AI](https://confident-ai.com?utm_source=GitHub) for the full evaluation & observability lifecycle:
   - Curate/annotate evaluation datasets on the cloud
   - Benchmark LLM app using dataset, and compare with previous iterations to experiment which models/prompts works best
   - Fine-tune metrics for custom results
@@ -167,7 +167,7 @@ Whether your LLM applications are AI agents, RAG pipelines, or chatbots, impleme
   - Repeat until perfection

 > [!NOTE]
-> Confident AI …
+> DeepEval is available on Confident AI, an LLM evals platform for AI observability and quality. Create an account [here.](https://app.confident-ai.com?utm_source=GitHub)

 <br />

@@ -394,7 +394,7 @@ cp .env.example .env.local

 # DeepEval With Confident AI

-DeepEval …
+DeepEval is available on [Confident AI](https://confident-ai.com?utm_source=Github), an evals & observability platform that allows you to:

 1. Curate/annotate evaluation datasets on the cloud
 2. Benchmark LLM app using dataset, and compare with previous iterations to experiment which models/prompts works best
```
{deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/RECORD
CHANGED

```diff
@@ -1,5 +1,5 @@
 deepeval/__init__.py,sha256=tle4lT4FONApg3OeztGPEdrpGMEGLWajyGTu7bEd3s0,2976
-deepeval/_version.py,sha256=…
+deepeval/_version.py,sha256=zzwaJLAxT4xICJZz0E72TnWZ0UtmJr0uD8CK8vnXu6I,27
 deepeval/annotation/__init__.py,sha256=ZFhUVNNuH_YgQSZJ-m5E9iUb9TkAkEV33a6ouMDZ8EI,111
 deepeval/annotation/annotation.py,sha256=3j3-syeJepAcEj3u3e4T_BeRDzNr7yXGDIoNQGMKpwQ,2298
 deepeval/annotation/api.py,sha256=EYN33ACVzVxsFleRYm60KB4Exvff3rPJKt1VBuuX970,2147
@@ -81,7 +81,7 @@ deepeval/benchmarks/bool_q/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 deepeval/benchmarks/bool_q/bool_q.py,sha256=wJM4-wSybT8EwgDJVB4p3QYXGNzLD3tdrpGE1cNEz_E,5507
 deepeval/benchmarks/bool_q/template.py,sha256=pgNj4RR6-4VJDDySwnKt-MpghBCjVlZ7fPKY6PltllQ,4055
 deepeval/benchmarks/drop/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-deepeval/benchmarks/drop/drop.py,sha256=…
+deepeval/benchmarks/drop/drop.py,sha256=7rs8eqhgPHYmRxx9uv0-iA9Oz4gI14QqXP1ckMtfaNk,13457
 deepeval/benchmarks/drop/task.py,sha256=RV7DEXF192IOsY-yIVdlGb_y-A_sS5APPn8PGOPn5yU,17950
 deepeval/benchmarks/drop/template.py,sha256=1P0mx_71Bxr9juIA8nGpVRIrP8NSoDILkIicjWvqE94,1376
 deepeval/benchmarks/equity_med_qa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -114,7 +114,7 @@ deepeval/benchmarks/math_qa/math_qa.py,sha256=_eP-yocJom9r91qmAUBbIH4hrWazEHLV2l
 deepeval/benchmarks/math_qa/task.py,sha256=3q_jlK5kIl5Zs0mQwuzxyvmPP6ncLZwszn7gtl1GfZs,192
 deepeval/benchmarks/math_qa/template.py,sha256=pC3PB2GGU5TQ81I7E76RJh0xlu7xiF6d4SK3T_Nksh8,4468
 deepeval/benchmarks/mmlu/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-deepeval/benchmarks/mmlu/mmlu.py,sha256=…
+deepeval/benchmarks/mmlu/mmlu.py,sha256=hwB7w3W7ugs2n67s51pVaTUxO9HVS0-ugKwV7zqgV6M,11465
 deepeval/benchmarks/mmlu/task.py,sha256=HnhnuD4Xjur9GlrBtswaR7ZPouGx4NTgbcFZu_oIzXw,2580
 deepeval/benchmarks/mmlu/template.py,sha256=MsdcrZWVkyZpEw--Kj6W7vjOJgig-ABiz9B3WtZz1MQ,1303
 deepeval/benchmarks/modes/__init__.py,sha256=IGhZp0-nmvVsZWBnTuBvKhdGiy4TJZShFSjYAeBZdbo,135
@@ -136,21 +136,21 @@ deepeval/benchmarks/winogrande/template.py,sha256=tDwH8NpNF9x7FbDmQw45XaW1LNqGBV
 deepeval/benchmarks/winogrande/winogrande.py,sha256=_4irJkRPw3c-Ufo-hM4cHpPKUoxozedFQpok9n0csTg,5644
 deepeval/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/cli/dotenv_handler.py,sha256=7PtVjCNUZKAXsVJQxznsLexad7y8x-gQ195xAxmv4gA,2468
-deepeval/cli/main.py,sha256=…
+deepeval/cli/main.py,sha256=IsnAF8XVMA4j0dExMjlsD0ABPzFCwHkI4ai2S567qlA,100548
 deepeval/cli/server.py,sha256=cOm9xiYcPYB9GDeFQw9-Iawf9bNfOqftZs7q7mO_P7I,1979
 deepeval/cli/test.py,sha256=aoBPMfk0HTvOqb2xdvMykkx_s4SHst7lEnoUiSXo1lU,5483
 deepeval/cli/types.py,sha256=_7KdthstHNc-JKCWrfpDQCf_j8h9PMxh0qJCHmVXJr0,310
-deepeval/cli/utils.py,sha256=…
+deepeval/cli/utils.py,sha256=3fgH5WPTTe7Cz_QOLCHyflXB81kmFaSxXHJ2tnxvFLw,10649
 deepeval/confident/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-deepeval/confident/api.py,sha256=…
+deepeval/confident/api.py,sha256=3TpuZm59xo1_APsAPppreCRepf8pfGWksMmgxnwp764,8773
 deepeval/confident/types.py,sha256=9bgePDaU31yY7JGwCLZcc7pev9VGtNDZLbjsVpCLVdc,574
 deepeval/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/config/dotenv_handler.py,sha256=lOosoC7fm9RljriY8EFl5ywSGfSiQsVf_vmYqzpbZ8s,588
 deepeval/config/logging.py,sha256=ivqmhOSB-oHOOU3MvnhImrZwkkxzxKJgoKxesnWfHjg,1314
-deepeval/config/settings.py,sha256=…
+deepeval/config/settings.py,sha256=l5wvTEyG7KgWoY6F4Gc0j19oZef151r20Z8fZsiXR_U,57004
 deepeval/config/settings_manager.py,sha256=Ynebm2BKDrzajc6DEq2eYIwyRAAtUQOkTnl46albxLk,4187
 deepeval/config/utils.py,sha256=bJGljeAXoEYuUlYSvHSOsUnqINTwo6wOwfFHFpWxiaQ,4238
-deepeval/constants.py,sha256=…
+deepeval/constants.py,sha256=MvwjLC1IHUY35FnnSsWVcHScmdbYBbPr8eTnsLWn40Y,1697
 deepeval/contextvars.py,sha256=oqXtuYiKd4Zvc1rNoR1gcRBxzZYCGTMVn7XostwvkRI,524
 deepeval/dataset/__init__.py,sha256=N2c-rkuxWYiiJSOZArw0H02Cwo7cnfzFuNYJlvsIBEg,249
 deepeval/dataset/api.py,sha256=bZ95HfIaxYB1IwTnp7x4AaKXWuII17T5uqVkhUXNc7I,1650
@@ -180,9 +180,9 @@ deepeval/integrations/hugging_face/rich_manager.py,sha256=WvFtPGpPmGeg2Ftsnojga6
 deepeval/integrations/hugging_face/tests/test_callbacks.py,sha256=88Wyg-aDaXujj9jHeGdFF3ITSl2-y7eaJGWgSyvvDi8,4607
 deepeval/integrations/hugging_face/utils.py,sha256=HUKdQcTIb76Ct69AS737oPxmlVxk5fw2UbT2pLn-o8k,1817
 deepeval/integrations/langchain/__init__.py,sha256=G1Qey5WkKou2-PA34KwWgmayQ_TbvXqPyotTbzmD8tw,84
-deepeval/integrations/langchain/callback.py,sha256=…
+deepeval/integrations/langchain/callback.py,sha256=1K5KxpND6XEKCWnz-DWjhbO35AzmM3M8PIlk6bTYO2k,20360
 deepeval/integrations/langchain/patch.py,sha256=fCHfZXU9xX3IJ6SG8GEYzn3qrifyUkT0i_uUABTsmcs,1255
-deepeval/integrations/langchain/utils.py,sha256=…
+deepeval/integrations/langchain/utils.py,sha256=oYsQYO3Ucbmd1d7gkb5ARd60gm6BHtLy_5OOqUw8HIQ,11311
 deepeval/integrations/llama_index/__init__.py,sha256=Ujs9ZBJFkuCWUDBJOF88UbM1Y-S6QFQhxSo0oQnEWNw,90
 deepeval/integrations/llama_index/handler.py,sha256=uTvNXmAF4xBh8t9bBm5sBFX6ETp8SrkOZlFlE_GWdmM,10771
 deepeval/integrations/llama_index/utils.py,sha256=onmmo1vpn6cpOY5EhfTc0Uui7X6l1M0HD3sq-KVAesg,3380
@@ -191,7 +191,7 @@ deepeval/integrations/pydantic_ai/agent.py,sha256=-NKvpTUw3AxRNhuxVFcx9mw5BWCujz
 deepeval/integrations/pydantic_ai/instrumentator.py,sha256=Us9LSYZWMfaeAc7PGXMDYWzjWKFVmhRvZrFhSvmk448,11922
 deepeval/integrations/pydantic_ai/otel.py,sha256=CCqwCJ5pHqCzHgujHQqZy7Jxo2PH1BT0kR7QxdtzutY,2060
 deepeval/integrations/pydantic_ai/test_instrumentator.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-deepeval/key_handler.py,sha256=…
+deepeval/key_handler.py,sha256=lajMBgF2lCzbQpW4e6Y7cD9FOw0Qk5UOKS4_kIIHj6Y,9562
 deepeval/metrics/__init__.py,sha256=19Df323r8aAlx2sRfV9BHJLicORhTLpogR8M1deJetw,4680
 deepeval/metrics/answer_relevancy/__init__.py,sha256=WbZUpoSg2GQoqJ4VIRirVVQ1JDx5xwT-RskwqNKfWGM,46
 deepeval/metrics/answer_relevancy/answer_relevancy.py,sha256=2zRAuPq794S-rqMesMnuvSJxd1yhEUTq8nRwLxf42QE,10958
@@ -233,7 +233,7 @@ deepeval/metrics/conversational_dag/conversational_dag.py,sha256=GlhJC-BBtnfx3G2
 deepeval/metrics/conversational_dag/nodes.py,sha256=xMhBEQ87CMWd6DPF-hMuMp3rxVuw_SS7mHKHqGSfxLw,31348
 deepeval/metrics/conversational_dag/templates.py,sha256=zMF9rjRQCbAtMXXC03m1CwcYiYwsRCfoitf63QkvCmE,4243
 deepeval/metrics/conversational_g_eval/__init__.py,sha256=0whQUYv_qZx4nkz0V6TTjgg8gJVCmW8323WcXpj2EzI,93
-deepeval/metrics/conversational_g_eval/conversational_g_eval.py,sha256=…
+deepeval/metrics/conversational_g_eval/conversational_g_eval.py,sha256=R6qt6yO8_k0ciXUz3F_imMxS9hFi0EtMgDkooOyE790,17190
 deepeval/metrics/conversational_g_eval/schema.py,sha256=H_9-iA1BXJwbPKrGEZBqxDO_En4sjXI8_xKSNYc-hnk,167
 deepeval/metrics/conversational_g_eval/template.py,sha256=JVKwZJBgHiP1cMuGTLAL_taKvRL-ppJjkiTOs0wzgYk,2931
 deepeval/metrics/dag/__init__.py,sha256=G5D9ngJ6nnbRBF2mfmNZymZId8gKD09QzTA1Y_bTrgM,157
@@ -250,10 +250,10 @@ deepeval/metrics/faithfulness/faithfulness.py,sha256=2mFXIh0U7Xf4Ybl6w-Lt74D4P13
 deepeval/metrics/faithfulness/schema.py,sha256=yPbe1CrW6PMOJjnWnUOxeb_Ul8sfDwvwgt4QTW-95RI,437
 deepeval/metrics/faithfulness/template.py,sha256=n9SvOM8iJ9Y4K0o8OHHc0uyw3E_v2BfZRd0fSUIX8XI,11126
 deepeval/metrics/g_eval/__init__.py,sha256=HAhsQFVq9LIpZXPN00Jc_WrMXrh47NIT86VnUpWM4_4,102
-deepeval/metrics/g_eval/g_eval.py,sha256=…
+deepeval/metrics/g_eval/g_eval.py,sha256=VlQkYuWwWITB0wo8q1OVZEQjZ7V7gpDv4kdvrUP3ROA,16134
 deepeval/metrics/g_eval/schema.py,sha256=V629txuDrr_2IEKEsgJVYYZb_pkdfcltQV9ZjvxK5co,287
 deepeval/metrics/g_eval/template.py,sha256=v96BJFOH1rnME6b-OwJwcunvA4dd2GwraoXnjiZRu9Y,5182
-deepeval/metrics/g_eval/utils.py,sha256=…
+deepeval/metrics/g_eval/utils.py,sha256=t8SvFt_2GRSOOKiYMHJWyhrzLCWOdFWGnlQ38PZA0Ls,10767
 deepeval/metrics/goal_accuracy/__init__.py,sha256=SVvA5Py1iNQoLujNUptvckoLoR6XMs-W2jQ7b89v-Tc,46
 deepeval/metrics/goal_accuracy/goal_accuracy.py,sha256=zlzxrAQQ4ASrjVo4-jDmn6uBw50sHOsAtgWez0CpveU,13034
 deepeval/metrics/goal_accuracy/schema.py,sha256=WmP1nw5ugOAKCFrpjZpF4jjeJzLB3Ecdp-2VWfBJLAE,257
@@ -388,7 +388,7 @@ deepeval/metrics/utils.py,sha256=RS8gsEh__DaKhXjdDfNcw1iOVvN40Z1mbQHM21Q30Iw,213
 deepeval/model_integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepeval/model_integrations/types.py,sha256=rbVMhC_2yWwD6JqzkRO9D7aMVC_KtXN686G_S7de7S8,630
 deepeval/model_integrations/utils.py,sha256=Zt9SYPgTxlGsQFZgpZvh_a5fWuL8mmIFVSe6uoQywZ4,3562
-deepeval/models/__init__.py,sha256…
+deepeval/models/__init__.py,sha256=-eHIhBn1a1fLKjPd5TCW8da8UuCKYdJvwic26swFtZM,1024
 deepeval/models/_summac_model.py,sha256=xflanxl_IBuzuainlYCVX7UvjHCnAckKSvNR2NwZI6k,19750
 deepeval/models/answer_relevancy_model.py,sha256=SLOA6uUImNOuxpPGfTg2AH7MIkf9QsotYixvI1jcVC8,2197
 deepeval/models/base_model.py,sha256=uja2bZcrTCIPMkIDgOLG2k2Ncw7uTX6vjvnrQtFlNlk,4891
@@ -399,22 +399,23 @@ deepeval/models/embedding_models/local_embedding_model.py,sha256=Io5dYNR8f-_iMmz
 deepeval/models/embedding_models/ollama_embedding_model.py,sha256=4uxrzdBlpWT-SM1HHHsZXhwg1ejkxUDFewxACLeYsG4,3747
 deepeval/models/embedding_models/openai_embedding_model.py,sha256=S8uvWODbiTF4EYfeID5yEF0YvYkDs1dP_Kiur4sb67M,4477
 deepeval/models/hallucination_model.py,sha256=ABi978VKLE_jNHbDzM96kJ08EsZ5ZlvOlJHA_ptSkfQ,1003
-deepeval/models/llms/__init__.py,sha256=…
+deepeval/models/llms/__init__.py,sha256=Mlkvw9eIbxJXJjTB9Nj0LoL-kSRCmewrEihDvFyzvJA,799
 deepeval/models/llms/amazon_bedrock_model.py,sha256=mgBdGhyZo0SU0OMzqWAF6pUlQMUuCCdDiordfinDNpM,10898
 deepeval/models/llms/anthropic_model.py,sha256=08_nGK5EoGpf_F0I6JkhrEAswDc9DjLQqGYMX3emsoQ,10542
 deepeval/models/llms/azure_model.py,sha256=Nc_LgA8rEhkldvdhccNojERaviaBg6jyfBVL9bGdKek,16673
-deepeval/models/llms/constants.py,sha256=…
+deepeval/models/llms/constants.py,sha256=H6_FyTNkfF0wr3R8qUlvT2LuZGT5lbXFh9Hcq5T8A8k,72008
 deepeval/models/llms/deepseek_model.py,sha256=OzEs0hnSixqICurVFo6T5GBAUeDrnWOlooEyJrgi5zE,8565
-deepeval/models/llms/gemini_model.py,sha256=…
+deepeval/models/llms/gemini_model.py,sha256=h01bJnLBnc1xaqoVSBOL-PwllCsHRkA88bp21BA0Mws,15552
 deepeval/models/llms/grok_model.py,sha256=zGU1WzKADrgap5NQJTDb6BY4SZNNJqAZ6phnK_HFJqw,10703
 deepeval/models/llms/kimi_model.py,sha256=n5w2MeeKSMS7HvSpiDSQueZ2EQSv3c6pDb-C-ASHGwE,10441
 deepeval/models/llms/litellm_model.py,sha256=lWfJvzWia7XCrLiRTNF0fUQXYOalsLV1y3Tq03loDP4,16533
 deepeval/models/llms/local_model.py,sha256=1KWuvgfdSE2XaNriIz-8gBIrbvmLOgomZxXI80Zt-8c,8287
 deepeval/models/llms/ollama_model.py,sha256=fk2GlQSFMYBe9oKrFouWAIf_PtSZZp8SGV2HXpE66no,7957
-deepeval/models/llms/openai_model.py,sha256=…
+deepeval/models/llms/openai_model.py,sha256=3cp7fkpnh6suMu9lWTe0rYASALqWIZa2aUHVQhL8JHM,17243
+deepeval/models/llms/openrouter_model.py,sha256=hXDPQGa-HrvkXBL1E8ju1pduqsAnZg86RbotC0NcENU,13617
 deepeval/models/llms/portkey_model.py,sha256=EvUJDHhtKzetFakc1HzxYIpzUlgD3UnEvZr0q9hpIKU,6684
 deepeval/models/llms/utils.py,sha256=NsrZ4DjrVBnYbMZEY6G8U9On_B84kiDkB88yeyBUFfw,1433
-deepeval/models/retry_policy.py,sha256=…
+deepeval/models/retry_policy.py,sha256=hNQRpP6SL5rpFTvLL-BRXkBLknmTwusJXuZnW9ba_as,34973
 deepeval/models/summac_model.py,sha256=wKeH7pWQRXrTlzlIw_r1YCb8b7jUhWq6jUz9FiNUCSg,1992
 deepeval/models/unbias_model.py,sha256=umOMhQLTmnD7uOuhiQufEl4Wlti4q2s3EtKOpds7zhs,597
 deepeval/models/utils.py,sha256=0_6_hmEwatWGoba-KNE38KvmDKMlhGff2lIzOCpRQgQ,5947
@@ -459,7 +460,7 @@ deepeval/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 deepeval/plugins/plugin.py,sha256=_dwsdx4Dg9DbXxK3f7zJY4QWTJQWc7QE1HmIg2Zjjag,1515
 deepeval/progress_context.py,sha256=ZSKpxrE9sdgt9G3REKnVeXAv7GJXHHVGgLynpG1Pudw,3557
 deepeval/prompt/__init__.py,sha256=rDU99KjydxDRKhuQJCBs_bpDJrWb2mpHtvyv6AEwFC8,367
-deepeval/prompt/api.py,sha256=…
+deepeval/prompt/api.py,sha256=DNhKouq3ntEKmN_VegNh5X1gu_2RGJwzBp07rEEyg6s,6359
 deepeval/prompt/prompt.py,sha256=waaQDrTXQQUzOIJbOYtUpoa4qsuXgmzObUwFH-wRx2Y,31654
 deepeval/prompt/utils.py,sha256=knjgPU2066OtYWMb3NqMPChr9zQgKfXo_QTLTtSkmYg,7620
 deepeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -478,7 +479,7 @@ deepeval/synthesizer/chunking/context_generator.py,sha256=ucwa-7BcPSHHf4Tu31dfyJ
 deepeval/synthesizer/chunking/doc_chunker.py,sha256=DvJmbA_NnZBGCZgxKQsxlIC29kca_d7J-Dxq9SyfzX0,10612
 deepeval/synthesizer/config.py,sha256=rg9HzN5w_82tpzBALgt__NkAcmh0vDEoORJpjsRLWMY,2207
 deepeval/synthesizer/schema.py,sha256=TFCIvXeL0TOKqfjMm2qgR4hFcvvFaPEZdQ1xTnRJqPs,1294
-deepeval/synthesizer/synthesizer.py,sha256=…
+deepeval/synthesizer/synthesizer.py,sha256=rElOS4gtfaY_xzOaYXknlgAzQ3o5TtIGYfO8X3qV9y8,115244
 deepeval/synthesizer/templates/__init__.py,sha256=9UhfJFwPEdLWmxJz3ksNJps-jGYJFJnJP1U-x7j0By4,319
 deepeval/synthesizer/templates/template.py,sha256=ri3dX2gzxNmL8qlkl47HD8kecmqMBF5mE-mraZvf1xU,65008
 deepeval/synthesizer/templates/template_extraction.py,sha256=jmvr8AOOUzDgsHYIOsq-NaxlRQ5GygK16TTRGxBXDyM,3508
@@ -515,12 +516,12 @@ deepeval/tracing/patchers.py,sha256=Oi9wao3oDYhcviv7p0KoWBeS9ne7rHLa2gh9AR9EyiU,
 deepeval/tracing/perf_epoch_bridge.py,sha256=iyAPddB6Op7NpMtPHJ29lDm53Btz9yLaN6xSCfTRQm4,1825
 deepeval/tracing/trace_context.py,sha256=Z0n0Cu1A5g9dXiZnzTFO5TzeOYHKeNuO6v3_EU_Gi_c,3568
 deepeval/tracing/trace_test_manager.py,sha256=wt4y7EWTRc4Bw938-UFFtXHkdFFOrnx6JaIk7J5Iulw,555
-deepeval/tracing/tracing.py,sha256=…
-deepeval/tracing/types.py,sha256=…
+deepeval/tracing/tracing.py,sha256=AkbmgjWzSQ2k2qeN9i8LT17MsafuBenzzkP0r31I950,46728
+deepeval/tracing/types.py,sha256=3QkF0toQ6f0fEDARYOUV6Iv9UJFbg14kSpn3dL1H5CE,6040
 deepeval/tracing/utils.py,sha256=mdvhYAxDNsdnusaEXJd-c-_O2Jn6S3xSuzRvLO1Jz4U,5684
-deepeval/utils.py,sha256=…
-deepeval-3.…
-deepeval-3.…
-deepeval-3.…
-deepeval-3.…
-deepeval-3.…
+deepeval/utils.py,sha256=Wsu95g6t1wdttxWIESVwuUxbml7C-9ZTsV7qHCQI3Xg,27259
+deepeval-3.8.0.dist-info/LICENSE.md,sha256=0ATkuLv6QgsJTBODUHC5Rak_PArA6gv2t7inJzNTP38,11352
+deepeval-3.8.0.dist-info/METADATA,sha256=z7OpguZITPdv0S9jhvE6CEq-zVCSq9fvbKChLKD9gwc,18752
+deepeval-3.8.0.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
+deepeval-3.8.0.dist-info/entry_points.txt,sha256=NoismUQfwLOojSGZmBrdcpwfaoFRAzUhBvZD3UwOKog,95
+deepeval-3.8.0.dist-info/RECORD,,
```
{deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/LICENSE.md
File without changes

{deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/WHEEL
File without changes

{deepeval-3.7.8.dist-info → deepeval-3.8.0.dist-info}/entry_points.txt
File without changes