DeepFabric 4.4.1__py3-none-any.whl → 4.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepfabric/builders_agent.py +16 -4
- deepfabric/cli.py +3 -3
- deepfabric/evaluation/backends/__init__.py +2 -0
- deepfabric/evaluation/backends/llm_eval_backend.py +527 -0
- deepfabric/evaluation/backends/ollama_backend.py +3 -3
- deepfabric/evaluation/backends/tool_call_parsers.py +7 -7
- deepfabric/evaluation/backends/transformers_backend.py +73 -16
- deepfabric/evaluation/evaluator.py +41 -7
- deepfabric/evaluation/evaluators/builtin/tool_calling.py +13 -8
- deepfabric/evaluation/inference.py +77 -5
- deepfabric/evaluation/metrics.py +4 -0
- deepfabric/evaluation/reporters/cloud_reporter.py +1 -1
- deepfabric/generator.py +4 -0
- deepfabric/hf_hub.py +1 -1
- deepfabric/schemas.py +2 -2
- deepfabric/training/__init__.py +24 -5
- deepfabric/training/dataset_utils.py +223 -0
- {deepfabric-4.4.1.dist-info → deepfabric-4.5.1.dist-info}/METADATA +6 -3
- {deepfabric-4.4.1.dist-info → deepfabric-4.5.1.dist-info}/RECORD +22 -20
- {deepfabric-4.4.1.dist-info → deepfabric-4.5.1.dist-info}/WHEEL +0 -0
- {deepfabric-4.4.1.dist-info → deepfabric-4.5.1.dist-info}/entry_points.txt +0 -0
- {deepfabric-4.4.1.dist-info → deepfabric-4.5.1.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
"""Dataset preparation utilities for training.
|
|
2
|
+
|
|
3
|
+
This module provides utilities for preparing DeepFabric datasets for training,
|
|
4
|
+
including tool filtering to reduce sequence lengths and memory usage.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import logging
|
|
10
|
+
|
|
11
|
+
from typing import TYPE_CHECKING, Any, Literal
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from datasets import Dataset
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
ToolInclusionStrategy = Literal["all", "used_only", "used_plus_related"]
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def get_used_tool_names(messages: list[dict[str, Any]]) -> set[str]:
    """Collect the names of tools actually invoked in a conversation.

    Both tool-call layouts are recognised: the OpenAI nested form
    ``{"function": {"name": ...}}`` and the flat form ``{"name": ...}``.
    Non-dict entries and calls without a name are ignored.

    Args:
        messages: List of message dicts from the conversation

    Returns:
        Set of tool names that were called
    """
    names: set[str] = set()

    for message in messages:
        if message.get("role") != "assistant":
            continue
        for call in message.get("tool_calls") or []:
            if not isinstance(call, dict):
                continue
            # Prefer the OpenAI nested format, fall back to the flat one.
            spec = call.get("function", {})
            if isinstance(spec, dict) and spec.get("name"):
                names.add(spec["name"])
            elif call.get("name"):
                names.add(call["name"])

    return names
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def clean_tool_schema(tool: dict[str, Any]) -> dict[str, Any]:
    """Strip ``None`` values from a tool schema, recursively, to reduce size.

    Nested dicts and list elements are cleaned the same way; a dict that
    becomes empty after cleaning is dropped entirely (as is an originally
    empty one), while falsy non-None scalars (``0``, ``""``, ``False``)
    are preserved. Non-dict input is returned unchanged.

    Args:
        tool: Tool definition in OpenAI format

    Returns:
        Cleaned tool definition with nulls removed
    """
    if not isinstance(tool, dict):
        return tool

    result: dict[str, Any] = {}

    for key, value in tool.items():
        if value is None:
            continue

        if isinstance(value, dict):
            nested = clean_tool_schema(value)
            if nested:  # drop dicts that are empty after cleaning
                result[key] = nested
            continue

        if isinstance(value, list):
            # Clean dict elements, drop None elements ...
            cleaned_items = [
                clean_tool_schema(element) if isinstance(element, dict) else element
                for element in value
                if element is not None
            ]
            # ... and drop dict elements that cleaned down to nothing.
            kept = [
                element
                for element in cleaned_items
                if not (isinstance(element, dict) and not element)
            ]
            if kept:
                result[key] = kept
            continue

        result[key] = value

    return result
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _tool_name(tool: Any) -> str | None:
    """Return the declared name of a tool definition, or None if unavailable.

    Supports both the OpenAI nested format ``{"function": {"name": ...}}``
    and the flat format ``{"name": ...}``, mirroring the formats accepted by
    get_used_tool_names for tool calls.
    """
    if not isinstance(tool, dict):
        return None
    func = tool.get("function")
    if isinstance(func, dict) and func.get("name"):
        return func["name"]
    name = tool.get("name")
    return name if isinstance(name, str) else None


def filter_tools_for_sample(
    sample: dict[str, Any],
    strategy: ToolInclusionStrategy = "used_only",
    min_tools: int = 1,
    clean_schemas: bool = True,
) -> dict[str, Any]:
    """Filter tools in a sample to only include relevant ones.

    The sample is modified in place and also returned.

    Args:
        sample: Dataset sample with 'messages' and 'tools' fields
        strategy: Tool inclusion strategy:
            - "all": Keep all tools (no filtering)
            - "used_only": Only include tools that are called in the conversation
            - "used_plus_related": Include used tools plus related ones
              (not implemented; currently behaves like "used_only")
        min_tools: Minimum number of tools to include (fallback if filtering
            removes all tools)
        clean_schemas: Whether to remove null values from tool schemas

    Returns:
        Modified sample with filtered tools
    """
    # Nothing to do at all in this configuration.
    if strategy == "all" and not clean_schemas:
        return sample

    messages = sample.get("messages", [])
    all_tools = sample.get("tools", [])

    if not all_tools:
        return sample

    # Clean schemas if requested
    if clean_schemas:
        all_tools = [clean_tool_schema(tool) for tool in all_tools]

    if strategy == "all":
        sample["tools"] = all_tools
        return sample

    # Get tools actually used in the conversation
    used_names = get_used_tool_names(messages)

    if not used_names:
        # No tools used - keep minimum number of tools as context
        sample["tools"] = all_tools[:min_tools] if min_tools > 0 else []
        return sample

    # Keep tools whose declared name was actually called. Matching goes
    # through _tool_name so that flat-format tool definitions ({"name": ...})
    # are matched too, consistent with how get_used_tool_names extracts names;
    # non-dict tools (passed through by clean_tool_schema) are safely skipped.
    filtered_tools = [tool for tool in all_tools if _tool_name(tool) in used_names]

    # Ensure minimum tools by topping up from the original list.
    if len(filtered_tools) < min_tools:
        for tool in all_tools:
            if tool not in filtered_tools:
                filtered_tools.append(tool)
                if len(filtered_tools) >= min_tools:
                    break

    sample["tools"] = filtered_tools
    return sample
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def prepare_dataset_for_training(
    dataset: Dataset,
    tool_strategy: ToolInclusionStrategy = "used_only",
    clean_tool_schemas: bool = True,
    min_tools: int = 1,
    num_proc: int | None = None,
) -> Dataset:
    """Prepare a DeepFabric dataset for training with optimizations.

    Applies size/memory optimizations to a conversation dataset before
    training: per-sample tool filtering (only tools actually used in the
    conversation are kept) and removal of null values from tool schemas.
    Average tool counts are logged before and after processing.

    Args:
        dataset: HuggingFace Dataset with DeepFabric conversation format
        tool_strategy: How to filter tools (see filter_tools_for_sample)
        clean_tool_schemas: Whether to remove null values from tool schemas
        min_tools: Minimum tools to keep per sample
        num_proc: Number of processes for parallel processing

    Returns:
        Processed dataset ready for training

    Example:
        >>> from datasets import load_dataset
        >>> from deepfabric.training import prepare_dataset_for_training
        >>>
        >>> dataset = load_dataset("your/dataset", split="train")
        >>> prepared = prepare_dataset_for_training(
        ...     dataset,
        ...     tool_strategy="used_only",
        ...     clean_tool_schemas=True,
        ... )
        >>> # Now use prepared dataset for training
    """
    logger.info(
        "Preparing dataset for training: tool_strategy=%s, clean_schemas=%s",
        tool_strategy,
        clean_tool_schemas,
    )

    def _mean_tool_count(ds: Dataset) -> float:
        # Average number of tools per sample; 0 for an empty dataset.
        counts = [len(row.get("tools", []) or []) for row in ds]
        return sum(counts) / len(counts) if counts else 0

    # Log stats before filtering
    if "tools" in dataset.column_names:
        logger.info("Initial average tools per sample: %.1f", _mean_tool_count(dataset))

    def _apply_filter(row: dict[str, Any]) -> dict[str, Any]:
        return filter_tools_for_sample(
            row,
            strategy=tool_strategy,
            min_tools=min_tools,
            clean_schemas=clean_tool_schemas,
        )

    # Apply tool filtering across the dataset
    processed = dataset.map(
        _apply_filter,
        num_proc=num_proc,
        desc="Filtering tools",
    )

    # Log stats after filtering
    if "tools" in processed.column_names:
        logger.info("Final average tools per sample: %.1f", _mean_tool_count(processed))

    return processed
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: DeepFabric
|
|
3
|
-
Version: 4.
|
|
3
|
+
Version: 4.5.1
|
|
4
4
|
Summary: Curate High Quality Datasets, Train, Evaluate and Ship
|
|
5
5
|
Author-email: Luke Hinds <luke@alwaysfurther.ai>
|
|
6
6
|
License-File: LICENSE
|
|
@@ -77,6 +77,9 @@ Description-Content-Type: text/markdown
|
|
|
77
77
|
<a href="https://discord.gg/pPcjYzGvbS">
|
|
78
78
|
<img src="https://img.shields.io/discord/1384081906773131274?color=7289da&label=Discord&logo=discord&logoColor=white" alt="Discord"/>
|
|
79
79
|
</a>
|
|
80
|
+
<a href="https://www.reddit.com/r/deepfabric/">
|
|
81
|
+
<img src="https://img.shields.io/badge/Reddit-r%2Fdeepfabric-FF4500?logo=reddit&logoColor=white" alt="Reddit"/>
|
|
82
|
+
</a>
|
|
80
83
|
</p>
|
|
81
84
|
</div>
|
|
82
85
|
|
|
@@ -86,7 +89,7 @@ What sets DeepFabric apart from other dataset generation tools is its ability to
|
|
|
86
89
|
|
|
87
90
|
<img src="/assets/df-demo.gif" width="100%" height="100%"/>
|
|
88
91
|
|
|
89
|
-
Constrained decoding and response validation, along with real tool executions within isolated webassembly environments, ensure that generated samples strictly adhere to structured schema, variable constraints, and execution correctness, ensuring datasets have exact syntax and structure for use in model training pipelines. Tool definations can be either directly imported from MCP (Model Context Protocol) server schemas and automatically mocked, real life interfaces along with a standard set of common tools (`list_files()`, 'read_file()` etc)
|
|
92
|
+
Constrained decoding and response validation, along with real tool executions within isolated webassembly environments, ensure that generated samples strictly adhere to structured schema, variable constraints, and execution correctness, ensuring datasets have exact syntax and structure for use in model training pipelines. Tool definations can be either directly imported from MCP (Model Context Protocol) server schemas and automatically mocked, real life interfaces along with a standard set of common tools (`list_files()`, `'read_file()` etc)
|
|
90
93
|
|
|
91
94
|
Once your dataset is generated, it can be automatically uploaded to Hugging Face and directly imported into popular training frameworks like TRL, Unsloth, and Axolotl.
|
|
92
95
|
|
|
@@ -215,7 +218,7 @@ deepfabric generate config.yaml --output-save-as dataset.jsonl
|
|
|
215
218
|
Or upload to HuggingFace Hub:
|
|
216
219
|
|
|
217
220
|
```bash
|
|
218
|
-
deepfabric upload dataset.jsonl --repo your-username/my-dataset
|
|
221
|
+
deepfabric upload-hf dataset.jsonl --repo your-username/my-dataset
|
|
219
222
|
```
|
|
220
223
|
|
|
221
224
|
### 2. Load and Split for Training
|
|
@@ -2,8 +2,8 @@ deepfabric/__init__.py,sha256=BRwhcuwXhiDQ6jMlsjhTaPNaw15EyIY3KOLhhlwKcBo,1503
|
|
|
2
2
|
deepfabric/__main__.py,sha256=Ojx6VFnAWx4wY52VehsWhW85IaEmPb8FP_sGvOk628g,113
|
|
3
3
|
deepfabric/auth.py,sha256=C-GVaImVSqQIw4zm-H8PS19xBnkUOb3Z3M7sALCMsYs,12637
|
|
4
4
|
deepfabric/builders.py,sha256=hqoVKjh9n-XKX6qc12SNgs_L_9Ytb1qxtLTm_ixt3vY,11529
|
|
5
|
-
deepfabric/builders_agent.py,sha256=
|
|
6
|
-
deepfabric/cli.py,sha256=
|
|
5
|
+
deepfabric/builders_agent.py,sha256=MZhtzuSjordwm9XvEoz3Tk52gQgtCaxJE_scl6MAhrM,48494
|
|
6
|
+
deepfabric/cli.py,sha256=iAOmISksizKsxXNbou3733Qeo8-H1rea_ycpr2vKGRE,41980
|
|
7
7
|
deepfabric/config.py,sha256=B3T-Q3nuvkrMFJ1R3NvTYCSM7Baw5OoRovNdrNsbaes,33698
|
|
8
8
|
deepfabric/config_manager.py,sha256=VuHa2MxGYkGwt8fQ-J3d0ctikWGt6lmNShwaqVRWxnY,9067
|
|
9
9
|
deepfabric/constants.py,sha256=MwADziDmnt0zi9t9gG65EM7AJvIQP0FSsXgGj7Yqxm8,2578
|
|
@@ -11,14 +11,14 @@ deepfabric/dataset_manager.py,sha256=fJ6VFG05FLTpmbkLKlnVTTi7aim8q7eWI1cgOmKaP5s
|
|
|
11
11
|
deepfabric/error_codes.py,sha256=HGGWsahUTI8UG996C74X-XgNuaPX8RHo4gOidlaJql4,17630
|
|
12
12
|
deepfabric/exceptions.py,sha256=068iSPLG6iSA1KY8Hp19Tc_Y2dlew5lxihV31dGenUE,1322
|
|
13
13
|
deepfabric/factory.py,sha256=OCqo3w-eiYNWvK_I_egDZuWj192kf18yD3SPj8rrPxU,753
|
|
14
|
-
deepfabric/generator.py,sha256=
|
|
14
|
+
deepfabric/generator.py,sha256=p0oI_r4pEPnZ7j4-CQKxmAFVc-plQc17sRbh-GbuTAo,44278
|
|
15
15
|
deepfabric/graph.py,sha256=xyQ0zEESkZhJRYmm8a-IdyNKkXZDzAmlow8bci77u80,20502
|
|
16
|
-
deepfabric/hf_hub.py,sha256=
|
|
16
|
+
deepfabric/hf_hub.py,sha256=hw2CWqZ3CzyAzMo552VPZKVWtuv-j0TQ2_gV5K0AUto,7670
|
|
17
17
|
deepfabric/kaggle_hub.py,sha256=CXVO1Lv3IRhdO0bp9_IQr6nUs-v5jOWi5k4EwPkbJmw,7927
|
|
18
18
|
deepfabric/metrics.py,sha256=iwtNHBX4ZTYUg2FZgtFcG3U0e9RlV2c1cm1Kp34FeWU,6129
|
|
19
19
|
deepfabric/progress.py,sha256=3XQQrf2pUZlyd-8eRcNATH1v0Oi8JMedVHGbhPcca-8,9354
|
|
20
20
|
deepfabric/prompts.py,sha256=JVFMeeBa2qqOMvmP_xx8bWzZ6ot9eyqOP3u8XzzPx3g,10290
|
|
21
|
-
deepfabric/schemas.py,sha256=
|
|
21
|
+
deepfabric/schemas.py,sha256=ef3Ai6i1hNmW8aRyjxc7YJ4ldYp-kn3SauoCdcQK-Lw,36531
|
|
22
22
|
deepfabric/stream_simulator.py,sha256=GzvAxWxHVsuTwgXlqwXNfrTUDn6sND2kJOoQuYg88FA,3028
|
|
23
23
|
deepfabric/topic_manager.py,sha256=xbc3yv8u7EgPcV8rWzpAt3U1fDXM7YIBlvcuvZeBhVo,10992
|
|
24
24
|
deepfabric/topic_model.py,sha256=i_wYpw2kUl8NLodOSaqNu-C4_d6caYT1kPe_vkKjoyw,707
|
|
@@ -28,22 +28,23 @@ deepfabric/update_checker.py,sha256=AUa9iUdkGNzu7tWkQRxIlF19YRmKLetwxu-Ys2ONS8Y,
|
|
|
28
28
|
deepfabric/utils.py,sha256=DWcH7NeVGc_NVJ3v1rdLoQSSWeb2rwfUkDpKO1NGBB8,4507
|
|
29
29
|
deepfabric/validation.py,sha256=UY_oocCQTn77xZ6cIO6d1cRWVHCwBVqKdhUuQ07c7mQ,5123
|
|
30
30
|
deepfabric/evaluation/__init__.py,sha256=7xMLmYXaNC1U7qf88S9fMxWTABoDRiOcimSYfCt_PSo,1224
|
|
31
|
-
deepfabric/evaluation/evaluator.py,sha256=
|
|
32
|
-
deepfabric/evaluation/inference.py,sha256=
|
|
33
|
-
deepfabric/evaluation/metrics.py,sha256=
|
|
31
|
+
deepfabric/evaluation/evaluator.py,sha256=ExUrL5Zil4DzibzjzngA7dfxnmGIVp9H7319FhLHYmk,33918
|
|
32
|
+
deepfabric/evaluation/inference.py,sha256=y7JA0IsBDwe0sJzVQeItYHAV5wUJn6Bjp1Wsp3r7qYQ,7644
|
|
33
|
+
deepfabric/evaluation/metrics.py,sha256=ITNevYj7CBXzYs-rYhsihO6-rE9n30CYRaVUfdTbcFQ,12026
|
|
34
34
|
deepfabric/evaluation/parser.py,sha256=GZc_Sce0r8zs8s6UCtga8Glp37Ffj2qLJeSfK-Vm8gI,10707
|
|
35
|
-
deepfabric/evaluation/backends/__init__.py,sha256=
|
|
36
|
-
deepfabric/evaluation/backends/
|
|
37
|
-
deepfabric/evaluation/backends/
|
|
38
|
-
deepfabric/evaluation/backends/
|
|
35
|
+
deepfabric/evaluation/backends/__init__.py,sha256=GqC0FfpWmtgJmjHd0kVKNg7g-NjhRoh5h2MtAoOhUOY,827
|
|
36
|
+
deepfabric/evaluation/backends/llm_eval_backend.py,sha256=4jp5tnTp7v_0pHCGhcPbI55ig79-eVxdzooesi2PymA,18827
|
|
37
|
+
deepfabric/evaluation/backends/ollama_backend.py,sha256=mtPp1JtIDRjb76X_rTa1jS1ETzMjte8t3WJjuYV1oDQ,4372
|
|
38
|
+
deepfabric/evaluation/backends/tool_call_parsers.py,sha256=Ufg4Xt3mrDS-WbGor6tOOr4xZNCHk3Co2C-z_o-pAkM,14126
|
|
39
|
+
deepfabric/evaluation/backends/transformers_backend.py,sha256=WcqB9gkayQpjx2Em00lhzJg8RcWdQEYbctDNXLzFChA,14484
|
|
39
40
|
deepfabric/evaluation/evaluators/__init__.py,sha256=NdH65YvanskRGe6r7JepkTNGGt8xA-GLugagU3VQ_WM,353
|
|
40
41
|
deepfabric/evaluation/evaluators/base.py,sha256=1TiLr-_oF9dRmdSgJs94dDbf0gTwRS8TGGz2C1Z3nag,2946
|
|
41
42
|
deepfabric/evaluation/evaluators/registry.py,sha256=VGeb1AHFGkn9TLpcqfuGIZi1jgh7Qw0NNILT6z3Se6M,2171
|
|
42
43
|
deepfabric/evaluation/evaluators/builtin/__init__.py,sha256=s-9SOOZVMHL5E3QR8obZ8Z_mAWBYY_UbxT2hxpdHQZ8,111
|
|
43
|
-
deepfabric/evaluation/evaluators/builtin/tool_calling.py,sha256=
|
|
44
|
+
deepfabric/evaluation/evaluators/builtin/tool_calling.py,sha256=S-odftiLM0OBGQryJgvL9Ysqxajiv5QlEyJI6_ABw2g,3697
|
|
44
45
|
deepfabric/evaluation/reporters/__init__.py,sha256=o1eteq-gpfoPkiLpacl0VB-RAQ7Yj-81cTpXgP48M3c,299
|
|
45
46
|
deepfabric/evaluation/reporters/base.py,sha256=YUJYRv5L0rpdd21KhLkz6NmvCzNex0n5c3zT-7rpBnI,1654
|
|
46
|
-
deepfabric/evaluation/reporters/cloud_reporter.py,sha256=
|
|
47
|
+
deepfabric/evaluation/reporters/cloud_reporter.py,sha256=bMiAKbibQ8cf8DmvuRfhXNzekDRPBTbTa7wcmT8piWM,7574
|
|
47
48
|
deepfabric/evaluation/reporters/file_reporter.py,sha256=cRSrtFjYdrUoSwCHPLd6e5irqczP-OyH85npYQ8XkDo,1781
|
|
48
49
|
deepfabric/evaluation/reporters/multi_reporter.py,sha256=Faxi_cgLIVsRzti9n6zd_fxWUToSam65Rqylp6RX97c,1621
|
|
49
50
|
deepfabric/llm/__init__.py,sha256=lfWDLLQ6VWJ4birKVRFh3ypk9zEOfG1ZtU08iD8bd3U,1038
|
|
@@ -60,12 +61,13 @@ deepfabric/tools/__init__.py,sha256=hV65lJmVH2qrWCvzHb-IS3VxYP9lal1j8-J3DzBGieM,
|
|
|
60
61
|
deepfabric/tools/defaults.py,sha256=NcvrYo88OC1ID4U0CuKg_WYKz2pwFowsjBjSMZip-bI,2372
|
|
61
62
|
deepfabric/tools/loader.py,sha256=Bv56D-76JChlK_QXfHLw_rneGLZYRhkn5ETbJMIdJsA,2910
|
|
62
63
|
deepfabric/tools/mcp_client.py,sha256=uQRrlDSVwF0ZatOl9bidBNU7IgXgJKQU-xG50dK0Uy4,23377
|
|
63
|
-
deepfabric/training/__init__.py,sha256=
|
|
64
|
+
deepfabric/training/__init__.py,sha256=MqIyBnloX-4_zqAgoEaGzKXTVXroi40wEs4V7lbQNqk,1563
|
|
64
65
|
deepfabric/training/api_key_prompt.py,sha256=bzcdzeK6ql_8Vz0cbR2vmxxtMRNRFpzYAJx7i8GNI3U,9315
|
|
65
66
|
deepfabric/training/callback.py,sha256=d4heAO5gcBCGXmJIgA9MI9lFBBHnYtdm76rhPh6RDSA,11427
|
|
67
|
+
deepfabric/training/dataset_utils.py,sha256=klx8DoawEwuMigBDP-RpMAfe7FvYxRbhj599MErxBr4,7313
|
|
66
68
|
deepfabric/training/metrics_sender.py,sha256=Fh_qvqrK9mNpTHfG8jgU7t1oSMKbov3mmj3r6o_Q6X4,9347
|
|
67
|
-
deepfabric-4.
|
|
68
|
-
deepfabric-4.
|
|
69
|
-
deepfabric-4.
|
|
70
|
-
deepfabric-4.
|
|
71
|
-
deepfabric-4.
|
|
69
|
+
deepfabric-4.5.1.dist-info/METADATA,sha256=fbtl_pTG40JhAPcdC_VIoA988RJ2emjN8iAKvuS_rCo,22789
|
|
70
|
+
deepfabric-4.5.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
71
|
+
deepfabric-4.5.1.dist-info/entry_points.txt,sha256=zatevils13hfs8x29_vmUyivQ6rTtq7hE2RBusZw1Fo,50
|
|
72
|
+
deepfabric-4.5.1.dist-info/licenses/LICENSE,sha256=-qRt8wmrhQ9aMf7KhmZXc2vrTETYZF-6_T1KCeUhvHY,11340
|
|
73
|
+
deepfabric-4.5.1.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|