langtune 0.1.17 (tar.gz)
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions exactly as they appear in their public registries.
- langtune-0.1.17/LICENSE +21 -0
- langtune-0.1.17/PKG-INFO +257 -0
- langtune-0.1.17/README.md +184 -0
- langtune-0.1.17/pyproject.toml +109 -0
- langtune-0.1.17/setup.cfg +4 -0
- langtune-0.1.17/src/langtune/__init__.py +315 -0
- langtune-0.1.17/src/langtune/acceleration.py +132 -0
- langtune-0.1.17/src/langtune/api.py +320 -0
- langtune-0.1.17/src/langtune/auth.py +434 -0
- langtune-0.1.17/src/langtune/callbacks.py +268 -0
- langtune-0.1.17/src/langtune/cli.py +687 -0
- langtune-0.1.17/src/langtune/client.py +721 -0
- langtune-0.1.17/src/langtune/config.py +356 -0
- langtune-0.1.17/src/langtune/data.py +526 -0
- langtune-0.1.17/src/langtune/distributed.py +154 -0
- langtune-0.1.17/src/langtune/facade.py +149 -0
- langtune-0.1.17/src/langtune/finetune.py +491 -0
- langtune-0.1.17/src/langtune/generation.py +95 -0
- langtune-0.1.17/src/langtune/logging_utils.py +182 -0
- langtune-0.1.17/src/langtune/metrics.py +345 -0
- langtune-0.1.17/src/langtune/model/__init__.py +20 -0
- langtune-0.1.17/src/langtune/model/hub.py +109 -0
- langtune-0.1.17/src/langtune/model/loader.py +84 -0
- langtune-0.1.17/src/langtune/model/safetensors.py +104 -0
- langtune-0.1.17/src/langtune/model/weights.py +100 -0
- langtune-0.1.17/src/langtune/models.py +19 -0
- langtune-0.1.17/src/langtune/nn/fast_transformer.py +399 -0
- langtune-0.1.17/src/langtune/nn/layers.py +178 -0
- langtune-0.1.17/src/langtune/nn/transformer.py +254 -0
- langtune-0.1.17/src/langtune/optimizations.py +870 -0
- langtune-0.1.17/src/langtune/py.typed +2 -0
- langtune-0.1.17/src/langtune/schedulers.py +234 -0
- langtune-0.1.17/src/langtune/tokenizers.py +275 -0
- langtune-0.1.17/src/langtune/trainer.py +889 -0
- langtune-0.1.17/src/langtune/training/neftune.py +80 -0
- langtune-0.1.17/src/langtune/utils.py +337 -0
- langtune-0.1.17/src/langtune.egg-info/PKG-INFO +257 -0
- langtune-0.1.17/src/langtune.egg-info/SOURCES.txt +43 -0
- langtune-0.1.17/src/langtune.egg-info/dependency_links.txt +1 -0
- langtune-0.1.17/src/langtune.egg-info/entry_points.txt +2 -0
- langtune-0.1.17/src/langtune.egg-info/requires.txt +22 -0
- langtune-0.1.17/src/langtune.egg-info/top_level.txt +1 -0
- langtune-0.1.17/tests/test_models.py +160 -0
- langtune-0.1.17/tests/test_optimizations.py +147 -0
- langtune-0.1.17/tests/test_trainer.py +102 -0
langtune-0.1.17/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Pritesh Raj
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
langtune-0.1.17/PKG-INFO
ADDED
@@ -0,0 +1,257 @@
+Metadata-Version: 2.4
+Name: langtune
+Version: 0.1.17
+Summary: Efficient LoRA Fine-Tuning for Large Language Models - Train smarter, not harder.
+Author-email: Pritesh Raj <priteshraj41@gmail.com>
+Maintainer-email: Langtrain AI <contact@langtrain.ai>
+License: MIT License
+
+Copyright (c) 2025 Pritesh Raj
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Project-URL: Homepage, https://github.com/langtrain-ai/langtune
+Project-URL: Documentation, https://github.com/langtrain-ai/langtune/tree/main/docs
+Project-URL: Repository, https://github.com/langtrain-ai/langtune
+Project-URL: Changelog, https://github.com/langtrain-ai/langtune/blob/main/CHANGELOG.md
+Project-URL: Bug Tracker, https://github.com/langtrain-ai/langtune/issues
+Keywords: llm,lora,fine-tuning,machine-learning,deep-learning,transformers,nlp,language-model,pytorch,rlhf,dpo,ppo
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Typing :: Typed
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: torch>=1.10
+Requires-Dist: numpy
+Requires-Dist: tqdm
+Requires-Dist: pyyaml
+Requires-Dist: scipy
+Requires-Dist: wandb
+Requires-Dist: rich>=13.0.0
+Requires-Dist: rich>=13.0.0
+Provides-Extra: dev
+Requires-Dist: pytest>=7.0; extra == "dev"
+Requires-Dist: pytest-cov; extra == "dev"
+Requires-Dist: black; extra == "dev"
+Requires-Dist: flake8; extra == "dev"
+Requires-Dist: mypy; extra == "dev"
+Requires-Dist: isort; extra == "dev"
+Provides-Extra: all
+Requires-Dist: transformers; extra == "all"
+Requires-Dist: datasets; extra == "all"
+Requires-Dist: accelerate; extra == "all"
+Requires-Dist: bitsandbytes; extra == "all"
+Dynamic: license-file
+
+<div align="center">
+
+<img src="https://raw.githubusercontent.com/langtrain-ai/langtune/main/static/langtune-white.png" alt="Langtune" width="400" />
+
+<h3>The fastest way to fine-tune LLMs</h3>
+
+<p>
+<strong>Production-ready LoRA fine-tuning in minutes, not days.</strong><br>
+Built for ML engineers who need results, not complexity.
+</p>
+
+<p>
+<a href="https://www.producthunt.com/products/langtrain-2" target="_blank"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?post_id=1049974&theme=light" alt="Product Hunt" width="200" /></a>
+</p>
+
+<p>
+<a href="https://pypi.org/project/langtune/"><img src="https://img.shields.io/pypi/v/langtune.svg?style=for-the-badge&logo=pypi&logoColor=white" alt="PyPI" /></a>
+<a href="https://pepy.tech/project/langtune"><img src="https://img.shields.io/pepy/dt/langtune?style=for-the-badge&logo=python&logoColor=white&label=downloads" alt="Downloads" /></a>
+<a href="https://github.com/langtrain-ai/langtune/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue?style=for-the-badge" alt="License" /></a>
+</p>
+
+<p>
+<a href="#quick-start">Quick Start</a> •
+<a href="#features">Features</a> •
+<a href="#why-langtune">Why Langtune</a> •
+<a href="https://langtrain.xyz/docs">Docs</a>
+</p>
+
+</div>
+
+---
+
+## ⚡ Quick Start
+
+```bash
+pip install langtune
+```
+
+Fine-tune your first model in **3 lines of code**:
+
+```python
+from langtune import LoRATrainer
+
+trainer = LoRATrainer(model_name="meta-llama/Llama-2-7b-hf")
+trainer.train_from_file("data.jsonl")
+```
+
+That's it. Your fine-tuned model is ready.
+
+---
+
+## ✨ Features
+
+<table>
+<tr>
+<td width="50%">
+
+### 🚀 **Blazing Fast**
+Train 7B models in under 30 minutes on a single GPU. Our optimized kernels squeeze every last FLOP.
+
+### 🎯 **Zero Config Required**
+Smart defaults that just work. No PhD required. Start training in seconds.
+
+### 💾 **Memory Efficient**
+4-bit quantization + gradient checkpointing = Train 70B models on consumer hardware.
+
+</td>
+<td width="50%">
+
+### 🔧 **Production Ready**
+Battle-tested at scale. Used by teams fine-tuning thousands of models daily.
+
+### 🌐 **Any Model, Any Data**
+Works with Llama, Mistral, Qwen, Phi, and more. JSONL, CSV, or HuggingFace datasets.
+
+### ☁️ **Cloud Native**
+One-click deployment to Langtrain Cloud. Or export to GGUF, ONNX, HuggingFace.
+
+</td>
+</tr>
+</table>
+
+---
+
+## 🎯 Why Langtune?
+
+| | Langtune | Others |
+|---|:---:|:---:|
+| **Time to first training** | 30 seconds | 2+ hours |
+| **Lines of code** | 3 | 100+ |
+| **Memory usage** | 8GB | 24GB+ |
+| **Learning curve** | Minutes | Days |
+
+---
+
+## 📖 Full Example
+
+```python
+from langtune import LoRATrainer
+from langtune.config import TrainingConfig, LoRAConfig
+
+# Configure your training
+config = TrainingConfig(
+    num_epochs=3,
+    batch_size=4,
+    learning_rate=2e-4,
+    lora=LoRAConfig(rank=16, alpha=32)
+)
+
+# Initialize and train
+trainer = LoRATrainer(
+    model_name="mistralai/Mistral-7B-v0.1",
+    output_dir="./my-model",
+    config=config
+)
+
+# Train on your data
+trainer.train_from_file("training_data.jsonl")
+
+# Push to Hub (optional)
+trainer.push_to_hub("my-username/my-fine-tuned-model")
+```
+
+---
+
+## 🛠️ Advanced Usage
+
+<details>
+<summary><b>Custom Dataset Format</b></summary>
+
+```python
+# JSONL format (recommended)
+{"text": "Your training example here"}
+{"text": "Another example"}
+
+# Or instruction format
+{"instruction": "Summarize this:", "input": "Long text...", "output": "Summary"}
+```
+
+</details>
+
+<details>
+<summary><b>Distributed Training</b></summary>
+
+```python
+trainer = LoRATrainer(
+    model_name="meta-llama/Llama-2-70b-hf",
+    device_map="auto",  # Automatic multi-GPU
+)
+```
+
+</details>
+
+<details>
+<summary><b>Export Formats</b></summary>
+
+```python
+# Export to different formats
+trainer.export("gguf")  # For llama.cpp
+trainer.export("onnx")  # For ONNX Runtime
+trainer.export("hf")    # HuggingFace format
+```
+
+</details>
+
+---
+
+## 🤝 Community
+
+<p align="center">
+<a href="https://discord.gg/langtrain">Discord</a> •
+<a href="https://twitter.com/langtrainai">Twitter</a> •
+<a href="https://langtrain.xyz">Website</a>
+</p>
+
+---
+
+<div align="center">
+
+**Built with ❤️ by [Langtrain AI](https://langtrain.xyz)**
+
+*Making LLM fine-tuning accessible to everyone.*
+
+</div>
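The metadata above declares a core dependency set (`rich>=13.0.0` appears twice; pip merges identical requirements) plus two extras, `dev` and `all`. For reference, the extras install via pip's standard bracket syntax; the quotes guard against shell glob expansion:

```bash
# Core dependencies only (torch, numpy, tqdm, pyyaml, scipy, wandb, rich)
pip install langtune

# With the HuggingFace stack declared under Provides-Extra: all
pip install "langtune[all]"

# With the contributor tooling declared under Provides-Extra: dev
pip install "langtune[dev]"
```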
langtune-0.1.17/README.md
ADDED
@@ -0,0 +1,184 @@
+<div align="center">
+
+<img src="https://raw.githubusercontent.com/langtrain-ai/langtune/main/static/langtune-white.png" alt="Langtune" width="400" />
+
+<h3>The fastest way to fine-tune LLMs</h3>
+
+<p>
+<strong>Production-ready LoRA fine-tuning in minutes, not days.</strong><br>
+Built for ML engineers who need results, not complexity.
+</p>
+
+<p>
+<a href="https://www.producthunt.com/products/langtrain-2" target="_blank"><img src="https://api.producthunt.com/widgets/embed-image/v1/featured.svg?post_id=1049974&theme=light" alt="Product Hunt" width="200" /></a>
+</p>
+
+<p>
+<a href="https://pypi.org/project/langtune/"><img src="https://img.shields.io/pypi/v/langtune.svg?style=for-the-badge&logo=pypi&logoColor=white" alt="PyPI" /></a>
+<a href="https://pepy.tech/project/langtune"><img src="https://img.shields.io/pepy/dt/langtune?style=for-the-badge&logo=python&logoColor=white&label=downloads" alt="Downloads" /></a>
+<a href="https://github.com/langtrain-ai/langtune/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue?style=for-the-badge" alt="License" /></a>
+</p>
+
+<p>
+<a href="#quick-start">Quick Start</a> •
+<a href="#features">Features</a> •
+<a href="#why-langtune">Why Langtune</a> •
+<a href="https://langtrain.xyz/docs">Docs</a>
+</p>
+
+</div>
+
+---
+
+## ⚡ Quick Start
+
+```bash
+pip install langtune
+```
+
+Fine-tune your first model in **3 lines of code**:
+
+```python
+from langtune import LoRATrainer
+
+trainer = LoRATrainer(model_name="meta-llama/Llama-2-7b-hf")
+trainer.train_from_file("data.jsonl")
+```
+
+That's it. Your fine-tuned model is ready.
+
+---
+
+## ✨ Features
+
+<table>
+<tr>
+<td width="50%">
+
+### 🚀 **Blazing Fast**
+Train 7B models in under 30 minutes on a single GPU. Our optimized kernels squeeze every last FLOP.
+
+### 🎯 **Zero Config Required**
+Smart defaults that just work. No PhD required. Start training in seconds.
+
+### 💾 **Memory Efficient**
+4-bit quantization + gradient checkpointing = Train 70B models on consumer hardware.
+
+</td>
+<td width="50%">
+
+### 🔧 **Production Ready**
+Battle-tested at scale. Used by teams fine-tuning thousands of models daily.
+
+### 🌐 **Any Model, Any Data**
+Works with Llama, Mistral, Qwen, Phi, and more. JSONL, CSV, or HuggingFace datasets.
+
+### ☁️ **Cloud Native**
+One-click deployment to Langtrain Cloud. Or export to GGUF, ONNX, HuggingFace.
+
+</td>
+</tr>
+</table>
+
+---
+
+## 🎯 Why Langtune?
+
+| | Langtune | Others |
+|---|:---:|:---:|
+| **Time to first training** | 30 seconds | 2+ hours |
+| **Lines of code** | 3 | 100+ |
+| **Memory usage** | 8GB | 24GB+ |
+| **Learning curve** | Minutes | Days |
+
+---
+
+## 📖 Full Example
+
+```python
+from langtune import LoRATrainer
+from langtune.config import TrainingConfig, LoRAConfig
+
+# Configure your training
+config = TrainingConfig(
+    num_epochs=3,
+    batch_size=4,
+    learning_rate=2e-4,
+    lora=LoRAConfig(rank=16, alpha=32)
+)
+
+# Initialize and train
+trainer = LoRATrainer(
+    model_name="mistralai/Mistral-7B-v0.1",
+    output_dir="./my-model",
+    config=config
+)
+
+# Train on your data
+trainer.train_from_file("training_data.jsonl")
+
+# Push to Hub (optional)
+trainer.push_to_hub("my-username/my-fine-tuned-model")
+```
+
+---
+
+## 🛠️ Advanced Usage
+
+<details>
+<summary><b>Custom Dataset Format</b></summary>
+
+```python
+# JSONL format (recommended)
+{"text": "Your training example here"}
+{"text": "Another example"}
+
+# Or instruction format
+{"instruction": "Summarize this:", "input": "Long text...", "output": "Summary"}
+```
+
+</details>
+
+<details>
+<summary><b>Distributed Training</b></summary>
+
+```python
+trainer = LoRATrainer(
+    model_name="meta-llama/Llama-2-70b-hf",
+    device_map="auto",  # Automatic multi-GPU
+)
+```
+
+</details>
+
+<details>
+<summary><b>Export Formats</b></summary>
+
+```python
+# Export to different formats
+trainer.export("gguf")  # For llama.cpp
+trainer.export("onnx")  # For ONNX Runtime
+trainer.export("hf")    # HuggingFace format
+```
+
+</details>
+
+---
+
+## 🤝 Community
+
+<p align="center">
+<a href="https://discord.gg/langtrain">Discord</a> •
+<a href="https://twitter.com/langtrainai">Twitter</a> •
+<a href="https://langtrain.xyz">Website</a>
+</p>
+
+---
+
+<div align="center">
+
+**Built with ❤️ by [Langtrain AI](https://langtrain.xyz)**
+
+*Making LLM fine-tuning accessible to everyone.*
+
+</div>
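The README's Quick Start consumes a `data.jsonl` file in one of the two record shapes documented under Custom Dataset Format. A minimal, standard-library-only sketch of producing such a file; the script name and sample records are illustrative, not part of the package:

```python
# make_dataset.py -- illustrative helper, not shipped with langtune.
# Writes training records in the two JSONL shapes the README documents:
# plain {"text": ...} rows and instruction/input/output rows.
import json

records = [
    {"text": "Your training example here"},
    {
        "instruction": "Summarize this:",
        "input": "Long text...",
        "output": "Summary",
    },
]

with open("data.jsonl", "w", encoding="utf-8") as f:
    for record in records:
        # One JSON object per line is what makes the file valid JSONL.
        f.write(json.dumps(record, ensure_ascii=False) + "\n")
```

The resulting file matches what `trainer.train_from_file("data.jsonl")` is shown loading in the Quick Start.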
langtune-0.1.17/pyproject.toml
ADDED
@@ -0,0 +1,109 @@
+[build-system]
+requires = ["setuptools>=61.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "langtune"
+version = "0.1.17"
+description = "Efficient LoRA Fine-Tuning for Large Language Models - Train smarter, not harder."
+authors = [
+    { name = "Pritesh Raj", email = "priteshraj41@gmail.com" }
+]
+maintainers = [
+    { name = "Langtrain AI", email = "contact@langtrain.ai" }
+]
+readme = "README.md"
+license = { file = "LICENSE" }
+requires-python = ">=3.8"
+keywords = [
+    "llm",
+    "lora",
+    "fine-tuning",
+    "machine-learning",
+    "deep-learning",
+    "transformers",
+    "nlp",
+    "language-model",
+    "pytorch",
+    "rlhf",
+    "dpo",
+    "ppo"
+]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Topic :: Software Development :: Libraries :: Python Modules",
+    "Typing :: Typed"
+]
+dependencies = [
+    "torch>=1.10",
+    "numpy",
+    "tqdm",
+    "pyyaml",
+    "scipy",
+    "wandb",
+    "rich>=13.0.0",
+    "rich>=13.0.0",
+]
+
+[project.optional-dependencies]
+dev = [
+    "pytest>=7.0",
+    "pytest-cov",
+    "black",
+    "flake8",
+    "mypy",
+    "isort"
+]
+all = [
+    "transformers",
+    "datasets",
+    "accelerate",
+    "bitsandbytes"
+]
+
+[project.urls]
+Homepage = "https://github.com/langtrain-ai/langtune"
+Documentation = "https://github.com/langtrain-ai/langtune/tree/main/docs"
+Repository = "https://github.com/langtrain-ai/langtune"
+Changelog = "https://github.com/langtrain-ai/langtune/blob/main/CHANGELOG.md"
+"Bug Tracker" = "https://github.com/langtrain-ai/langtune/issues"
+
+[project.scripts]
+langtune = "langtune.cli:main"
+
+[tool.setuptools.packages.find]
+where = ["src"]
+
+[tool.setuptools.package-data]
+langtune = ["py.typed"]
+
+[bumpver]
+current_version = "0.1.17"
+version_pattern = "MAJOR.MINOR.PATCH"
+commit_message = "Bump version: {old_version} → {new_version}"
+tag_message = "v{new_version}"
+
+[bumpver.file_patterns]
+"pyproject.toml" = [
+    'current_version = "{version}"',
+    'version = "{version}"',
+]
+
+[tool.black]
+line-length = 100
+target-version = ["py38", "py39", "py310", "py311", "py312"]
+
+[tool.isort]
+profile = "black"
+line_length = 100
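The `[project.scripts]` table wires a `langtune` console command to `langtune.cli:main`, and the `[bumpver]` tables configure the bumpver release tool. Assuming a standard install of both, typical invocations would look roughly like:

```bash
# Console entry point created from [project.scripts]
langtune --help

# bumpver reads [bumpver] and [bumpver.file_patterns]: bumps 0.1.17 -> 0.1.18,
# rewriting both version patterns in pyproject.toml and using the commit/tag
# message templates shown above
bumpver update --patch
```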