llmflowstack 1.0.0__tar.gz → 1.0.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/.github/workflows/python-publish.yml +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/.gitignore +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/LICENSE +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/PKG-INFO +12 -12
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/README.md +10 -10
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/__init__.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/base/__init__.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/base/base.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/callbacks/__init__.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/callbacks/log_collector.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/callbacks/stop_on_token.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/models/GPT_OSS.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/models/Gemma.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/models/LLaMA3.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/models/__init__.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/rag/__iinit__.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/rag/pipeline.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/schemas/__init__.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/schemas/params.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/utils/__init__.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/utils/evaluation_methods.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/utils/exceptions.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/llmflowstack/utils/generation_utils.py +0 -0
- {llmflowstack-1.0.0 → llmflowstack-1.0.2}/pyproject.toml +2 -2
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
@@ -1,11 +1,11 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: llmflowstack
|
|
3
|
-
Version: 1.0.0
|
|
3
|
+
Version: 1.0.2
|
|
4
4
|
Summary: LLMFlowStack is a framework for training and using LLMs (LLaMA, GPT-OSS, Gemma). Supports DAPT, fine-tuning, and distributed inference. Public fork without institution-specific components.
|
|
5
5
|
Author-email: Gustavo Henrique Ferreira Cruz <gustavohferreiracruz@gmail.com>
|
|
6
6
|
License: MIT
|
|
7
7
|
License-File: LICENSE
|
|
8
|
-
Requires-Python: >=3.
|
|
8
|
+
Requires-Python: >=3.12
|
|
9
9
|
Requires-Dist: accelerate
|
|
10
10
|
Requires-Dist: bert-score
|
|
11
11
|
Requires-Dist: bitsandbytes
|
|
@@ -34,11 +34,11 @@ Requires-Dist: triton
|
|
|
34
34
|
Requires-Dist: trl
|
|
35
35
|
Description-Content-Type: text/markdown
|
|
36
36
|
|
|
37
|
-
#
|
|
37
|
+
# LLMFlowStack
|
|
38
38
|
|
|
39
|
-
**
|
|
39
|
+
**LLMFlowStack** is a lightweight framework designed to simplify the use of LLMs (LLaMA, GPT-OSS, and Gemma) for NLP tasks.
|
|
40
40
|
|
|
41
|
-
> **Note:**
|
|
41
|
+
> **Note:** LLMFlowStack is intended for high-performance machines with **one or more NVIDIA H100 GPUs**.
|
|
42
42
|
|
|
43
43
|
It provides:
|
|
44
44
|
|
|
@@ -73,10 +73,10 @@ This framework is designed to provide flexibility when working with different op
|
|
|
73
73
|
|
|
74
74
|
## Installation
|
|
75
75
|
|
|
76
|
-
You can install the package directly from [PyPI](https://pypi.org/project/
|
|
76
|
+
You can install the package directly from [PyPI](https://pypi.org/project/llmflowstack/):
|
|
77
77
|
|
|
78
78
|
```bash
|
|
79
|
-
pip install
|
|
79
|
+
pip install llmflowstack
|
|
80
80
|
```
|
|
81
81
|
|
|
82
82
|
## Usage
|
|
@@ -88,7 +88,7 @@ This section presents a bit of what you can do with the framework.
|
|
|
88
88
|
You can load as many models as your hardware allows (H100 GPU recommended)...
|
|
89
89
|
|
|
90
90
|
```python
|
|
91
|
-
from
|
|
91
|
+
from llmflowstack import GPT_OSS, LLaMA3
|
|
92
92
|
|
|
93
93
|
# Loading a LLaMA model
|
|
94
94
|
first_model = LLaMA3()
|
|
@@ -144,8 +144,8 @@ thrid_model = GPT_OSS(
|
|
|
144
144
|
### Training Examples (DAPT & Fine-tune)
|
|
145
145
|
|
|
146
146
|
```python
|
|
147
|
-
from
|
|
148
|
-
from
|
|
147
|
+
from llmflowstack import LLaMA3
|
|
148
|
+
from llmflowstack.schemas import TrainParams
|
|
149
149
|
|
|
150
150
|
model = LLaMA3(
|
|
151
151
|
checkpoint="llama-3.1-8b-Instruct"
|
|
@@ -197,8 +197,8 @@ model.save_checkpoint(
|
|
|
197
197
|
### NLP Evaluation
|
|
198
198
|
|
|
199
199
|
```python
|
|
200
|
-
> from
|
|
201
|
-
> from
|
|
200
|
+
> from llmflowstack import text_evaluation
|
|
201
|
+
> from llmflowstack.utils import (bert_score_evaluation, cosine_similarity_evaluation, rouge_evaluation)
|
|
202
202
|
|
|
203
203
|
# Predictions from some model
|
|
204
204
|
> predictions = ["Chico is a dog, and he is orange!", "Fred is a cat, and he is white!"]
|
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
#
|
|
1
|
+
# LLMFlowStack
|
|
2
2
|
|
|
3
|
-
**
|
|
3
|
+
**LLMFlowStack** is a lightweight framework designed to simplify the use of LLMs (LLaMA, GPT-OSS, and Gemma) for NLP tasks.
|
|
4
4
|
|
|
5
|
-
> **Note:**
|
|
5
|
+
> **Note:** LLMFlowStack is intended for high-performance machines with **one or more NVIDIA H100 GPUs**.
|
|
6
6
|
|
|
7
7
|
It provides:
|
|
8
8
|
|
|
@@ -37,10 +37,10 @@ This framework is designed to provide flexibility when working with different op
|
|
|
37
37
|
|
|
38
38
|
## Installation
|
|
39
39
|
|
|
40
|
-
You can install the package directly from [PyPI](https://pypi.org/project/
|
|
40
|
+
You can install the package directly from [PyPI](https://pypi.org/project/llmflowstack/):
|
|
41
41
|
|
|
42
42
|
```bash
|
|
43
|
-
pip install
|
|
43
|
+
pip install llmflowstack
|
|
44
44
|
```
|
|
45
45
|
|
|
46
46
|
## Usage
|
|
@@ -52,7 +52,7 @@ This section presents a bit of what you can do with the framework.
|
|
|
52
52
|
You can load as many models as your hardware allows (H100 GPU recommended)...
|
|
53
53
|
|
|
54
54
|
```python
|
|
55
|
-
from
|
|
55
|
+
from llmflowstack import GPT_OSS, LLaMA3
|
|
56
56
|
|
|
57
57
|
# Loading a LLaMA model
|
|
58
58
|
first_model = LLaMA3()
|
|
@@ -108,8 +108,8 @@ thrid_model = GPT_OSS(
|
|
|
108
108
|
### Training Examples (DAPT & Fine-tune)
|
|
109
109
|
|
|
110
110
|
```python
|
|
111
|
-
from
|
|
112
|
-
from
|
|
111
|
+
from llmflowstack import LLaMA3
|
|
112
|
+
from llmflowstack.schemas import TrainParams
|
|
113
113
|
|
|
114
114
|
model = LLaMA3(
|
|
115
115
|
checkpoint="llama-3.1-8b-Instruct"
|
|
@@ -161,8 +161,8 @@ model.save_checkpoint(
|
|
|
161
161
|
### NLP Evaluation
|
|
162
162
|
|
|
163
163
|
```python
|
|
164
|
-
> from
|
|
165
|
-
> from
|
|
164
|
+
> from llmflowstack import text_evaluation
|
|
165
|
+
> from llmflowstack.utils import (bert_score_evaluation, cosine_similarity_evaluation, rouge_evaluation)
|
|
166
166
|
|
|
167
167
|
# Predictions from some model
|
|
168
168
|
> predictions = ["Chico is a dog, and he is orange!", "Fred is a cat, and he is white!"]
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
@@ -4,13 +4,13 @@ build-backend = "hatchling.build"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "llmflowstack"
|
|
7
|
-
version = "1.0.0"
|
|
7
|
+
version = "1.0.2"
|
|
8
8
|
authors = [
|
|
9
9
|
{ name = "Gustavo Henrique Ferreira Cruz", email = "gustavohferreiracruz@gmail.com" }
|
|
10
10
|
]
|
|
11
11
|
description = "LLMFlowStack is a framework for training and using LLMs (LLaMA, GPT-OSS, Gemma). Supports DAPT, fine-tuning, and distributed inference. Public fork without institution-specific components."
|
|
12
12
|
readme = "README.md"
|
|
13
|
-
requires-python = ">=3.
|
|
13
|
+
requires-python = ">=3.12"
|
|
14
14
|
license = {text = "MIT"}
|
|
15
15
|
|
|
16
16
|
dependencies = [
|