liger-kernel 0.0.1__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- liger_kernel/ops/rms_norm.py +38 -20
- liger_kernel/ops/swiglu.py +16 -16
- liger_kernel/transformers/__init__.py +1 -0
- liger_kernel/transformers/monkey_patch.py +30 -0
- liger_kernel/transformers/trainer_integration.py +45 -0
- liger_kernel/triton/monkey_patch.py +0 -2
- liger_kernel-0.1.1.dist-info/METADATA +245 -0
- {liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/RECORD +12 -11
- liger_kernel-0.0.1.dist-info/METADATA +0 -16
- {liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/LICENSE +0 -0
- {liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/NOTICE +0 -0
- {liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/WHEEL +0 -0
- {liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/top_level.txt +0 -0
liger_kernel/ops/rms_norm.py
CHANGED

@@ -20,9 +20,12 @@ def _rms_norm_forward(
     BLOCK_SIZE: tl.constexpr,
 ):
     """
+    y_i = (x_i / (RMS)) * wi, RMS = sqrt(sum(x_i^2) / N)
+
     Reference:
     1. https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
     2. https://github.com/unslothai/unsloth/blob/fd753fed99ed5f10ef8a9b7139588d9de9ddecfb/unsloth/kernels/rms_layernorm.py#L22
+    3. https://arxiv.org/pdf/1910.07467
     """
 
     row_idx = tl.program_id(0)
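For intuition, the formula added to the docstring corresponds to this plain-PyTorch reference (a minimal sketch for illustration only, not the Triton kernel; the `eps` stabilizer mirrors the kernel's `eps` argument):

```python
import torch

def rms_norm_reference(X: torch.Tensor, W: torch.Tensor, eps: float = 1e-6):
    """y_i = (x_i / RMS) * w_i with RMS = sqrt(mean(x_i^2) + eps), per row."""
    inv_rms = torch.rsqrt(X.pow(2).mean(dim=-1, keepdim=True) + eps)
    # The Triton kernel stores inv_rms per row (see the next hunk) so the
    # backward pass can reuse it instead of recomputing *, sum, /, sqrt.
    return X * inv_rms * W, inv_rms
```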
@@ -36,16 +39,17 @@ def _rms_norm_forward(
     X_row = tl.load(X_ptr + col_offsets, mask=mask, other=0)
     W_row = tl.load(W_ptr + col_offsets, mask=mask, other=0)
 
-
-
+    mean_square = tl.sum(X_row * X_row, axis=0) / n_cols
+    inv_rms = tl.math.rsqrt(mean_square + eps)
 
-    #
-
+    # We can save time by caching rms with minimal memory overhead
+    # because rms is much smaller compared to X_row, as rms is for each row.
+    # However, on the computation side, it can save 4 operations (*, sum, /, sqrt).
+    tl.store(r_ptr, inv_rms)
 
-
+    Y_row = X_row * inv_rms * W_row
 
-
-    tl.store(Y_ptr + col_offsets, output, mask=mask)
+    tl.store(Y_ptr + col_offsets, Y_row, mask=mask)
 
 
 @triton.jit
@@ -65,9 +69,10 @@ def _rms_norm_backward(
     BLOCK_SIZE: tl.constexpr,
 ):
     """
-    dx = (1 /
-    dw = sum(dy * (x /
+    dx = (1 / RMS) * [dy * w - (1 / N) * (1 / RMS^2) * ((dy * w) dot x) * x]. * means element-wise multiplication, whileas dot means dot product
+    dw = sum(dy * (x / RMS)). summation over BxT dimension
     """
+
     row_idx = tl.program_id(0)
     col_offsets = tl.arange(0, BLOCK_SIZE)
     mask = col_offsets < n_cols

@@ -81,26 +86,33 @@ def _rms_norm_backward(
     X_row = tl.load(X_ptr + col_offsets, mask=mask, other=0)
     W_row = tl.load(W_ptr + col_offsets, mask=mask, other=0)
 
-    # Get
-
-
-    normed = X_row * inv_var
+    # Get cached rms
+    inv_rms_row = tl.load(r_ptr)
 
-
-
-
-
-
-
+    dX_row = (inv_rms_row) * (
+        dY_row * W_row
+        - (1 / n_cols)
+        * inv_rms_row
+        * inv_rms_row
+        * tl.sum(dY_row * W_row * X_row, axis=0)
+        * X_row
+    )
+    tl.store(dY_ptr + col_offsets, dX_row, mask=mask)
 
     # calculate the gradient of W
-
+    dW_row = dY_row * X_row * inv_rms_row
+    tl.store(dW_ptr + col_offsets, dW_row, mask=mask)
 
 
 class LigerRMSNormFunction(torch.autograd.Function):
     @staticmethod
     @ensure_contiguous
     def forward(ctx, X, W, eps):
+        """
+        X: (B, T, H) or (BxT, H)
+        W: (H,)
+        """
+
         shape = X.shape
         dim = shape[-1]
         X = X.view(-1, dim)
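As a sanity check on the closed-form gradients in this hunk (a standalone sketch, not package code; shapes are illustrative), they can be compared against autograd in plain PyTorch:

```python
import torch

X = torch.randn(4, 8, dtype=torch.float64, requires_grad=True)
W = torch.randn(8, dtype=torch.float64, requires_grad=True)
dY = torch.randn(4, 8, dtype=torch.float64)

inv_rms = torch.rsqrt(X.pow(2).mean(dim=-1, keepdim=True) + 1e-6)
Y = X * inv_rms * W
Y.backward(dY)

with torch.no_grad():
    n = X.shape[-1]
    # dx = (1 / RMS) * [dy * w - (1 / N) * (1 / RMS^2) * ((dy * w) dot x) * x]
    dX = inv_rms * (dY * W - (1 / n) * inv_rms**2 * (dY * W * X).sum(-1, keepdim=True) * X)
    # dw = sum(dy * (x / RMS)), summed over the BxT dimension
    dW = (dY * X * inv_rms).sum(0)

assert torch.allclose(X.grad, dX) and torch.allclose(W.grad, dW)
```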
@@ -108,6 +120,7 @@ class LigerRMSNormFunction(torch.autograd.Function):
         BLOCK_SIZE, num_warps = calculate_settings(n_cols)
 
         Y = torch.empty((n_rows, n_cols), dtype=X.dtype, device=X.device)
+        # r is to cache (1/rms) for each row
        r = torch.empty(n_rows, dtype=X.dtype, device=X.device)
 
         # Check constraints.
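To put the caching comment in concrete terms, the per-row buffer `r` is tiny next to `X` (a rough arithmetic sketch; the shapes are illustrative, not from the package):

```python
# rough memory arithmetic for the (1/rms) cache (illustrative numbers)
n_rows, n_cols = 8 * 4096, 4096           # e.g. batch 8, seq len 4096, hidden 4096
bytes_per_el = 2                          # bf16
x_bytes = n_rows * n_cols * bytes_per_el  # 256 MiB for X
r_bytes = n_rows * bytes_per_el           # 64 KiB for the per-row cache
print(f"X: {x_bytes / 2**20:.0f} MiB, r: {r_bytes / 2**10:.0f} KiB")
```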
@@ -139,6 +152,10 @@ class LigerRMSNormFunction(torch.autograd.Function):
     @staticmethod
     @ensure_contiguous
     def backward(ctx, dY):
+        """
+        Y: (B, T, H) or (BxT, H)
+        """
+
         shape = dY.shape
         dim = shape[-1]
         dY = dY.view(-1, dim)

@@ -146,6 +163,7 @@ class LigerRMSNormFunction(torch.autograd.Function):
         n_rows, n_cols = dY.shape
         dW = torch.zeros_like(X)
 
+        # Here we use dY to store the value of dX to save memory
         _rms_norm_backward[(n_rows,)](
             dY,
             dY.stride(0),
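The final hunk's comment describes an in-place trick: the kernel writes dX into dY's storage, so backward allocates no new gradient buffer. A hedged `autograd.Function` sketch of the same idea (illustrative names, heavily simplified; safe here because the incoming grad has a single consumer):

```python
import torch

class ScaleInplaceGrad(torch.autograd.Function):
    """Illustration of reusing the incoming grad buffer for the output grad."""

    @staticmethod
    def forward(ctx, X, scale: float):
        ctx.scale = scale
        return X * scale

    @staticmethod
    def backward(ctx, dY):
        # dX is written into dY's storage, mirroring how the Triton kernel
        # overwrites dY with dX instead of allocating a fresh tensor.
        dY.mul_(ctx.scale)
        return dY, None

x = torch.randn(3, requires_grad=True)
ScaleInplaceGrad.apply(x, 2.0).sum().backward()
```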
liger_kernel/ops/swiglu.py
CHANGED

@@ -12,43 +12,43 @@ def silu(x):
 
 @triton.jit
 def _swiglu_forward_kernel(
-
+    a_ptr, b_ptr, c_ptr, stride, n_cols: tl.constexpr, BLOCK_SIZE: tl.constexpr
 ):
     program_id = tl.program_id(0)
 
     # locate start index
-
-
-
+    a_ptr += program_id * stride
+    b_ptr += program_id * stride
+    c_ptr += program_id * stride
 
     col_offsets = tl.arange(0, BLOCK_SIZE)
     mask = col_offsets < n_cols
 
     # sigmoid requires type float32
-    a_row = tl.load(
-    b_row = tl.load(
+    a_row = tl.load(a_ptr + col_offsets, mask=mask, other=0).to(tl.float32)
+    b_row = tl.load(b_ptr + col_offsets, mask=mask, other=0)
     c_row = silu(a_row) * b_row
-    tl.store(
+    tl.store(c_ptr + col_offsets, c_row, mask=mask)
 
 
 @triton.jit
 def _swiglu_backward_kernel(
-
+    dc_ptr, a_ptr, b_ptr, stride, n_cols: tl.constexpr, BLOCK_SIZE: tl.constexpr
 ):
     program_id = tl.program_id(0)
 
     # locate start index
-
-
-
+    dc_ptr += program_id * stride
+    a_ptr += program_id * stride
+    b_ptr += program_id * stride
 
     col_offsets = tl.arange(0, BLOCK_SIZE)
     mask = col_offsets < n_cols
 
-    dc_row = tl.load(
+    dc_row = tl.load(dc_ptr + col_offsets, mask=mask, other=0)
     # sigmoid requires type float32
-    a_row = tl.load(
-    b_row = tl.load(
+    a_row = tl.load(a_ptr + col_offsets, mask=mask, other=0).to(tl.float32)
+    b_row = tl.load(b_ptr + col_offsets, mask=mask, other=0)
 
     # recomputation to save memory
     sig_a = tl.sigmoid(a_row)

@@ -56,8 +56,8 @@ def _swiglu_backward_kernel(
     db_row = dc_row * silu_a
     da_row = dc_row * (silu_a * (1 - sig_a) + sig_a) * b_row
 
-    tl.store(
-    tl.store(
+    tl.store(a_ptr + col_offsets, da_row, mask=mask)
+    tl.store(b_ptr + col_offsets, db_row, mask=mask)
 
 
 class LigerSiLUMulFunction(torch.autograd.Function):
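The recomputed backward above can likewise be checked against autograd (a standalone sanity sketch, not package code):

```python
import torch
import torch.nn.functional as F

a = torch.randn(4, 8, dtype=torch.float64, requires_grad=True)
b = torch.randn(4, 8, dtype=torch.float64, requires_grad=True)
dc = torch.randn(4, 8, dtype=torch.float64)

(F.silu(a) * b).backward(dc)

with torch.no_grad():
    sig_a = torch.sigmoid(a)
    silu_a = a * sig_a
    db = dc * silu_a                              # matches db_row
    da = dc * (silu_a * (1 - sig_a) + sig_a) * b  # matches da_row

assert torch.allclose(a.grad, da) and torch.allclose(b.grad, db)
```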
liger_kernel/transformers/__init__.py
CHANGED

@@ -1,4 +1,5 @@
 from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss
+from liger_kernel.transformers.geglu import LigerGEGLUMLP
 from liger_kernel.transformers.model.llama import lce_forward
 from liger_kernel.transformers.rms_norm import LigerRMSNorm
 from liger_kernel.transformers.rope import liger_rotary_pos_emb
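The newly exported `LigerGEGLUMLP` is a drop-in MLP module; a hedged usage sketch (the config-style constructor and field names are assumptions based on the Hugging Face MLP modules it replaces):

```python
from types import SimpleNamespace

from liger_kernel.transformers import LigerGEGLUMLP

# hypothetical minimal config mirroring a Hugging Face model config
config = SimpleNamespace(hidden_size=1024, intermediate_size=4096, hidden_act="gelu_pytorch_tanh")
mlp = LigerGEGLUMLP(config)
```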
liger_kernel/transformers/monkey_patch.py
CHANGED

@@ -98,3 +99,32 @@ def apply_liger_kernel_to_mixtral(
         modeling_mixtral.CrossEntropyLoss = LigerCrossEntropyLoss
     if swiglu:
         modeling_mixtral.MixtralBlockSparseTop2MLP = LigerBlockSparseTop2MLP
+
+
+def apply_liger_kernel_to_gemma(
+    rope: bool = True,
+    cross_entropy: bool = True,
+    rms_norm: bool = True,
+    geglu: bool = True,
+) -> None:
+    """
+    Apply Liger kernels to replace original implementation in HuggingFace Gemma2 models
+    to make GPU go burrr.
+
+    Args:
+        rope (bool): Whether to apply Liger's rotary position embedding. Default is True.
+        cross_entropy (bool): Whether to apply Liger's cross entropy loss. Default is True.
+        rms_norm (bool): Whether to apply Liger's RMSNorm. Default is True.
+        geglu (bool): Whether to apply Liger's GeGLU MLP. Default is True.
+    """
+    # TODO(yundai424): add convergence test for gemma
+    from transformers.models.gemma import modeling_gemma
+
+    if rope:
+        modeling_gemma.apply_rotary_pos_emb = liger_rotary_pos_emb
+    if rms_norm:
+        modeling_gemma.GemmaRMSNorm = LigerRMSNorm
+    if cross_entropy:
+        modeling_gemma.CrossEntropyLoss = LigerCrossEntropyLoss
+    if geglu:
+        modeling_gemma.GemmaMLP = LigerGEGLUMLP
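Usage mirrors the existing `apply_liger_kernel_to_*` entry points; a minimal sketch (the model name is a placeholder):

```python
import transformers

from liger_kernel.transformers.monkey_patch import apply_liger_kernel_to_gemma

# Patch the module-level Gemma classes before instantiating the model.
apply_liger_kernel_to_gemma(rope=True, rms_norm=True, cross_entropy=True, geglu=True)
model = transformers.AutoModelForCausalLM.from_pretrained("<some gemma model>")
```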
liger_kernel/transformers/trainer_integration.py
ADDED

@@ -0,0 +1,45 @@
+import logging
+
+from liger_kernel.transformers.monkey_patch import (
+    apply_liger_kernel_to_gemma,
+    apply_liger_kernel_to_llama,
+    apply_liger_kernel_to_mistral,
+    apply_liger_kernel_to_mixtral,
+)
+
+logger = logging.getLogger(__name__)
+
+# Model type corresponds to the keys defined in transformers/models/auto/modeling_auto.py
+MODEL_TYPE_TO_APPLY_LIGER_FN = {
+    "gemma": apply_liger_kernel_to_gemma,
+    "llama": apply_liger_kernel_to_llama,
+    "mistral": apply_liger_kernel_to_mistral,
+    "mixtral": apply_liger_kernel_to_mixtral,
+}
+
+
+def _apply_liger_kernel(model_type: str = "", **kwargs) -> None:
+    """
+    Applies Liger kernels based on the specified model type. The custom
+    kernels for the specified model type will be applied with the provided
+    keyword arguments, otherwise the default configuration will be used.
+
+    Args:
+        - model_type: the model types as defined in transformers/models/auto/modeling_auto.py
+          and specified in the model's config.json
+        - kwargs: keyword arguments that are passed to the corresponding apply_liger_kernel_to_* function.
+    """
+
+    if not model_type:
+        logger.info("Model type was not provided. No Liger kernels will be applied.")
+        return
+
+    if model_type not in MODEL_TYPE_TO_APPLY_LIGER_FN.keys():
+        logger.info(
+            f"There are currently no Liger kernels supported for model type: {model_type}."
+        )
+        return
+
+    logger.info(f"Applying Liger kernels for model type: {model_type}.")
+    # Apply the default combination of liger kernels available for the model
+    MODEL_TYPE_TO_APPLY_LIGER_FN[model_type](**kwargs)
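A hedged example of how this entry point can be driven from a model's config (`model_type` is read from `config.json`; the kwargs shown are assumptions based on the patch functions above):

```python
from transformers import AutoConfig

from liger_kernel.transformers.trainer_integration import _apply_liger_kernel

config = AutoConfig.from_pretrained("<some llama model>")
# e.g. config.model_type == "llama" dispatches to apply_liger_kernel_to_llama
_apply_liger_kernel(model_type=config.model_type, rms_norm=True, swiglu=True)
```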
liger_kernel/triton/monkey_patch.py
CHANGED

@@ -1,12 +1,10 @@
 import os
 import random
 
-from overrides import override
 from triton.runtime.cache import FileCacheManager
 
 
 class LigerTritonFileCacheManager(FileCacheManager):
-    @override
     def put(self, data, filename, binary=True) -> str:
         if not self.cache_dir:
             raise RuntimeError("Could not create or locate cache dir")
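Dropping `@override` removes the `overrides` runtime dependency; `put` still overrides `FileCacheManager.put` by name. For context, Triton discovers a custom cache manager through the `TRITON_CACHE_MANAGER` environment variable (a hedged sketch; the registration mechanism is standard Triton and is not shown in this diff):

```python
import os

# "module.path:ClassName" format understood by triton.runtime.cache
os.environ["TRITON_CACHE_MANAGER"] = (
    "liger_kernel.triton.monkey_patch:LigerTritonFileCacheManager"
)
```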
liger_kernel-0.1.1.dist-info/METADATA
ADDED

@@ -0,0 +1,245 @@
+Metadata-Version: 2.1
+Name: liger-kernel
+Version: 0.1.1
+Summary: Efficient Triton kernels for LLM Training
+Home-page: https://github.com/linkedin/Liger-Kernel
+License: BSD-2-Clause
+Keywords: triton,kernels,LLM training,deep learning,Hugging Face,PyTorch,GPU optimization
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Education
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Description-Content-Type: text/markdown
+License-File: LICENSE
+License-File: NOTICE
+Requires-Dist: torch>=2.1.2
+Requires-Dist: triton>=2.3.0
+Requires-Dist: transformers>=4.40.1
+Provides-Extra: dev
+Requires-Dist: matplotlib>=3.7.2; extra == "dev"
+Requires-Dist: flake8>=4.0.1.1; extra == "dev"
+Requires-Dist: black>=24.4.2; extra == "dev"
+Requires-Dist: isort>=5.13.2; extra == "dev"
+Requires-Dist: pre-commit>=3.7.1; extra == "dev"
+Requires-Dist: torch-tb-profiler>=0.4.1; extra == "dev"
+
+# Liger Kernel: Efficient Triton Kernels for LLM Training
+
+[![Downloads](https://static.pepy.tech/badge/liger-kernel)](https://pepy.tech/project/liger-kernel) [![PyPI version](https://badge.fury.io/py/liger-kernel.svg)](https://badge.fury.io/py/liger-kernel) [![PyPI version](https://badge.fury.io/py/liger-kernel-nightly.svg)](https://badge.fury.io/py/liger-kernel-nightly)
+
+
+[Installation](#installation) | [Getting Started](#getting-started) | [Examples](#examples) | [APIs](#apis) | [Structure](#structure) | [Contributing](#contributing)
+
+**Liger (Linkedin GPU Efficient Runtime) Kernel** is a collection of Triton kernels designed specifically for LLM training. It can effectively increase multi-GPU **training throughput by 20%** and reduces **memory usage by 60%**. We have implemented **Hugging Face Compatible** `RMSNorm`, `RoPE`, `SwiGLU`, `CrossEntropy`, `FusedLinearCrossEntropy`, and more to come. The kernel works out of the box with [Flash Attention](https://github.com/Dao-AILab/flash-attention), [PyTorch FSDP](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html), and [Microsoft DeepSpeed](https://github.com/microsoft/DeepSpeed). We welcome contributions from the community to gather the best kernels for LLM training.
+
+## Supercharge Your Model with Liger Kernel
+
+
+![Banner](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/banner.GIF)
+
+With one line of code, Liger Kernel can increase throughput by more than 20% and reduce memory usage by 60%, thereby enabling longer context lengths, larger batch sizes, and massive vocabularies.
+
+
+| Speed Up                 | Memory Reduction        |
+|--------------------------|-------------------------|
+| ![Speed up](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/e2e-tps.png) | ![Memory](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/docs/images/e2e-memory.png) |
+
+> **Note:**
+> - Benchmark conditions: LLaMA 3-8B, Batch Size = 8, Data Type = `bf16`, Optimizer = AdamW, Gradient Checkpointing = True, Distributed Strategy = FSDP1 on 8 A100s.
+> - Hugging Face models start to OOM at a 4K context length, whereas Hugging Face + Liger Kernel scales up to 16K.
+
+## Examples
+
+### Basic
+
+| **Example**                                     | **Description**                                                                                     | **Lightning Studio** |
+|------------------------------------------------|---------------------------------------------------------------------------------------------------|----------------------|
+| [**Hugging Face Trainer**](https://github.com/linkedin/Liger-Kernel/tree/main/examples/huggingface) | Train LLaMA 3-8B ~20% faster with over 40% memory reduction on Alpaca dataset using 4 A100s with FSDP | TBA |
+| [**Lightning Trainer**](https://github.com/linkedin/Liger-Kernel/tree/main/examples/lightning) | Increase 15% throughput and reduce memory usage by 40% with LLaMA3-8B on MMLU dataset using 8 A100s with DeepSpeed ZeRO3 | TBA |
+
+### Advanced
+
+| **Example**                                     | **Description**                                                                                     | **Lightning Studio** |
+|------------------------------------------------|---------------------------------------------------------------------------------------------------|----------------------|
+| [**Medusa Multi-head LLM (Retraining Phase)**](https://github.com/linkedin/Liger-Kernel/tree/main/examples/medusa) | Reduce memory usage by 80% with 5 LM heads and improve throughput by 40% using 8 A100s with FSDP | TBA |
+
+## Key Features
+
+- **Ease of use:** Simply patch your Hugging Face model with one line of code, or compose your own model using our Liger Kernel modules.
+- **Time and memory efficient:** In the same spirit as Flash-Attn, but for layers like **RMSNorm**, **RoPE**, **SwiGLU**, and **CrossEntropy**! Increases multi-GPU training throughput by 20% and reduces memory usage by 60% with **kernel fusion**, **in-place replacement**, and **chunking** techniques.
+- **Exact:** Computation is exact—no approximations! Both forward and backward passes are implemented with rigorous unit tests and undergo convergence testing against training runs without Liger Kernel to ensure accuracy.
+- **Lightweight:** Liger Kernel has minimal dependencies, requiring only Torch and Triton—no extra libraries needed! Say goodbye to dependency headaches!
+- **Multi-GPU supported:** Compatible with multi-GPU setups (PyTorch FSDP, DeepSpeed, DDP, etc.).
+
+## Target Audiences
+
+- **Researchers**: Looking to compose models using efficient and reliable kernels for frontier experiments.
+- **ML Practitioners**: Focused on maximizing GPU training efficiency with optimal, high-performance kernels.
+- **Curious Novices**: Eager to learn how to write reliable Triton kernels to enhance training efficiency.
+
+
+## Installation
+
+### Dependencies
+
+- `torch >= 2.1.2`
+- `triton >= 2.3.0`
+- `transformers >= 4.40.1`
+
+To install the stable version:
+
+```bash
+$ pip install liger-kernel
+```
+
+To install the nightly version:
+
+```bash
+$ pip install liger-kernel-nightly
+```
+
+## Getting Started
+
+### 1. Patch Existing Hugging Face Models
+
+Using the [patching APIs](#patching), you can swap Hugging Face models with optimized Liger Kernels.
+
+```python
+import transformers
+from liger_kernel.transformers import apply_liger_kernel_to_llama
+
+model = transformers.AutoModelForCausalLM.from_pretrained("<some llama model>")
+
+# Adding this line automatically monkey-patches the model with the optimized Liger kernels
+apply_liger_kernel_to_llama()
+```
+
+### 2. Compose Your Own Model
+
+You can take individual [kernels](#kernels) to compose your models.
+
+```python
+from liger_kernel.transformers import LigerFusedLinearCrossEntropyLoss
+import torch.nn as nn
+import torch
+
+model = nn.Linear(128, 256).cuda()
+
+# fuses linear + cross entropy layers together and performs chunk-by-chunk computation to reduce memory
+loss_fn = LigerFusedLinearCrossEntropyLoss()
+
+input = torch.randn(4, 128, requires_grad=True, device="cuda")
+target = torch.randint(256, (4, ), device="cuda")
+
+loss = loss_fn(model.weight, input, target)
+loss.backward()
+```
+
+
+## Structure
+
+### Source Code
+
+- `ops/`: Core Triton operations.
+- `transformers/`: PyTorch `nn.Module` implementations built on Triton operations, compliant with the `transformers` API.
+
+### Tests
+
+- `transformers/`: Correctness tests for the Triton-based layers.
+- `convergence/`: Patches Hugging Face models with all kernels, runs multiple iterations, and compares weights, logits, and loss layer-by-layer.
+
+### Benchmark
+
+- `benchmark/`: Execution time and memory benchmarks compared to Hugging Face layers.
+
+## APIs
+
+### Patching
+
+| **Model**   | **API**                                                        | **Supported Operations**                                                |
+|-------------|--------------------------------------------------------------|-------------------------------------------------------------------------|
+| LLaMA (2 & 3) | `liger_kernel.transformers.apply_liger_kernel_to_llama`   | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy        |
+| Mistral     | `liger_kernel.transformers.apply_liger_kernel_to_mistral`  | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss                                 |
+| Mixtral     | `liger_kernel.transformers.apply_liger_kernel_to_mixtral`  | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss                                 |
+| Gemma2      | `liger_kernel.transformers.apply_liger_kernel_to_gemma`    | RoPE, RMSNorm, GeGLU, CrossEntropyLoss                                  |
+
+### Kernels
+
+| **Kernel**                      | **API**                                                     |
+|---------------------------------|-------------------------------------------------------------|
+| RMSNorm                         | `liger_kernel.transformers.LigerRMSNorm`                    |
+| RoPE                            | `liger_kernel.transformers.liger_rotary_pos_emb`            |
+| SwiGLU                          | `liger_kernel.transformers.LigerSwiGLUMLP`                  |
+| GeGLU                           | `liger_kernel.transformers.LigerGEGLUMLP`                   |
+| CrossEntropy                    | `liger_kernel.transformers.LigerCrossEntropyLoss`           |
+| FusedLinearCrossEntropy         | `liger_kernel.transformers.LigerFusedLinearCrossEntropyLoss`|
+
+- **RMSNorm**: [RMSNorm](https://arxiv.org/pdf/1910.07467), which normalizes activations using their root mean square, is implemented by fusing the normalization and scaling steps into a single Triton kernel, and achieves ~3X speedup with ~3X peak memory reduction.
+- **RoPE**: [Rotary Positional Embedding](https://arxiv.org/pdf/2104.09864) is implemented by fusing the query and key embedding rotary into a single kernel with inplace replacement, and achieves ~3X speedup with ~3X peak memory reduction.
+- **SwiGLU**: [Swish Gated Linear Units](https://arxiv.org/pdf/2002.05202), given by
+$$\text{SwiGLU}(x)=\text{Swish}_{\beta}(xW+b)\otimes(xV+c)$$
+, is implemented by fusing the elementwise multiplication (denoted by $\otimes$) into a single kernel with inplace replacement, and achieves parity speed with ~1.5X peak memory reduction.
+- **GeGLU**: [GELU Gated Linear Units](https://arxiv.org/pdf/2002.05202), given by
+$$\text{GeGLU}(x)=\text{GELU}(xW+b)\otimes(xV+c)$$
+, is implemented by fusing the elementwise multiplication into a single kernel with inplace replacement, and achieves parity speed with ~1.5X peak memory reduction. Note that the [tanh approximation form of GELU](https://pytorch.org/docs/stable/generated/torch.nn.GELU.html) is used.
+- **CrossEntropy**: [Cross entropy loss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html) is implemented by computing both the loss and gradient in the forward pass with inplace replacement of input to reduce the peak memory by avoiding simultaneous materialization of both input logits and gradient. It achieves >2X speedup and >4X memory reduction for common vocab sizes (e.g., 32K, 128K, etc.).
+<!-- TODO: verify vocab sizes are accurate -->
+- **FusedLinearCrossEntropy**: Peak memory usage of cross entropy loss is further improved by fusing the model head with the CE loss and chunking the input for block-wise loss and gradient calculation, a technique inspired by [Efficient Cross Entropy](https://github.com/mgmalek/efficient_cross_entropy). It achieves >4X memory reduction for 128k vocab size. **This is highly effective for large batch size, large sequence length, and large vocabulary sizes.** Please refer to the [Medusa example](https://github.com/linkedin/Liger-Kernel/tree/main/examples/medusa) for individual kernel usage.
+
+
+<!-- TODO: be more specific about batch size -->
+> **Note:**
+> Reported speedups and memory reductions are with respect to the LLaMA 3-8B Hugging Face layer implementations. All models use 4K hidden size and 4K sequence length and are evaluated based on memory usage and wall time for the forward+backward pass on a single NVIDIA A100 80G GPU using small batch sizes. Liger kernels exhibit more efficient scaling to larger batch sizes, detailed further in the [Benchmark](./benchmark) folder.
+
+## Note on ML Compiler
+
+### 1. Torch Compile
+
+Since Liger Kernel is 100% Triton-based, it works seamlessly with [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). In the following example, Liger Kernel can further optimize the model on top of Torch Compile, reducing the memory by more than half.
+
+| Configuration                  | Throughput (tokens/sec)    | Memory Reserved (GB)    |
+|--------------------------------|----------------------------|-------------------------|
+| Torch Compile                  | 3780                       | 66.4                    |
+| Torch Compile + Liger Kernel   | 3702                       | 31.0                    |
+
+> **Note:**
+> 1. Benchmark conditions: LLaMA 3-8B, Batch Size = 8, Seq Len = 4096, Data Type = `bf16`, Optimizer = AdamW, Gradient Checkpointing = True, Distributed Strategy = FSDP1 on 8 A100s.
+> 2. Tested on torch `2.5.0.dev20240731+cu118`
+
+### 2. Lightning Thunder
+
+*WIP*
+
+## Contributing
+
+[CONTRIBUTING GUIDE](https://github.com/linkedin/Liger-Kernel/blob/main/CONTRIBUTING.md)
+
+## Acknowledgement
+
+- [flash-attn](https://github.com/Dao-AILab/flash-attention) and [Unsloth](https://github.com/unslothai/unsloth) for inspiration in Triton kernels for training
+- [tiny shakespeare dataset](https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt) by Andrej Karpathy for convergence testing
+- [Efficient Cross Entropy](https://github.com/mgmalek/efficient_cross_entropy) for lm_head + cross entropy inspiration
+
+
+## License
+
+[BSD 2-CLAUSE](https://github.com/linkedin/Liger-Kernel/blob/main/LICENSE)
+
+## Cite this work
+
+Biblatex entry:
+```bib
+@software{liger2024,
+  title  = {Liger-Kernel: Efficient Triton Kernels for LLM Training},
+  author = {Hsu, Pin-Lun and Dai, Yun and Kothapalli, Vignesh and Song, Qingquan and Tang, Shao and Zhu, Siyu},
+  url    = {https://github.com/linkedin/Liger-Kernel},
+  year   = {2024}
+}
+```
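For the SwiGLU/GeGLU formulas in the README above, a plain-PyTorch reference of the unfused computation may help (an illustrative sketch of what the Liger module fuses, not the package implementation; the actual modules fuse the elementwise product in Triton with in-place replacement):

```python
import torch.nn as nn
import torch.nn.functional as F

class SwiGLUMLPReference(nn.Module):
    """Unfused SwiGLU(x) = Swish(xW) ⊗ (xV), LLaMA-style (no biases)."""

    def __init__(self, hidden_size: int, intermediate_size: int):
        super().__init__()
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)  # W
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)    # V
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

    def forward(self, x):
        # silu is Swish with beta = 1
        return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))
```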
{liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/RECORD
CHANGED

@@ -2,25 +2,26 @@ liger_kernel/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 liger_kernel/ops/cross_entropy.py,sha256=YTHKVyPW748EWtbWJeKdIe9S1dEq6i90_PbBuCD-9s0,9178
 liger_kernel/ops/fused_linear_cross_entropy.py,sha256=58MmDhLJGR5b8ixztkhR707yp0VY28oBRASFVwGbeV8,7346
 liger_kernel/ops/geglu.py,sha256=5tGinryOOYRpGtKwJ4B1ertwtzd81xdjevD3Ha7H1AY,3849
-liger_kernel/ops/rms_norm.py,sha256=
+liger_kernel/ops/rms_norm.py,sha256=AQ1jaCXUlrBazqAPg-Cpf2K5OsO4byDKcdfWsGy9-zI,4848
 liger_kernel/ops/rope.py,sha256=fYBct8gDQfKPZdMWlzkZZ8kBzh6nQ7DIpDsc7lZwM8c,8584
-liger_kernel/ops/swiglu.py,sha256=
+liger_kernel/ops/swiglu.py,sha256=MRbSIXsBLqlFr9ZdtuFqSjLJJ-716URmQIhxQ57GGEw,2915
 liger_kernel/ops/utils.py,sha256=vsFIywd8LQlVPRA3RPZOm5HyN8c0cS4NFEEnwjNw-MI,1427
-liger_kernel/transformers/__init__.py,sha256=
+liger_kernel/transformers/__init__.py,sha256=nVvk0h7er3fdgubQF8Z8KjA3ew-q5oJHyJRg5cKmBoc,205
 liger_kernel/transformers/cross_entropy.py,sha256=G-L4EaUYVc25NKZ2jrlaG-d5YUvDqJdUlawPN7K1d1g,389
 liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=h0AW9ubFGfz4DBwgh2CLW8rpKo9PvxYpB6AUzjx-1b0,501
 liger_kernel/transformers/geglu.py,sha256=FrLBHZRdI68jw9RR6MSTE59-xCzueOwSRp9jL8y-j98,896
-liger_kernel/transformers/monkey_patch.py,sha256=
+liger_kernel/transformers/monkey_patch.py,sha256=FjaRZVWm_ZMHO3NXc4IT6EpCTWJOdZKP72mZq01qbrA,5006
 liger_kernel/transformers/rms_norm.py,sha256=2LHfEctSpzuNRaoZ9uUECSFK8fZeIxIsHm9QbEHZvDQ,452
 liger_kernel/transformers/rope.py,sha256=m-ah8vZBYW8tfplTXCiAPMHJWlB1tdp_JPXJeWE-Boo,943
 liger_kernel/transformers/swiglu.py,sha256=8kt4MffEZT5vx3k0WA-GO-WPLv5kGdnu_nAwlJyMI2U,1516
+liger_kernel/transformers/trainer_integration.py,sha256=gt0fF-se2XiIB6PocHBPBuD6tLCOtQRcb20WfUS2ceA,1645
 liger_kernel/transformers/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 liger_kernel/transformers/model/llama.py,sha256=4mfVTMrY7T-xiJeQJe02hBVnAwNCKlvLGp49gj6TWiU,5298
 liger_kernel/triton/__init__.py,sha256=yfRe0zMb47QnqjecZWG7LnanfCTzeku7SgWRAwNVmzU,101
-liger_kernel/triton/monkey_patch.py,sha256=
-liger_kernel-0.
-liger_kernel-0.
-liger_kernel-0.
-liger_kernel-0.
-liger_kernel-0.
-liger_kernel-0.
+liger_kernel/triton/monkey_patch.py,sha256=5BcGKTtdqeYchypBIBopGIWPx1-cFALz7sOKoEsqXJ0,1584
+liger_kernel-0.1.1.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel-0.1.1.dist-info/METADATA,sha256=jkp8JFT7zDNqf4-i0HQruXWhd-RGjJ8pTbCsM_K2ftI,14533
+liger_kernel-0.1.1.dist-info/NOTICE,sha256=BXkXY9aWvEy_7MAB57zDu1z8uMYT1i1l9B6EpHuBa8s,173
+liger_kernel-0.1.1.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+liger_kernel-0.1.1.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel-0.1.1.dist-info/RECORD,,
liger_kernel-0.0.1.dist-info/METADATA
DELETED

@@ -1,16 +0,0 @@
-Metadata-Version: 2.1
-Name: liger-kernel
-Version: 0.0.1
-License-File: LICENSE
-License-File: NOTICE
-Requires-Dist: torch>=2.1.2
-Requires-Dist: triton>=2.3.0
-Requires-Dist: transformers>=4.40.1
-Provides-Extra: dev
-Requires-Dist: matplotlib>=3.7.2; extra == "dev"
-Requires-Dist: flake8>=4.0.1.1; extra == "dev"
-Requires-Dist: black>=24.4.2; extra == "dev"
-Requires-Dist: isort>=5.13.2; extra == "dev"
-Requires-Dist: pre-commit>=3.7.1; extra == "dev"
-Requires-Dist: torch-tb-profiler>=0.4.1; extra == "dev"
-

{liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/LICENSE
File without changes

{liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/NOTICE
File without changes

{liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/WHEEL
File without changes

{liger_kernel-0.0.1.dist-info → liger_kernel-0.1.1.dist-info}/top_level.txt
File without changes