liger-kernel 0.4.0__tar.gz → 0.4.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {liger_kernel-0.4.0/src/liger_kernel.egg-info → liger_kernel-0.4.1}/PKG-INFO +5 -3
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/README.md +4 -2
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/pyproject.toml +1 -1
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/env_report.py +2 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/cross_entropy.py +143 -30
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/fused_linear_cross_entropy.py +19 -2
- liger_kernel-0.4.1/src/liger_kernel/ops/group_norm.py +322 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/rms_norm.py +27 -6
- liger_kernel-0.4.1/src/liger_kernel/transformers/cross_entropy.py +53 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/functional.py +34 -1
- liger_kernel-0.4.1/src/liger_kernel/transformers/fused_linear_cross_entropy.py +48 -0
- liger_kernel-0.4.1/src/liger_kernel/transformers/group_norm.py +56 -0
- liger_kernel-0.4.1/src/liger_kernel/transformers/model/gemma2.py +277 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/monkey_patch.py +101 -62
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/rms_norm.py +11 -3
- {liger_kernel-0.4.0 → liger_kernel-0.4.1/src/liger_kernel.egg-info}/PKG-INFO +5 -3
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel.egg-info/SOURCES.txt +3 -0
- liger_kernel-0.4.0/src/liger_kernel/transformers/cross_entropy.py +0 -21
- liger_kernel-0.4.0/src/liger_kernel/transformers/fused_linear_cross_entropy.py +0 -21
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/LICENSE +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/NOTICE +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/setup.cfg +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/__init__.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/experimental/embedding.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/experimental/mm_int8int2.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/fused_linear_jsd.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/geglu.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/jsd.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/kl_div.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/layer_norm.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/rope.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/swiglu.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/utils.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/__init__.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/auto_model.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/experimental/embedding.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/fused_linear_jsd.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/geglu.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/jsd.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/kl_div.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/layer_norm.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/__init__.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/gemma.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/llama.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/mistral.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/mixtral.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/mllama.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/phi3.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/qwen2.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/model/qwen2_vl.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/rope.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/swiglu.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/transformers/trainer_integration.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/triton/__init__.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/triton/monkey_patch.py +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel.egg-info/dependency_links.txt +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel.egg-info/requires.txt +0 -0
- {liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel.egg-info/top_level.txt +0 -0
{liger_kernel-0.4.0/src/liger_kernel.egg-info → liger_kernel-0.4.1}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: liger_kernel
-Version: 0.4.0
+Version: 0.4.1
 Summary: Efficient Triton kernels for LLM Training
 License: BSD 2-CLAUSE LICENSE
 Copyright 2024 LinkedIn Corporation
@@ -296,7 +296,7 @@ loss.backward()
 | Mistral | `liger_kernel.transformers.apply_liger_kernel_to_mistral` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Mixtral | `liger_kernel.transformers.apply_liger_kernel_to_mixtral` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Gemma1 | `liger_kernel.transformers.apply_liger_kernel_to_gemma` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
-| Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss |
+| Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Qwen2 & Qwen2.5 | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Qwen2-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
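The row change above is the user-visible headline of 0.4.1: `apply_liger_kernel_to_gemma2` now also swaps in FusedLinearCrossEntropy (backed by the new `transformers/model/gemma2.py` in the file list). A minimal usage sketch — the model id and loading kwargs are illustrative, not taken from this diff:

```python
import torch
import transformers
from liger_kernel.transformers import apply_liger_kernel_to_gemma2

# patch the Hugging Face Gemma2 modeling code in place before loading the model
apply_liger_kernel_to_gemma2()

model = transformers.AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-2b",  # illustrative checkpoint
    torch_dtype=torch.bfloat16,
)
```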
@@ -320,6 +320,7 @@ loss.backward()
 
 - **RMSNorm**: [RMSNorm](https://arxiv.org/pdf/1910.07467), which normalizes activations using their root mean square, is implemented by fusing the normalization and scaling steps into a single Triton kernel, and achieves ~3X speedup with ~3X peak memory reduction.
 - **LayerNorm**: [LayerNorm](https://arxiv.org/pdf/1607.06450), which centers and normalizes activations across the feature dimension, is implemented by fusing the centering, normalization and scaling steps into a single Triton kernel, and achieves ~2X speedup.
+- **GroupNorm**: [GroupNorm](https://arxiv.org/pdf/1803.08494), which splits the channels of each sample into K groups and normalizes within each group, is implemented by fusing the centering, normalization and scaling steps into a single Triton kernel, and can achieve up to ~2X speedup as the number of channels/groups increases.
 - **RoPE**: [Rotary Positional Embedding](https://arxiv.org/pdf/2104.09864) is implemented by fusing the query and key embedding rotary into a single kernel with inplace replacement, and achieves ~3X speedup with ~3X peak memory reduction.
 - **SwiGLU**: [Swish Gated Linear Units](https://arxiv.org/pdf/2002.05202), given by
 $$\text{SwiGLU}(x)=\text{Swish}_{\beta}(xW+b)\otimes(xV+c)$$
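The new **GroupNorm** bullet corresponds to `ops/group_norm.py` and `transformers/group_norm.py` added in the file list above. A minimal sketch of the module wrapper — the `LigerGroupNorm` name and a `torch.nn.GroupNorm`-like `(num_channels, num_groups)` interface are assumptions, not confirmed by this diff:

```python
import torch
from liger_kernel.transformers.group_norm import LigerGroupNorm  # assumed name

x = torch.randn(8, 64, 128, device="cuda")  # (batch, channels, hidden)

# assumed to mirror torch.nn.GroupNorm: 64 channels split into 4 groups
norm = LigerGroupNorm(num_channels=64, num_groups=4).to("cuda")
y = norm(x)  # fused centering + normalization + scaling in one Triton kernel
assert y.shape == x.shape
```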
@@ -332,7 +333,7 @@ $$\text{GeGLU}(x)=\text{GELU}(xW+b)\otimes(xV+c)$$
 - **FusedLinearCrossEntropy**: Peak memory usage of cross entropy loss is further improved by fusing the model head with the CE loss and chunking the input for block-wise loss and gradient calculation, a technique inspired by [Efficient Cross Entropy](https://github.com/mgmalek/efficient_cross_entropy). It achieves >4X memory reduction for 128k vocab size. **This is highly effective for large batch size, large sequence length, and large vocabulary sizes.** Please refer to the [Medusa example](https://github.com/linkedin/Liger-Kernel/tree/main/examples/medusa) for individual kernel usage.
 - **KLDivergence**: [KL Divergence](https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html) is implemented by fusing the forward into a single triton kernel, with reduction done outside the kernel. It achieves ~1.5X speed and ~15% memory reduction for 128K vocab size.
 - **JSD**: [Generalized JSD](https://arxiv.org/pdf/2306.13649) (Jensen-Shannon divergence), is implemented by computing both the loss and gradient in the forward pass. It achieves ~1.5X speed and ~54% memory reduction for 128k vocab size.
-- **FusedLinearJSD**: Peak memory usage of JSD loss is further improved by fusing the model head with the
+- **FusedLinearJSD**: Peak memory usage of JSD loss is further improved by fusing the model head with the JSD and chunking the input for block-wise loss and gradient calculation. It achieves ~85% memory reduction for 128k vocab size where batch size $\times$ sequence length is 8192.
 
 
 ### Experimental Kernels
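Both fused-linear bullets above rely on the same trick: chunk the flattened `(BT, H)` hidden states so the full `(BT, V)` logit matrix never materializes. A plain-PyTorch sketch of the idea only — Liger's actual kernels also fuse the loss and gradient computation into Triton:

```python
import torch
import torch.nn.functional as F


def chunked_linear_ce(hidden, weight, target, chunk_size=1024):
    """hidden: (BT, H), weight: (V, H), target: (BT,) — mean CE without
    ever holding more than one (chunk_size, V) logit block in memory."""
    total = hidden.new_zeros(())
    for h, t in zip(hidden.split(chunk_size), target.split(chunk_size)):
        logits = h @ weight.T  # only one chunk of logits is alive at a time
        total = total + F.cross_entropy(logits, t, reduction="sum")
    return total / target.numel()
```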
@@ -425,3 +426,4 @@ Biblatex entry:
 ↑ Back to Top ↑
 </a>
 </p>
+
{liger_kernel-0.4.0 → liger_kernel-0.4.1}/README.md
RENAMED

@@ -249,7 +249,7 @@ loss.backward()
 | Mistral | `liger_kernel.transformers.apply_liger_kernel_to_mistral` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Mixtral | `liger_kernel.transformers.apply_liger_kernel_to_mixtral` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Gemma1 | `liger_kernel.transformers.apply_liger_kernel_to_gemma` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
-| Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss |
+| Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Qwen2 & Qwen2.5 | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Qwen2-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
@@ -273,6 +273,7 @@ loss.backward()
 
 - **RMSNorm**: [RMSNorm](https://arxiv.org/pdf/1910.07467), which normalizes activations using their root mean square, is implemented by fusing the normalization and scaling steps into a single Triton kernel, and achieves ~3X speedup with ~3X peak memory reduction.
 - **LayerNorm**: [LayerNorm](https://arxiv.org/pdf/1607.06450), which centers and normalizes activations across the feature dimension, is implemented by fusing the centering, normalization and scaling steps into a single Triton kernel, and achieves ~2X speedup.
+- **GroupNorm**: [GroupNorm](https://arxiv.org/pdf/1803.08494), which splits the channels of each sample into K groups and normalizes within each group, is implemented by fusing the centering, normalization and scaling steps into a single Triton kernel, and can achieve up to ~2X speedup as the number of channels/groups increases.
 - **RoPE**: [Rotary Positional Embedding](https://arxiv.org/pdf/2104.09864) is implemented by fusing the query and key embedding rotary into a single kernel with inplace replacement, and achieves ~3X speedup with ~3X peak memory reduction.
 - **SwiGLU**: [Swish Gated Linear Units](https://arxiv.org/pdf/2002.05202), given by
 $$\text{SwiGLU}(x)=\text{Swish}_{\beta}(xW+b)\otimes(xV+c)$$
@@ -285,7 +286,7 @@ $$\text{GeGLU}(x)=\text{GELU}(xW+b)\otimes(xV+c)$$
 - **FusedLinearCrossEntropy**: Peak memory usage of cross entropy loss is further improved by fusing the model head with the CE loss and chunking the input for block-wise loss and gradient calculation, a technique inspired by [Efficient Cross Entropy](https://github.com/mgmalek/efficient_cross_entropy). It achieves >4X memory reduction for 128k vocab size. **This is highly effective for large batch size, large sequence length, and large vocabulary sizes.** Please refer to the [Medusa example](https://github.com/linkedin/Liger-Kernel/tree/main/examples/medusa) for individual kernel usage.
 - **KLDivergence**: [KL Divergence](https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html) is implemented by fusing the forward into a single triton kernel, with reduction done outside the kernel. It achieves ~1.5X speed and ~15% memory reduction for 128K vocab size.
 - **JSD**: [Generalized JSD](https://arxiv.org/pdf/2306.13649) (Jensen-Shannon divergence), is implemented by computing both the loss and gradient in the forward pass. It achieves ~1.5X speed and ~54% memory reduction for 128k vocab size.
-- **FusedLinearJSD**: Peak memory usage of JSD loss is further improved by fusing the model head with the
+- **FusedLinearJSD**: Peak memory usage of JSD loss is further improved by fusing the model head with the JSD and chunking the input for block-wise loss and gradient calculation. It achieves ~85% memory reduction for 128k vocab size where batch size $\times$ sequence length is 8192.
 
 
 ### Experimental Kernels
@@ -378,3 +379,4 @@ Biblatex entry:
 ↑ Back to Top ↑
 </a>
 </p>
+
{liger_kernel-0.4.0 → liger_kernel-0.4.1}/pyproject.toml
RENAMED

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "liger_kernel"
-version = "0.4.0"
+version = "0.4.1"
 description = "Efficient Triton kernels for LLM Training"
 urls = { "Homepage" = "https://github.com/linkedin/Liger-Kernel" }
 readme = { file = "README.md", content-type = "text/markdown" }
{liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/env_report.py
RENAMED

@@ -4,11 +4,13 @@ import sys
 
 def print_env_report():
     """
+
     Prints a report of the environment. Useful for debugging and reproducibility.
     Usage:
     ```
     python -m liger_kernel.env_report
     ```
+
     """
     print("Environment Report:")
     print("-------------------")
{liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/cross_entropy.py
RENAMED

@@ -1,8 +1,24 @@
+import operator
+from typing import Optional
+
 import torch
 import triton
 import triton.language as tl
 
-from liger_kernel.ops.utils import element_mul_kernel, is_hip
+from liger_kernel.ops.utils import compare_version, element_mul_kernel, is_hip
+
+if compare_version("triton", operator.ge, "3.0.0"):
+    try:
+        # typical import path with dispatch available
+        from triton.language.extra.libdevice import tanh
+    except ModuleNotFoundError:
+        # for working with NGC containers
+        from triton.language.extra.cuda.libdevice import tanh
+else:
+    from triton.language.math import tanh
+
+_TRUE = tl.constexpr(1)
+_FALSE = tl.constexpr(0)
 
 
 @triton.jit
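`ops/utils.py` is unchanged in this release (+0 −0 in the file list), so the `compare_version` helper used above is not shown in this diff. A plausible sketch of its contract — comparing the installed version of a package against a reference — assuming an implementation along these lines (and that `packaging` is available), not the actual source:

```python
import operator
from importlib.metadata import PackageNotFoundError, version

from packaging.version import Version


def compare_version(package: str, op, target: str) -> bool:
    # True iff op(installed_version, target_version) holds, e.g.
    # compare_version("triton", operator.ge, "3.0.0")
    try:
        installed = version(package)
    except PackageNotFoundError:
        return False
    return op(Version(installed), Version(target))
```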
@@ -12,13 +28,18 @@ def liger_cross_entropy_kernel(
     Y_ptr,
     Y_stride,
     loss_ptr,
+    z_loss_ptr,
     loss_stride,
     n_cols,
     n_non_ignore,
     ignore_index,
+    lse_square_scale: tl.constexpr,
     label_smoothing: tl.constexpr,
     reduction: tl.constexpr,  # set it as constexpr since reduction is always known at compile time
+    softcap,
+    RETURN_Z_LOSS: tl.constexpr,
     BLOCK_SIZE: tl.constexpr,
+    HAS_SOFTCAPPING: tl.constexpr,
 ):
     """
     This kernel computes both cross entropy loss and the gradient of the input.
@@ -30,13 +51,18 @@ def liger_cross_entropy_kernel(
     Y_ptr: Pointer to target tensor.
     Y_stride (int): The stride of the target tensor.
     loss_ptr: Pointer to tensor to store the loss.
+    z_loss_ptr: Pointer to tensor to store the z loss. No operation if RETURN_Z_LOSS is 0.
     loss_stride (int): The stride of the loss tensor.
     n_cols (int): The number of columns in the input tensor.
     n_non_ignore (int): The number of non-ignored elements in the batch.
     ignore_index (int): The index to ignore in the target.
     label_smoothing (float): The amount of smoothing when computing the loss, where 0.0 means no smoothing.
+    lse_square_scale (float): The scale of (logsumexp(_input)) ^ 2 added to the loss for the stability of training.
+    RETURN_Z_LOSS (int): The boolean value to decide whether storing z loss to z_loss_ptr or not. It must be 0 or 1.
     reduction (str): The string for the reduction to apply
+    softcap (float): The upper threshold for scaling logits to the range (-softcap, +softcap).
     BLOCK_SIZE (int): The block size for Triton operations.
+    HAS_SOFTCAPPING (bool): The boolean value to determine whether applying soft-capping or not.
     """
 
     # https://github.com/triton-lang/triton/issues/1058
@@ -58,6 +84,7 @@ def liger_cross_entropy_kernel(
         return
 
     loss_ptr += program_id * loss_stride
+    z_loss_ptr += program_id * loss_stride
 
     # Online softmax: 2 loads + 1 store (compared with 3 loads + 1 store for the safe softmax)
     # Refer to Algorithm 3 in the paper: https://arxiv.org/pdf/1805.02867
@@ -68,6 +95,8 @@ def liger_cross_entropy_kernel(
     ori_X_y = tl.load(
         X_ptr + y
     )  # we need to store the original value of X_y for the loss calculation
+    if HAS_SOFTCAPPING:
+        ori_X_y = softcap * tanh(ori_X_y / softcap)
 
     # Label smoothing is a general case of normal cross entropy
     # See the full derivation at https://github.com/linkedin/Liger-Kernel/pull/198#issue-2503665310
@@ -79,6 +108,8 @@ def liger_cross_entropy_kernel(
         X_block = tl.load(
             X_ptr + X_offsets, mask=X_offsets < n_cols, other=float("-inf")
         )
+        if HAS_SOFTCAPPING:
+            X_block = softcap * tanh(X_block / softcap)
         block_max = tl.max(X_block)
         if label_smoothing > 0:
             # scale X beforehand to avoid overflow
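The hunks above apply Gemma2-style logit soft-capping, `x ← softcap · tanh(x / softcap)`, before the online-softmax passes; the gradient hunk below multiplies by the matching chain-rule factor. A quick autograd sanity check of that factor (illustrative, plain PyTorch):

```python
import torch

softcap = 30.0
x = torch.randn(5, dtype=torch.float64, requires_grad=True)

# forward: softcap * tanh(x / softcap)
(softcap * torch.tanh(x / softcap)).sum().backward()

# chain rule used in the kernel: d/dx = 1 - tanh(x / softcap) ** 2
manual = 1 - torch.tanh(x / softcap) ** 2
assert torch.allclose(x.grad, manual)
```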
@@ -87,32 +118,49 @@ def liger_cross_entropy_kernel(
         d = d * tl.exp(m - m_new) + tl.sum(tl.exp(X_block - m_new))
         m = m_new
 
+    # log (sum(e^(X_i))) = log (sum(e ^ (max(X) * e ^ (X_i - max(X)))))
+    #                    = log (e^(max(X)) * sum(e ^ (X_i - max(X))))
+    #                    = max(X) + log (sum(e ^ (X_i - max(X)))) = m + log d
+    lse = m + tl.log(d)
+
     # 4. [Online Softmax] Second pass: compute gradients
     # For 'mean' reduction, gradients are normalized by number of non-ignored elements (N)
     # dx_y = (softmax(x_y) - 1) / N
     # dx_i = softmax(x_i) / N, i != y
     # For label smoothing:
-    # dx_i = (softmax(
+    # dx_i = (softmax(x_i) - label_smoothing / V) / N, V = n_cols, i != y
     # dx_y = (softmax(x_y) - label_smoothing / V - (1 - label_smoothing)) / N
     #      = dx_i - (1 - label_smoothing) / N
-    #
+    # With Z loss:
+    # dx_i = ((1 + 2 * lse_square_scale * lse) * softmax(x_i) - label_smoothing / V) / N, i != y
+    # dx_y = dx_i - (1 - label_smoothing) / N
     # For 'sum' reduction, no normalization is applied:
     # dx_y = softmax(x_y) - 1
     # dx_i = softmax(x_i), for i ≠ y
-    # For label smoothing:
-    # dx_i = (softmax(x_y) - label_smoothing / V), V = n_cols, i != y
-    # dx_y = (softmax(x_y) - label_smoothing / V - (1 - label_smoothing))
-    #      = dx_i - (1 - label_smoothing)
 
     for i in range(0, n_cols, BLOCK_SIZE):
         X_offsets = i + tl.arange(0, BLOCK_SIZE)
         X_block = tl.load(
             X_ptr + X_offsets, mask=X_offsets < n_cols, other=float("-inf")
         )
+        if HAS_SOFTCAPPING:
+            intermediate = tanh(X_block / softcap)
+            X_block = softcap * intermediate
+        # softmax(x_i)
+        X_block = tl.exp(X_block - m) / d
+        # derivative of z-loss: 2 * lse_square_scale * lse * softmax(x_i)
+        X_block += 2 * lse_square_scale * lse * X_block
+        # smoothing term
+        X_block += -eps
+        # special handle dx_y
+        X_block = tl.where(X_offsets != y, X_block, X_block - (1 - label_smoothing))
+        # reduction scale
         if reduction == "mean":
-            X_block = (tl.exp(X_block - m) / d - eps) / (n_non_ignore)
-        else:
-            X_block = tl.exp(X_block - m) / d - eps
+            X_block = X_block / (n_non_ignore)
+        # chain rule
+        # d(softcap * tanh(x / softcap)) = (1 - tanh^2(x / softcap))
+        if HAS_SOFTCAPPING:
+            X_block = X_block * (1 - intermediate * intermediate)
 
         tl.store(X_ptr + X_offsets, X_block, mask=X_offsets < n_cols)
 
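The comment block above states the gradient with z loss as `dx_i = ((1 + 2 * lse_square_scale * lse) * softmax(x_i) - label_smoothing / V) / N`. A small plain-PyTorch reference (illustrative: single row, no smoothing) that checks this against autograd on `loss = CE + lse_square_scale * lse^2`:

```python
import torch
import torch.nn.functional as F

lam = 1e-4  # lse_square_scale
x = torch.randn(8, dtype=torch.float64, requires_grad=True)
y = torch.tensor(3)

lse = torch.logsumexp(x, dim=0)
loss = F.cross_entropy(x.unsqueeze(0), y.unsqueeze(0)) + lam * lse**2
loss.backward()

p = F.softmax(x.detach(), dim=0)
manual = (1 + 2 * lam * lse.detach()) * p  # dx_i for i != y
manual[y] -= 1.0                           # dx_y = dx_i - 1
assert torch.allclose(x.grad, manual)
```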
@@ -124,35 +172,35 @@ def liger_cross_entropy_kernel(
 
     # loss = log (softmax(X_y)) = log ((e ^ (X_y - max(X)) / sum(e ^ (X - max(X))))
     #      = (X_y - max(X)) - log(sum(e ^ (X - max(X))))
+    #      = X_y - m - log d = X_y - lse
     # sum(e ^ (X - max(X))) must >= 1 because the max term is e ^ 0 = 1
     # So we can safely calculate log (softmax(X_y)) without overflow
-    loss = -(ori_X_y - m - tl.log(d))
+    loss = lse - ori_X_y
 
     # Original loss = H(q, p), with label smoothing regularization = H(q', p) and (label_smoothing / V) = eps
     # H(q', p) = (1 - label_smoothing) * H(q, p) + label_smoothing * H(u, p)
     #          = (1 - label_smoothing) * H(q, p) + eps * sum(logsoftmax(x_i))
     # By using m (global max of xi) and d (sum of e^(xi-m)), we can simplify as:
-    #          = (1 - label_smoothing) * H(q, p) + (
+    #          = (1 - label_smoothing) * H(q, p) + (sum(-eps * x_i) + label_smoothing * (m + logd))
     # Refer to H(q', p) in section 7 of the paper: https://arxiv.org/pdf/1512.00567
     # pytorch: https://github.com/pytorch/pytorch/blob/2981534f54d49fa3a9755c9b0855e7929c2527f0/aten/src/ATen/native/LossNLL.cpp#L516
     # See full derivation at https://github.com/linkedin/Liger-Kernel/pull/198#issuecomment-2333753087
     if label_smoothing > 0:
-        smooth_loss = scaled_x_sum + label_smoothing * (m + tl.log(d))
+        smooth_loss = scaled_x_sum + label_smoothing * lse
         loss = loss * (1 - label_smoothing) + smooth_loss
 
+    # An auxiliary loss, z_loss
+    # Refer to Page14 Loss function section in the paper PaLM: https://www.jmlr.org/papers/v24/22-1144.html
+    z_loss = lse_square_scale * lse * lse
+    loss += z_loss
     # Normalize the loss by the number of non-ignored elements if reduction is "mean"
     if reduction == "mean":
+        z_loss = z_loss / n_non_ignore
         loss = loss / n_non_ignore
 
-    # 6. Specially handle the i==y case where `dx_y = (softmax(x_y) - (1 - label_smoothing) / N`
-    X_y = tl.load(X_ptr + y)
-    if reduction == "mean":
-        X_y += -(1 - label_smoothing) / (n_non_ignore)
-    else:
-        X_y += -(1 - label_smoothing)
-
     tl.store(loss_ptr, loss)
-    tl.store(X_ptr + y, X_y)
+    if RETURN_Z_LOSS == _TRUE:
+        tl.store(z_loss_ptr, z_loss)
 
 
 # The hard limit of TRITON_MAX_TENSOR_NUMEL is 1048576 https://github.com/triton-lang/triton/blob/ba42a5c68fd0505f8c42f4202d53be0f8d9a5fe0/python/triton/language/core.py#L19
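In the kernel's notation (`m` = running max, `d` = sum of shifted exponentials, `lse = m + log d`), the hunk above adds the PaLM-style auxiliary loss on top of the (optionally label-smoothed) cross entropy, with $\lambda$ = `lse_square_scale`:

$$\mathrm{lse}=\log\sum_i e^{x_i},\qquad z_{\mathrm{loss}}=\lambda\,\mathrm{lse}^2,\qquad \mathcal{L}=(\mathrm{lse}-x_y)+z_{\mathrm{loss}}$$

With `reduction="mean"`, both terms are divided by the number of non-ignored tokens, and the separate value stored via `z_loss_ptr` is intended for logging only.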
@@ -161,7 +209,32 @@ def liger_cross_entropy_kernel(
 MAX_FUSED_SIZE = 65536 // 2  # the best size we found by manually tuning
 
 
-def cross_entropy_forward(_input, target, ignore_index, label_smoothing, reduction):
+_bool_to_return_z_loss = {
+    True: _TRUE.value,
+    False: _FALSE.value,
+}
+
+
+def cross_entropy_forward(
+    _input,
+    target,
+    ignore_index,
+    lse_square_scale,
+    label_smoothing,
+    reduction,
+    softcap,
+    return_z_loss,
+):
+    if not isinstance(return_z_loss, int):
+        assert (
+            return_z_loss in _bool_to_return_z_loss
+        ), f"return_z_loss must be True or False. Got: {return_z_loss}"
+        return_z_loss = _bool_to_return_z_loss[return_z_loss]
+    else:
+        assert (
+            return_z_loss in _bool_to_return_z_loss
+        ), f"return_z_loss must be True or False. Got: {return_z_loss}"
+
     BT, V = _input.shape
     n_rows = BT
 
@@ -169,6 +242,10 @@ def cross_entropy_forward(_input, target, ignore_index, label_smoothing, reduction):
 
     # unreduced loss
     loss_1d = torch.zeros(n_rows, dtype=_input.dtype, device=_input.device)
+    if return_z_loss == _TRUE.value:
+        z_loss_1d = torch.zeros(n_rows, dtype=_input.dtype, device=_input.device)
+    else:
+        z_loss_1d = loss_1d  # dummy ptr when return_z_loss == False
 
     n_non_ignore = (target != ignore_index).sum().item()
 
@@ -185,20 +262,30 @@ def cross_entropy_forward(_input, target, ignore_index, label_smoothing, reduction):
         Y_ptr=target,
         Y_stride=target.stride(-1),  # always 1
         loss_ptr=loss_1d,
+        z_loss_ptr=z_loss_1d,
         loss_stride=loss_1d.stride(-1),  # always 1
         n_cols=V,
         n_non_ignore=n_non_ignore,
         ignore_index=ignore_index,
+        lse_square_scale=lse_square_scale,
        label_smoothing=label_smoothing,
         reduction=reduction,
+        softcap=softcap if softcap is not None else 0.0,
+        RETURN_Z_LOSS=return_z_loss,
         BLOCK_SIZE=BLOCK_SIZE,
+        HAS_SOFTCAPPING=True if softcap is not None else False,
         # TODO: 32 seems to give the best performance
         # Performance is quite sensitive to num_warps
         num_warps=32 if not is_hip() else 16,
     )
 
     loss = torch.sum(loss_1d)
-    return loss, _input
+    if return_z_loss == _TRUE.value:
+        z_loss = torch.sum(z_loss_1d)
+    else:
+        z_loss = None
+
+    return loss, z_loss, _input
 
 
 def cross_entropy_backward(_input, grad_output):
@@ -233,7 +320,15 @@ class LigerCrossEntropyFunction(torch.autograd.Function):
 
     @staticmethod
     def forward(
-        ctx, _input, target, ignore_index=-100, label_smoothing=0.0, reduction="mean"
+        ctx,
+        _input: torch.Tensor,
+        target: torch.Tensor,
+        ignore_index: int = -100,
+        lse_square_scale: float = 0.0,
+        label_smoothing: float = 0.0,
+        reduction: str = "mean",
+        softcap: Optional[float] = None,
+        return_z_loss: bool = False,
     ):
         """
         The forward pass of the Liger Cross Entropy loss.
@@ -243,33 +338,48 @@ class LigerCrossEntropyFunction(torch.autograd.Function):
         _input (tensor): The input tensor of shape (BT, V) where B is batch size, T is sequence length, V is vocab size.
         target (tensor): The target tensor of shape (BT) where each value is in [0, V-1].
         ignore_index (int): The index to ignore in the target.
+        lse_square_scale (float): The scale of (logsumexp(_input)) ^ 2 added to the loss for the stability of training.
         label_smoothing (float): The amount of smoothing when computing the loss, where 0.0 means no smoothing.
         reduction (str): The reduction to apply to the output: "none" | "mean" | "sum".
+        softcap (Optional[float]): The upper threshold for scaling logits to the range (-softcap, +softcap).
+        return_z_loss (bool): When `return_z_loss` is `True`, returns (loss, z_loss) instead of (loss, None). Default: `False`
 
         Returns:
-        tensor: The computed loss.
+        tuple: A tuple with the computed losses with respect to loss and z loss. The elements are tensors or None.
         """
-        loss, _input = cross_entropy_forward(
-            _input, target, ignore_index, label_smoothing, reduction
+        loss, z_loss, _input = cross_entropy_forward(
+            _input,
+            target,
+            ignore_index,
+            lse_square_scale,
+            label_smoothing,
+            reduction,
+            softcap,
+            return_z_loss,
         )
         # TODO: investigation
         # If we don't detach the _input tensor, the memory will double
         # Not sure why but seems that there will be a time both grad and value exist but in different location
         ctx.save_for_backward(_input.detach())
-        return loss
+        ctx.return_z_loss = return_z_loss
+
+        return loss, z_loss
 
     @staticmethod
-    def backward(ctx, grad_output):
+    def backward(ctx, grad_output, grad_output2):
         """
         The backward pass of the Liger Cross Entropy loss.
 
         Parameters:
         ctx : The context object with saved tensors.
         grad_output (tensor): The tensor containing the gradient of the loss with respect to the output.
-
+        grad_output2 (tensor): No use.
         Returns:
         tuple: A tuple with the gradients with respect to the inputs. The elements are tensors or None.
         """
+        if ctx.return_z_loss:
+            del grad_output2  # z_loss is only for logging
+
         (_input,) = ctx.saved_tensors
         _input = cross_entropy_backward(_input, grad_output)
         return (
@@ -278,4 +388,7 @@ class LigerCrossEntropyFunction(torch.autograd.Function):
             None,
             None,
             None,
+            None,
+            None,
+            None,
         )
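Taken together, the cross_entropy.py hunks extend the public autograd Function. A usage sketch — the argument order follows the new `forward()` signature in this diff; shapes and values are illustrative:

```python
import torch
from liger_kernel.ops.cross_entropy import LigerCrossEntropyFunction

BT, V = 4, 128  # (batch * seq_len, vocab size)
logits = torch.randn(BT, V, device="cuda", requires_grad=True)
target = torch.randint(0, V, (BT,), device="cuda")

# autograd.Function.apply takes positional arguments only
loss, z_loss = LigerCrossEntropyFunction.apply(
    logits,
    target,
    -100,    # ignore_index
    1e-4,    # lse_square_scale: enables the PaLM-style z loss
    0.0,     # label_smoothing
    "mean",  # reduction
    30.0,    # softcap: Gemma2-style logit soft-capping
    True,    # return_z_loss: returns (loss, z_loss) instead of (loss, None)
)
loss.backward()  # z_loss is for logging only and carries no gradient
```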
{liger_kernel-0.4.0 → liger_kernel-0.4.1}/src/liger_kernel/ops/fused_linear_cross_entropy.py
RENAMED

@@ -21,8 +21,10 @@ def fused_linear_cross_entropy_forward(
     target,
     bias=None,
     ignore_index=-100,
+    lse_square_scale=0.0,
     label_smoothing=0.0,
     reduction="mean",
+    softcap=None,
 ):
     dtype = _input.dtype
     device = _input.device
@@ -86,12 +88,17 @@
             Y_ptr=target_chunk,
             Y_stride=target_chunk.stride(-1),  # always 1
             loss_ptr=loss_1d_slice,
+            z_loss_ptr=loss_1d_slice,  # dummy ptr, not used
             loss_stride=loss_1d_slice.stride(-1),  # always 1
             n_cols=V,
             n_non_ignore=n_non_ignore,
             ignore_index=ignore_index,
+            lse_square_scale=lse_square_scale,
             label_smoothing=label_smoothing,
             reduction=reduction,
+            softcap=softcap if softcap is not None else 0.0,
+            RETURN_Z_LOSS=0,  # False
+            HAS_SOFTCAPPING=True if softcap is not None else False,
             BLOCK_SIZE=BLOCK_SIZE,
             num_warps=32 if not is_hip() else 16,
         )
@@ -200,8 +207,10 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
         target,
         bias=None,
         ignore_index=-100,
+        lse_square_scale=0.0,
         label_smoothing=0.0,
         reduction="mean",
+        softcap=None,
     ):
         """
         Fusing the last linear layer with cross-entropy loss
@@ -221,7 +230,15 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
         reduction: reduction to apply
         """
         loss, grad_input, grad_weight, grad_bias = fused_linear_cross_entropy_forward(
-            _input, weight, target, bias, ignore_index, label_smoothing, reduction
+            _input,
+            weight,
+            target,
+            bias,
+            ignore_index,
+            lse_square_scale,
+            label_smoothing,
+            reduction,
+            softcap,
         )
         # downcast to dtype and store for backward
         ctx.save_for_backward(
@@ -238,4 +255,4 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
         grad_input, grad_weight, grad_bias = fused_linear_cross_entropy_backward(
             grad_output, grad_input, grad_weight, grad_bias
         )
-        return (grad_input, grad_weight, None, grad_bias, None, None, None)
+        return (grad_input, grad_weight, None, grad_bias, None, None, None, None, None)
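And the corresponding sketch for the fused path — the argument order follows the new `forward()` signature above; shapes are illustrative:

```python
import torch
from liger_kernel.ops.fused_linear_cross_entropy import (
    LigerFusedLinearCrossEntropyFunction,
)

BT, H, V = 8, 64, 4096
hidden = torch.randn(BT, H, device="cuda", requires_grad=True)
lm_head = torch.randn(V, H, device="cuda", requires_grad=True)
target = torch.randint(0, V, (BT,), device="cuda")

loss = LigerFusedLinearCrossEntropyFunction.apply(
    hidden,
    lm_head,
    target,
    None,    # bias
    -100,    # ignore_index
    0.0,     # lse_square_scale (new in 0.4.1)
    0.0,     # label_smoothing
    "mean",  # reduction
    None,    # softcap (new in 0.4.1)
)
loss.backward()
```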