liger_kernel_nightly-0.5.2.dev20250110102924-py3-none-any.whl → liger_kernel_nightly-0.5.2.dev20250118115043-py3-none-any.whl
- liger_kernel/ops/fused_linear_cross_entropy.py +22 -6
- liger_kernel/transformers/functional.py +6 -1
- liger_kernel/transformers/fused_linear_cross_entropy.py +7 -1
- {liger_kernel_nightly-0.5.2.dev20250110102924.dist-info → liger_kernel_nightly-0.5.2.dev20250118115043.dist-info}/METADATA +6 -6
- {liger_kernel_nightly-0.5.2.dev20250110102924.dist-info → liger_kernel_nightly-0.5.2.dev20250118115043.dist-info}/RECORD +9 -9
- {liger_kernel_nightly-0.5.2.dev20250110102924.dist-info → liger_kernel_nightly-0.5.2.dev20250118115043.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.5.2.dev20250110102924.dist-info → liger_kernel_nightly-0.5.2.dev20250118115043.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.5.2.dev20250110102924.dist-info → liger_kernel_nightly-0.5.2.dev20250118115043.dist-info}/WHEEL +0 -0
- {liger_kernel_nightly-0.5.2.dev20250110102924.dist-info → liger_kernel_nightly-0.5.2.dev20250118115043.dist-info}/top_level.txt +0 -0
```diff
--- a/liger_kernel/ops/fused_linear_cross_entropy.py
+++ b/liger_kernel/ops/fused_linear_cross_entropy.py
@@ -24,7 +24,9 @@ def fused_linear_cross_entropy_forward(
     label_smoothing=0.0,
     reduction="mean",
     softcap=None,
+    return_z_loss=False,
 ):
+    assert isinstance(return_z_loss, bool), f"return_z_loss must be True or False. Got: {return_z_loss}"
     device = _input.device
 
     # inputs have shape: BT x H
@@ -47,6 +49,7 @@ def fused_linear_cross_entropy_forward(
     grad_bias = torch.zeros_like(bias, device=device) if bias is not None else None
     # we use fp32 for loss accumulator
     loss_1d = torch.zeros(BT, dtype=torch.float32, device=device)
+    z_loss_1d = torch.zeros(BT, dtype=_input.dtype, device=_input.device) if return_z_loss else None
 
     # TODO: evaluate how CUDA synchronization caused by .item() affects the speed
     target_mask = target != ignore_index
@@ -81,6 +84,7 @@ def fused_linear_cross_entropy_forward(
 
         # unreduced loss
         loss_1d_slice = loss_1d[start_idx:end_idx]  # chunk_size,
+        z_loss_1d_slice = z_loss_1d[start_idx:end_idx] if return_z_loss else None
 
         # ensure _input and target are contiguous
         logits_chunk = logits_chunk.contiguous()
@@ -94,7 +98,7 @@ def fused_linear_cross_entropy_forward(
             Y_stride=target_chunk.stride(-1),  # always 1
             weight_ptr=ce_weight,
             loss_ptr=loss_1d_slice,
-            z_loss_ptr=
+            z_loss_ptr=z_loss_1d_slice,
             loss_stride=loss_1d_slice.stride(-1),  # always 1
             n_cols=V,
             n_non_ignore=total_n_non_ignore,
@@ -105,7 +109,7 @@ def fused_linear_cross_entropy_forward(
             label_smoothing=label_smoothing,
             reduction=reduction,
             softcap=softcap,
-            RETURN_Z_LOSS=
+            RETURN_Z_LOSS=return_z_loss,
             HAS_WEIGHT=True if ce_weight is not None else False,
             HAS_SOFTCAPPING=True if softcap is not None else False,
             BLOCK_SIZE=BLOCK_SIZE,
@@ -113,6 +117,8 @@ def fused_linear_cross_entropy_forward(
         )
 
         loss_1d[start_idx:end_idx] = loss_1d_slice
+        if return_z_loss:
+            z_loss_1d[start_idx:end_idx] = z_loss_1d_slice
         grad_logits_chunk = logits_chunk  # chunk_size x V
 
         grad_input[start_idx:end_idx] = grad_logits_chunk @ weight
@@ -139,9 +145,13 @@ def fused_linear_cross_entropy_forward(
 
     if reduction == "none":
         loss = loss_1d
+        z_loss = z_loss_1d if return_z_loss else None
+
     else:
         loss = torch.sum(loss_1d)
-    return loss, grad_input, grad_weight, grad_bias
+        z_loss = torch.sum(z_loss_1d) if return_z_loss else None
+    print(f"{z_loss=}")
+    return loss, z_loss, grad_input, grad_weight, grad_bias
 
 
 def fused_linear_cross_entropy_backward(grad_output, grad_input, grad_weight, grad_bias):
```
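The z_loss threaded through the forward pass above is the auxiliary stabilizer used in PaLM-style training: a penalty on the squared log-partition function of the logits, which discourages them from drifting. As a reference for what the kernel accumulates into `z_loss_1d`, here is a plain-PyTorch sketch; it is an eager re-derivation under the usual z-loss definition, not the Triton kernel, and the `lse_square_scale` name mirrors the coefficient the Liger cross-entropy kernel exposes:

```python
import torch


def reference_z_loss(logits: torch.Tensor, lse_square_scale: float = 1e-4) -> torch.Tensor:
    """Per-token z-loss: lse_square_scale * logsumexp(logits)^2 (eager sketch, not the kernel)."""
    lse = torch.logsumexp(logits.float(), dim=-1)  # log of the partition function, shape (BT,)
    return lse_square_scale * lse.pow(2)


# Example: 4 tokens over a 10-word vocabulary, mirroring the BT x V chunk layout.
logits = torch.randn(4, 10)
print(reference_z_loss(logits))  # one value per token, like z_loss_1d
```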
```diff
--- a/liger_kernel/ops/fused_linear_cross_entropy.py
+++ b/liger_kernel/ops/fused_linear_cross_entropy.py
@@ -206,6 +216,7 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
         label_smoothing=0.0,
         reduction="mean",
         softcap=None,
+        return_z_loss: bool = False,
     ):
         """
         Fusing the last linear layer with cross-entropy loss
@@ -226,7 +237,7 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
         reduction: reduction to apply
         """
 
-        loss, grad_input, grad_weight, grad_bias = fused_linear_cross_entropy_forward(
+        loss, z_loss, grad_input, grad_weight, grad_bias = fused_linear_cross_entropy_forward(
             _input=_input,
             weight=weight,
             target=target,
@@ -237,6 +248,7 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
             label_smoothing=label_smoothing,
             reduction=reduction,
             softcap=softcap,
+            return_z_loss=return_z_loss,
         )
         # downcast to dtype and store for backward
         ctx.save_for_backward(
@@ -244,11 +256,14 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
             grad_weight.detach() if grad_weight is not None else None,
             grad_bias.detach() if bias is not None else None,
         )
-        return loss
+        ctx.return_z_loss = return_z_loss
+        return loss, z_loss
 
     @staticmethod
     @amp_custom_bwd
-    def backward(ctx, grad_output):
+    def backward(ctx, grad_output, grad_output2):
+        if ctx.return_z_loss:
+            del grad_output2  # z_loss is only for logging
         (grad_input, grad_weight, grad_bias) = ctx.saved_tensors
         grad_input, grad_weight, grad_bias = fused_linear_cross_entropy_backward(
             grad_output, grad_input, grad_weight, grad_bias
@@ -264,4 +279,5 @@ class LigerFusedLinearCrossEntropyFunction(torch.autograd.Function):
             None,
             None,
             None,
+            None,
         )
```
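Two details of the autograd change above are easy to miss: once `forward` returns a pair, `backward` must accept one incoming gradient per output (hence `grad_output2`, discarded because z_loss is logging-only), and `backward`'s return tuple needs one slot per `forward` input (hence the extra trailing `None` for the new `return_z_loss` argument). A minimal standalone sketch of the pattern, not Liger code:

```python
import torch


class TwoOutputFn(torch.autograd.Function):
    # A Function returning two tensors must accept two incoming gradients in
    # backward, and return one gradient (or None) per forward input.
    @staticmethod
    def forward(ctx, x):
        return x * 2, x.detach().sum()  # second output is "logging only"

    @staticmethod
    def backward(ctx, grad_main, grad_aux):
        del grad_aux  # ignore the gradient for the auxiliary output
        return grad_main * 2  # one slot: forward took a single input


x = torch.ones(3, requires_grad=True)
y, aux = TwoOutputFn.apply(x)
y.sum().backward()
print(x.grad)  # tensor([2., 2., 2.])
```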
```diff
--- a/liger_kernel/transformers/functional.py
+++ b/liger_kernel/transformers/functional.py
@@ -56,8 +56,9 @@ def liger_fused_linear_cross_entropy(
     label_smoothing: float = 0.0,
     reduction: str = "mean",
     softcap: Optional[float] = None,
+    return_z_loss: bool = False,
 ):
-    return LigerFusedLinearCrossEntropyFunction.apply(
+    loss, z_loss = LigerFusedLinearCrossEntropyFunction.apply(
         input,
         weight,
         target,
@@ -68,7 +69,11 @@ def liger_fused_linear_cross_entropy(
         label_smoothing,
         reduction,
         softcap,
+        return_z_loss,
     )
+    if not return_z_loss:
+        return loss
+    return loss, z_loss
 
 
 def liger_fused_linear_jsd(
```
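In practice the functional wrapper now behaves as follows. A usage sketch, assuming a CUDA device (the kernels are Triton-based) and the `lse_square_scale` option the cross-entropy kernel exposes for weighting the z term; shapes are illustrative:

```python
import torch

from liger_kernel.transformers.functional import liger_fused_linear_cross_entropy

# Illustrative shapes: 8 tokens (B*T), hidden 64, vocab 128.
_input = torch.randn(8, 64, device="cuda", requires_grad=True)
weight = torch.randn(128, 64, device="cuda", requires_grad=True)
target = torch.randint(0, 128, (8,), device="cuda")

# Default path is unchanged: a single loss tensor comes back.
loss = liger_fused_linear_cross_entropy(_input, weight, target)

# With the new flag the call returns (loss, z_loss). lse_square_scale is the
# (assumed) coefficient on the z term; at its default of 0.0 the reported
# z_loss would simply be zero.
loss, z_loss = liger_fused_linear_cross_entropy(
    _input, weight, target, lse_square_scale=1e-4, return_z_loss=True
)
loss.backward()  # z_loss is for logging only and carries no gradient
```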
```diff
--- a/liger_kernel/transformers/fused_linear_cross_entropy.py
+++ b/liger_kernel/transformers/fused_linear_cross_entropy.py
@@ -14,6 +14,7 @@ class LigerFusedLinearCrossEntropyLoss(torch.nn.Module):
         label_smoothing: float = 0.0,
         reduction: str = "mean",
         softcap: Optional[float] = None,
+        return_z_loss: bool = False,
 ):
         super().__init__()
         assert (label_smoothing >= 0) and (
@@ -31,9 +32,10 @@ class LigerFusedLinearCrossEntropyLoss(torch.nn.Module):
         self.label_smoothing = label_smoothing
         self.reduction = reduction
         self.softcap = softcap
+        self.return_z_loss = return_z_loss
 
     def forward(self, lin_weight, _input, target, bias=None):
-        return LigerFusedLinearCrossEntropyFunction.apply(
+        loss, z_loss = LigerFusedLinearCrossEntropyFunction.apply(
             _input,
             lin_weight,
             target,
@@ -44,4 +46,8 @@ class LigerFusedLinearCrossEntropyLoss(torch.nn.Module):
             self.label_smoothing,
             self.reduction,
             self.softcap,
+            self.return_z_loss,
         )
+        if not self.return_z_loss:
+            return loss
+        return loss, z_loss
```
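The module-level API gets the same opt-in behavior. A sketch under the same assumptions (CUDA device, illustrative shapes); note that `forward` takes the linear weight first:

```python
import torch

from liger_kernel.transformers import LigerFusedLinearCrossEntropyLoss

loss_fn = LigerFusedLinearCrossEntropyLoss(return_z_loss=True)

hidden = torch.randn(8, 64, device="cuda", requires_grad=True)  # B*T x H
lm_head_weight = torch.randn(128, 64, device="cuda")            # V x H
target = torch.randint(0, 128, (8,), device="cuda")

loss, z_loss = loss_fn(lm_head_weight, hidden, target)  # weight comes first
loss.backward()
# With return_z_loss=False (the default) the module still returns just `loss`,
# so existing call sites keep working unmodified.
```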
(Pairs below where the removed and added text read identically are whitespace-only changes.)

```diff
--- a/liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/METADATA
+++ b/liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: liger_kernel_nightly
-Version: 0.5.2.dev20250110102924
+Version: 0.5.2.dev20250118115043
 Summary: Efficient Triton kernels for LLM Training
 License: BSD 2-CLAUSE LICENSE
 Copyright 2024 LinkedIn Corporation
@@ -105,8 +105,8 @@ Requires-Dist: seaborn; extra == "dev"
 
 <details>
 <summary>Latest News 🔥</summary>
-
-- [2024/12/11] We release [v0.5.0](https://github.com/linkedin/Liger-Kernel/releases/tag/v0.5.0): 80% more memory efficient post training losses (DPO, ORPO, CPO, etc)!
+
+- [2024/12/11] We release [v0.5.0](https://github.com/linkedin/Liger-Kernel/releases/tag/v0.5.0): 80% more memory efficient post training losses (DPO, ORPO, CPO, etc)!
 - [2024/12/5] We release LinkedIn Engineering Blog - [Liger-Kernel: Empowering an open source ecosystem of Triton Kernels for Efficient LLM Training](https://www.linkedin.com/blog/engineering/open-source/liger-kernel-open-source-ecosystem-for-efficient-llm-training)
 - [2024/11/6] We release [v0.4.0](https://github.com/linkedin/Liger-Kernel/releases/tag/v0.4.0): Full AMD support, Tech Report, Modal CI, Llama-3.2-Vision!
 - [2024/10/21] We have released the tech report of Liger Kernel on Arxiv: https://arxiv.org/pdf/2410.10989
@@ -299,7 +299,7 @@ loss.backward()
 | Gemma1 | `liger_kernel.transformers.apply_liger_kernel_to_gemma` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Qwen2, Qwen2.5, & QwQ | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
-| Qwen2-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
+| Qwen2-VL, & QVQ | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 | Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
 
 
@@ -353,11 +353,11 @@ loss.backward()
 - [License Information](https://github.com/linkedin/Liger-Kernel/blob/main/docs/License.md)
 
 ## Sponsorship and Collaboration
-
+
 - [AMD](https://www.amd.com/en.html): Providing AMD GPUs for our AMD CI.
 - [Intel](https://www.intel.com/): Providing Intel GPUs for our Intel CI.
 - [Modal](https://modal.com/): Free 3000 credits from GPU MODE IRL for our NVIDIA CI.
-- [EmbeddedLLM](https://embeddedllm.com/): Making Liger Kernel run fast and stable on AMD.
+- [EmbeddedLLM](https://embeddedllm.com/): Making Liger Kernel run fast and stable on AMD.
 - [HuggingFace](https://huggingface.co/): Integrating Liger Kernel into Hugging Face Transformers and TRL.
 - [Lightning AI](https://lightning.ai/): Integrating Liger Kernel into Lightning Thunder.
 - [Axolotl](https://axolotl.ai/): Integrating Liger Kernel into Axolotl.
```
```diff
--- a/liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/RECORD
+++ b/liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/RECORD
@@ -12,7 +12,7 @@ liger_kernel/chunked_loss/orpo_loss.py,sha256=yjcrrbVeemLYodoSKT-FMSnaPtyKAZ3aOr
 liger_kernel/chunked_loss/simpo_loss.py,sha256=3TTc7U79Orjgi-Wu81WZkWk5MgsdqKXIOBHgIvDazPw,3865
 liger_kernel/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 liger_kernel/ops/cross_entropy.py,sha256=SRzAF9Ek84pBVFy3wqQZs7AhRoorKRIgQ-Td_rtl1Kk,18606
-liger_kernel/ops/fused_linear_cross_entropy.py,sha256=
+liger_kernel/ops/fused_linear_cross_entropy.py,sha256=Ad8mhD1LZSX-kNSi8xY_zeM-Gq-BzrleyDdf8E0c3Rs,10934
 liger_kernel/ops/fused_linear_jsd.py,sha256=eKqaADj7LgWfoYqyH03tjrmhNTfJOF1Dhx_bWzBTnTU,9600
 liger_kernel/ops/geglu.py,sha256=axGvCIvlBzuluoAIrWTsp2iZM4BFKNInkPov8YVvH9E,4126
 liger_kernel/ops/group_norm.py,sha256=qD4D4lSjSgVtO52EBNLC2iTseALRgPgqXE50U2woggk,10837
@@ -29,8 +29,8 @@ liger_kernel/ops/experimental/mm_int8int2.py,sha256=TrS9lpwekrik_w5qE7AhMJD1bcq-
 liger_kernel/transformers/__init__.py,sha256=QPmYkL6hosBPpPqCUGqvIvAtD9XzLgvZqZxUyYMZeVk,2008
 liger_kernel/transformers/auto_model.py,sha256=0qCTRZt280Bj_LcFdzo9hlaR-BWNazawXOGgoCZjgEg,1545
 liger_kernel/transformers/cross_entropy.py,sha256=LtiHlj_tK2YFpilwvbG_NEVzbf82zKRpWCZMjaFUd4M,1681
-liger_kernel/transformers/functional.py,sha256=
-liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=
+liger_kernel/transformers/functional.py,sha256=lDOjch622dJIc78K3ePFK_H1DX00GC5kKjodjcbEgbM,4624
+liger_kernel/transformers/fused_linear_cross_entropy.py,sha256=ygU7cycCHWvSrrTgn2TseN8t-Qwfer4V7ldwhZ1E6_g,1776
 liger_kernel/transformers/fused_linear_jsd.py,sha256=bZ4otCvWBuOnA5XdQL-FzZVItJlDt-ht9e_pG7PG93E,3999
 liger_kernel/transformers/geglu.py,sha256=mrgqzIUVd6lN7fkDKLkw5YaESDxDtFgbot430WwPVOQ,1107
 liger_kernel/transformers/group_norm.py,sha256=URmjkQFsrbMffzcJiGpX7ckxWlpL95AiJS-80hwAWPk,2173
@@ -58,9 +58,9 @@ liger_kernel/transformers/trainer/__init__.py,sha256=p7yQfklV8-467qSz_ZMimkbDF7H
 liger_kernel/transformers/trainer/orpo_trainer.py,sha256=pdekW7l6Qg_aqa5SYKYlSWUF8m3lkOFvFLcIMEHrz9s,8338
 liger_kernel/triton/__init__.py,sha256=qCiCamzCRv6lpV8IqpAc9YMdNKC7GKurClWceQPnlis,92
 liger_kernel/triton/monkey_patch.py,sha256=Rd0hUHAzDkFfHvnX7-PBaNK5EKnZhtfM_h-fgQH9HPY,1568
-liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
-liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/METADATA,sha256=
-liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
-liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
-liger_kernel_nightly-0.5.2.dev20250110102924.dist-info/RECORD,,
+liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/METADATA,sha256=-JNLpqcUGxtW2t-EcDWMEkSyjE5B1ZyyHzJJs3P_ZyI,21055
+liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.5.2.dev20250118115043.dist-info/RECORD,,
```
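For reference, the RECORD entries above follow the wheel spec's `path,sha256=<digest>,size` format, where the digest is an unpadded URL-safe base64 encoding of the file's SHA-256. A small sketch for regenerating or checking one entry (illustrative, not part of the package):

```python
import base64
import hashlib


def record_entry(path: str) -> str:
    # Build a wheel RECORD line: urlsafe base64 sha256 with padding stripped,
    # plus the byte size -- the same format as the entries listed above.
    data = open(path, "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"


# e.g. record_entry("liger_kernel/ops/fused_linear_cross_entropy.py")
# -> "liger_kernel/ops/fused_linear_cross_entropy.py,sha256=Ad8mhD1LZ...,10934"
```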
The remaining dist-info files (LICENSE, NOTICE, WHEEL, top_level.txt) carry over with no content changes; only the directory name picks up the new version.