liger-kernel-nightly 0.4.1.dev20241114041219__tar.gz → 0.4.1.dev20241115012952__tar.gz
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {liger_kernel_nightly-0.4.1.dev20241114041219/src/liger_kernel_nightly.egg-info → liger_kernel_nightly-0.4.1.dev20241115012952}/PKG-INFO +1 -1
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/pyproject.toml +1 -1
- liger_kernel_nightly-0.4.1.dev20241115012952/src/liger_kernel/chunked_loss/dpo_loss.py +57 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/chunked_loss/fused_linear_preference.py +101 -2
- liger_kernel_nightly-0.4.1.dev20241115012952/src/liger_kernel/chunked_loss/orpo_loss.py +63 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952/src/liger_kernel_nightly.egg-info}/PKG-INFO +1 -1
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel_nightly.egg-info/SOURCES.txt +1 -0
- liger_kernel_nightly-0.4.1.dev20241114041219/src/liger_kernel/chunked_loss/orpo_loss.py +0 -117
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/LICENSE +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/NOTICE +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/README.md +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/setup.cfg +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/chunked_loss/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/env_report.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/experimental/embedding.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/experimental/mm_int8int2.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/geglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/group_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/kl_div.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/layer_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/rms_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/rope.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/swiglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/ops/utils.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/auto_model.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/experimental/embedding.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/functional.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/geglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/group_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/kl_div.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/layer_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/gemma.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/gemma2.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/llama.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/mistral.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/mixtral.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/mllama.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/phi3.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/qwen2.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/model/qwen2_vl.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/monkey_patch.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/rms_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/rope.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/swiglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/transformers/trainer_integration.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/triton/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/triton/monkey_patch.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel_nightly.egg-info/dependency_links.txt +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel_nightly.egg-info/requires.txt +0 -0
- {liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel_nightly.egg-info/top_level.txt +0 -0
{liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/pyproject.toml

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "liger_kernel_nightly"
-version = "0.4.1.dev20241114041219"
+version = "0.4.1.dev20241115012952"
 description = "Efficient Triton kernels for LLM Training"
 urls = { "Homepage" = "https://github.com/linkedin/Liger-Kernel" }
 readme = { file = "README.md", content-type = "text/markdown" }
```
liger_kernel_nightly-0.4.1.dev20241115012952/src/liger_kernel/chunked_loss/dpo_loss.py

```diff
@@ -0,0 +1,57 @@
+import torch.nn.functional as F
+
+from liger_kernel.chunked_loss.fused_linear_preference import (
+    LigerFusedLinearPreferenceBase,
+)
+
+
+class LigerFusedLinearDPOFunction(LigerFusedLinearPreferenceBase):
+
+    @staticmethod
+    def preference_loss_fn(chosen_logps, rejected_logps, beta=0.1):
+        """
+        Compute DPO loss (Direct Preference Optimization).
+        Args:
+            chosen_logps (torch.Tensor): Avg log probabilities of chosen tokens. Shape: (batch_size,).
+            rejected_logps (torch.Tensor): Avg log probabilities of rejected tokens. Shape: (batch_size,).
+            beta (float): Weight for the direct preference loss.
+        """
+        logits_diff = beta * (chosen_logps - rejected_logps)
+        losses = -F.logsigmoid(logits_diff)
+        return losses.sum()
+
+    @staticmethod
+    def forward(
+        ctx,
+        _input,
+        weight,
+        target,
+        bias=None,
+        ignore_index=-100,
+        beta=0.1,
+        compute_nll_loss=True,
+        compiled=True,
+    ):
+        """
+        Fused linear layer with DPO (Direct Preference Optimization) loss.
+        Handles both the forward and backward pass of the final linear layer with DPO loss.
+        """
+        return LigerFusedLinearPreferenceBase.forward(
+            ctx=ctx,
+            _input=_input,
+            weight=weight,
+            target=target,
+            bias=bias,
+            loss_fn=LigerFusedLinearDPOFunction.preference_loss_fn,
+            compute_nll_loss=compute_nll_loss,
+            ignore_index=ignore_index,
+            beta=beta,
+            compiled=compiled,
+        )
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        # Get gradients for _input, weight, bias, and target from the base class
+        grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
+        # Return these gradients, followed by None for the remaining inputs
+        return *grads, None, None, None, None
```
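For reference, the `preference_loss_fn` added above is a reference-model-free DPO term over the average per-token log-probabilities of the chosen and rejected halves of the batch:

$$
\mathcal{L}_{\mathrm{DPO}} = -\sum_{i=1}^{B} \log \sigma\big(\beta \, (\log p_\theta(y_c^{(i)}) - \log p_\theta(y_r^{(i)}))\big)
$$

A minimal usage sketch of the new autograd function follows. The sizes and random tensors are illustrative assumptions, not part of the diff; the convention inherited from the base class is that the first half of the batch dimension holds chosen sequences and the second half rejected ones:

```python
import torch

from liger_kernel.chunked_loss.dpo_loss import LigerFusedLinearDPOFunction

B, T, H, V = 2, 16, 64, 128  # hypothetical batch/sequence/hidden/vocab sizes
_input = torch.randn(2 * B, T, H, requires_grad=True)  # chosen stacked over rejected
weight = torch.randn(V, H, requires_grad=True)         # final lm_head weight
target = torch.randint(0, V, (2 * B, T))               # token ids

# Positional args after `target` keep their defaults:
# bias=None, ignore_index=-100, beta=0.1, compute_nll_loss=True, compiled=True.
loss = LigerFusedLinearDPOFunction.apply(_input, weight, target)
loss.backward()
```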
{liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel/chunked_loss/fused_linear_preference.py

```diff
@@ -1,7 +1,23 @@
+from abc import abstractmethod
+from functools import partial
+
 import torch
+from torch.nn import functional as F
 
 
 class LigerFusedLinearPreferenceBase(torch.autograd.Function):
+
+    @abstractmethod
+    def preference_loss_fn(chosen_logps, rejected_logps, beta=0.1):
+        """
+        Compute preference loss.
+        Args:
+            chosen_logps (torch.Tensor): Avg log probabilities of chosen tokens. Shape: (batch_size,).
+            rejected_logps (torch.Tensor): Avg log probabilities of rejected tokens. Shape: (batch_size,).
+            beta (float): Weight for the odds ratio loss.
+        """
+        raise NotImplementedError("Preference loss function must be implemented.")
+
     @staticmethod
     def forward(
         ctx,
@@ -11,6 +27,9 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
         bias=None,
         loss_fn=None,
         chunk_size=1,
+        compute_nll_loss=True,
+        ignore_index=-100,
+        beta=0.1,
         compiled=True,
     ):
         """
@@ -24,6 +43,9 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
             bias (torch.Tensor, optional): Bias tensor. Shape: (vocab_size,).
             loss_fn (callable): Loss function to compute the loss on a chunk of input/target.
             chunk_size (int): Size of a chunk (# of batches of stacked chosen and rejected inputs).
+            compute_nll_loss (bool): Whether to compute NLL loss.
+            ignore_index (int): Index to ignore for loss computation.
+            beta (float): Weight for the odds ratio loss.
             compiled (bool): Whether to use torch compile for chunk accumulation.
         """
         # TODO: Tune CHUNK_SIZE to fully utilize the GPU
@@ -36,13 +58,23 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
         loss_acc = torch.zeros((), device=_input.device)
 
         chunks = max(1, _input.shape[0] // (2 * CHUNK_SIZE))
+        loss_func_to_call = partial(
+            LigerFusedLinearPreferenceBase._compute_loss,
+            preference_loss_fn=loss_fn,
+            ignore_index=ignore_index,
+            beta=beta,
+            compute_nll_loss=compute_nll_loss,
+            full_target=target,
+        )
 
         def accumulate_chunk(input_chunk, target_chunk):
             if bias is not None:
                 (chunk_grad_input, chunk_grad_weight, chunk_grad_bias), (
                     chunk_loss,
                     (chunk_or_loss, chunk_chosen_logps, chunk_rejected_logps),
-                ) = torch.func.grad_and_value(loss_fn, argnums=(0, 1, 3), has_aux=True)(
+                ) = torch.func.grad_and_value(
+                    loss_func_to_call, argnums=(0, 1, 3), has_aux=True
+                )(
                     input_chunk, weight, target_chunk, bias
                 )
                 grad_bias.add_(chunk_grad_bias)
@@ -50,7 +82,9 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
                 (chunk_grad_input, chunk_grad_weight), (
                     chunk_loss,
                     (chunk_or_loss, chunk_chosen_logps, chunk_rejected_logps),
-                ) = torch.func.grad_and_value(loss_fn, argnums=(0, 1), has_aux=True)(
+                ) = torch.func.grad_and_value(
+                    loss_func_to_call, argnums=(0, 1), has_aux=True
+                )(
                     input_chunk, weight, target_chunk
                 )
                 grad_weight.add_(chunk_grad_weight)
@@ -105,3 +139,68 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
         grad_bias = grad_bias * grad_output if grad_bias is not None else None
 
         return grad_input, grad_weight, None, grad_bias, None, None, None
+
+    @staticmethod
+    def _compute_loss(
+        input_chunk,
+        weight,
+        target_chunk,
+        bias=None,
+        preference_loss_fn=None,
+        full_target=None,
+        ignore_index=-100,
+        beta=0.1,
+        compute_nll_loss=True,
+        **loss_kwargs,
+    ):
+        """
+        Compute the total loss for a chunk of input and target, while using an alignment/preference loss function.
+        Args:
+            preference_loss_fn (callable): Loss function to compute the loss on a chunk of input/target.
+            input_chunk (torch.Tensor): Chunk of input tensor. Shape: (2 * chunk_size, sequence_length, hidden_size).
+            weight (torch.Tensor): Weight tensor. Shape: (vocab_size, hidden_size).
+            target_chunk (torch.Tensor): Chunk of target tensor. Shape: (2 * chunk_size, sequence_length).
+            bias (torch.Tensor, optional): Bias tensor. Shape: (vocab_size,).
+            full_target (torch.Tensor): Full target tensor. Shape: (batch_size, sequence_length).
+            ignore_index (int): Index to ignore for loss computation.
+            beta (float): Weight for the odds ratio loss.
+            loss_kwargs (dict): Additional arguments for the loss function.
+        """
+        len_chosen_chunk = target_chunk.shape[0] // 2
+
+        logits_chunk = input_chunk @ weight.t()  # chunk_size x V
+        if bias is not None:
+            logits_chunk = logits_chunk + bias
+        log_probs_chunk = F.log_softmax(logits_chunk.float(), dim=-1)
+
+        chosen_nll_loss = 0.0
+        if compute_nll_loss:
+            chosen_nll_loss = F.nll_loss(
+                log_probs_chunk[:len_chosen_chunk].view(-1, log_probs_chunk.shape[-1]),
+                target_chunk[:len_chosen_chunk].view(-1),
+                reduction="sum",
+                ignore_index=ignore_index,
+            )
+            chosen_nll_loss = (
+                chosen_nll_loss
+                / (full_target[: full_target.shape[0] // 2] != ignore_index).sum()
+            )
+
+        loss_mask = target_chunk != ignore_index
+        label_chunk = torch.where(loss_mask, target_chunk, 0)
+
+        per_token_logps = log_probs_chunk.gather(-1, label_chunk.unsqueeze(-1)).squeeze(
+            -1
+        )
+        average_log_prob = (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
+
+        chosen_logps = average_log_prob[:len_chosen_chunk]
+        rejected_logps = average_log_prob[len_chosen_chunk:]
+
+        alignment_loss = preference_loss_fn(
+            chosen_logps, rejected_logps, beta=beta, **loss_kwargs
+        )
+        alignment_loss = alignment_loss / (full_target.shape[0] // 2)
+
+        loss = chosen_nll_loss - alignment_loss
+        return loss, (alignment_loss, chosen_logps, rejected_logps)
```
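The key change above is that the base class now owns the chunked loss computation: `forward` binds the subclass's `preference_loss_fn` into `_compute_loss` via `functools.partial`, and each chunk is pushed through `torch.func.grad_and_value(..., has_aux=True)`, which returns the chunk's gradients together with its loss and auxiliary outputs in one call, so full-batch activations never need to be kept alive. A standalone sketch of that calling pattern (the toy loss and shapes are illustrative assumptions, not from the diff):

```python
import torch
import torch.nn.functional as F


def toy_chunk_loss(input_chunk, weight, target_chunk, bias):
    # Mirrors _compute_loss's positional layout: argnums=(0, 1, 3) requests
    # gradients w.r.t. input_chunk, weight, and bias; target_chunk (index 2) is skipped.
    logits = input_chunk @ weight.t() + bias
    loss = F.cross_entropy(logits, target_chunk)
    aux = logits.detach().mean()  # stand-in for the (alignment_loss, logps) aux tuple
    return loss, aux


input_chunk = torch.randn(4, 8)
weight = torch.randn(10, 8)
bias = torch.zeros(10)
target_chunk = torch.randint(0, 10, (4,))

# has_aux=True -> returns (grads_for_argnums, (loss, aux)) in a single pass.
(g_input, g_weight, g_bias), (loss, aux) = torch.func.grad_and_value(
    toy_chunk_loss, argnums=(0, 1, 3), has_aux=True
)(input_chunk, weight, target_chunk, bias)
```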
liger_kernel_nightly-0.4.1.dev20241115012952/src/liger_kernel/chunked_loss/orpo_loss.py

```diff
@@ -0,0 +1,63 @@
+import torch
+import torch.nn.functional as F
+
+from liger_kernel.chunked_loss.fused_linear_preference import (
+    LigerFusedLinearPreferenceBase,
+)
+
+
+class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
+
+    @staticmethod
+    def preference_loss_fn(chosen_logps, rejected_logps, beta=0.1):
+        """
+        Compute odds-ratio loss.
+        Args:
+            chosen_logps (torch.Tensor): Avg log probabilities of chosen tokens. Shape: (batch_size,).
+            rejected_logps (torch.Tensor): Avg log probabilities of rejected tokens. Shape: (batch_size,).
+            beta (float): Weight for the odds ratio loss.
+        """
+        log_odds = (chosen_logps - rejected_logps) - (
+            torch.log1p(-torch.exp(chosen_logps))
+            - torch.log1p(-torch.exp(rejected_logps))
+        )
+        ratio = F.logsigmoid(log_odds)
+        return beta * ratio.sum()
+
+    @staticmethod
+    def forward(
+        ctx,
+        _input,
+        weight,
+        target,
+        bias=None,
+        ignore_index=-100,
+        beta=0.1,
+        compute_nll_loss=True,
+        compiled=True,
+    ):
+        """
+        Fused linear layer with ORPO (Odds-Ratio Preference Optimization) loss.
+        Handles both the forward and backward pass of the final linear layer with ORPO loss.
+        Inspired from LigerFusedLinearCrossEntropyFunction (https://arxiv.org/abs/2410.10989) which fuses final linear layer and CE loss.
+        """
+
+        return LigerFusedLinearPreferenceBase.forward(
+            ctx=ctx,
+            _input=_input,
+            weight=weight,
+            target=target,
+            bias=bias,
+            loss_fn=LigerFusedLinearORPOFunction.preference_loss_fn,
+            compute_nll_loss=compute_nll_loss,
+            ignore_index=ignore_index,
+            beta=beta,
+            compiled=compiled,
+        )
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        # Get gradients for _input, weight, bias, and target from the base class
+        grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
+        # Return these gradients, followed by None for the remaining inputs
+        return *grads, None, None, None, None
```
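The odds-ratio term above follows the ORPO formulation: with $p$ the average-token probability of a completion, `log_odds` is the log of the odds ratio between the chosen and rejected responses, since $\mathrm{log1p}(-e^{\log p}) = \log(1 - p)$. The method returns $\beta \sum_i \log\sigma(\cdot)$, which the base class's `loss = chosen_nll_loss - alignment_loss` then subtracts from the NLL term, giving:

$$
\mathcal{L}_{\mathrm{ORPO}} = \mathcal{L}_{\mathrm{NLL}}
  - \beta \sum_{i=1}^{B} \log \sigma\!\left(
      \log \frac{\operatorname{odds} p_\theta(y_c^{(i)})}
               {\operatorname{odds} p_\theta(y_r^{(i)})} \right),
\qquad \operatorname{odds} p = \frac{p}{1-p}
$$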
{liger_kernel_nightly-0.4.1.dev20241114041219 → liger_kernel_nightly-0.4.1.dev20241115012952}/src/liger_kernel_nightly.egg-info/SOURCES.txt

```diff
@@ -4,6 +4,7 @@ README.md
 pyproject.toml
 src/liger_kernel/env_report.py
 src/liger_kernel/chunked_loss/__init__.py
+src/liger_kernel/chunked_loss/dpo_loss.py
 src/liger_kernel/chunked_loss/fused_linear_preference.py
 src/liger_kernel/chunked_loss/orpo_loss.py
 src/liger_kernel/ops/__init__.py
```
liger_kernel_nightly-0.4.1.dev20241114041219/src/liger_kernel/chunked_loss/orpo_loss.py

```diff
@@ -1,117 +0,0 @@
-from functools import partial
-
-import torch
-import torch.nn.functional as F
-
-from liger_kernel.chunked_loss.fused_linear_preference import (
-    LigerFusedLinearPreferenceBase,
-)
-
-
-def odds_ratio_loss(chosen_logps, rejected_logps, beta=0.1):
-    """
-    Compute odds-ratio loss.
-    Args:
-        chosen_logps (torch.Tensor): Avg log probabilities of chosen tokens. Shape: (batch_size,).
-        rejected_logps (torch.Tensor): Avg log probabilities of rejected tokens. Shape: (batch_size,).
-        beta (float): Weight for the odds ratio loss.
-    """
-    log_odds = (chosen_logps - rejected_logps) - (
-        torch.log1p(-torch.exp(chosen_logps)) - torch.log1p(-torch.exp(rejected_logps))
-    )
-    ratio = F.logsigmoid(log_odds)
-    return beta * ratio.sum()
-
-
-def _compute_orpo_loss(
-    input_chunk,
-    weight,
-    target_chunk,
-    bias=None,
-    full_target=None,
-    ignore_index=-100,
-    beta=0.1,
-    compute_nll_loss=True,
-):
-    """
-    Compute ORPO loss for a chunk of input and target.
-    Args:
-        input_chunk (torch.Tensor): Chunk of input tensor. Shape: (2 * chunk_size, sequence_length, hidden_size).
-        weight (torch.Tensor): Weight tensor. Shape: (vocab_size, hidden_size).
-        target_chunk (torch.Tensor): Chunk of target tensor. Shape: (2 * chunk_size, sequence_length).
-        bias (torch.Tensor, optional): Bias tensor. Shape: (vocab_size,).
-        full_target (torch.Tensor): Full target tensor. Shape: (batch_size, sequence_length).
-        ignore_index (int): Index to ignore for loss computation.
-        beta (float): Weight for the odds ratio loss.
-    """
-    len_chosen_chunk = target_chunk.shape[0] // 2
-
-    logits_chunk = input_chunk @ weight.t()  # chunk_size x V
-    if bias is not None:
-        logits_chunk = logits_chunk + bias
-    log_probs_chunk = F.log_softmax(logits_chunk.float(), dim=-1)
-
-    chosen_nll_loss = 0.0
-    if compute_nll_loss:
-        chosen_nll_loss = F.nll_loss(
-            log_probs_chunk[:len_chosen_chunk].view(-1, log_probs_chunk.shape[-1]),
-            target_chunk[:len_chosen_chunk].view(-1),
-            reduction="sum",
-            ignore_index=ignore_index,
-        )
-        chosen_nll_loss = (
-            chosen_nll_loss
-            / (full_target[: full_target.shape[0] // 2] != ignore_index).sum()
-        )
-
-    loss_mask = target_chunk != ignore_index
-    label_chunk = torch.where(loss_mask, target_chunk, 0)
-
-    per_token_logps = log_probs_chunk.gather(-1, label_chunk.unsqueeze(-1)).squeeze(-1)
-    average_log_prob = (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
-
-    chosen_logps = average_log_prob[:len_chosen_chunk]
-    rejected_logps = average_log_prob[len_chosen_chunk:]
-
-    or_loss = odds_ratio_loss(chosen_logps, rejected_logps, beta=beta)
-    or_loss = or_loss / (full_target.shape[0] // 2)
-
-    loss = chosen_nll_loss - or_loss
-    return loss, (or_loss, chosen_logps, rejected_logps)
-
-
-class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
-    @staticmethod
-    def forward(
-        ctx,
-        _input,
-        weight,
-        target,
-        bias=None,
-        ignore_index=-100,
-        beta=0.1,
-        compute_nll_loss=True,
-        compiled=True,
-    ):
-        """
-        Fused linear layer with ORPO (Odds-Ratio Preference Optimization) loss.
-        Handles both the forward and backward pass of the final linear layer with ORPO loss.
-        Inspired from LigerFusedLinearCrossEntropyFunction (https://arxiv.org/abs/2410.10989) which fuses final linear layer and CE loss.
-        """
-        orpo_loss_fn = partial(
-            _compute_orpo_loss,
-            full_target=target,
-            ignore_index=ignore_index,
-            beta=beta,
-            compute_nll_loss=compute_nll_loss,
-        )
-        return LigerFusedLinearPreferenceBase.forward(
-            ctx, _input, weight, target, bias, loss_fn=orpo_loss_fn
-        )
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        # Get gradients for _input, weight, bias, and target from the base class
-        grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
-        # Return these gradients, followed by None for the remaining inputs
-        return *grads, None, None, None, None
```