liger-kernel-nightly 0.4.1.dev20241113011623__tar.gz → 0.4.1.dev20241114155849__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {liger_kernel_nightly-0.4.1.dev20241113011623/src/liger_kernel_nightly.egg-info → liger_kernel_nightly-0.4.1.dev20241114155849}/PKG-INFO +1 -1
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/pyproject.toml +1 -1
- liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel/chunked_loss/fused_linear_preference.py +206 -0
- liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel/chunked_loss/orpo_loss.py +63 -0
- liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel/transformers/model/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel_nightly.egg-info}/PKG-INFO +1 -1
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel_nightly.egg-info/SOURCES.txt +3 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/LICENSE +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/NOTICE +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/README.md +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/setup.cfg +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623/src/liger_kernel/ops → liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel/chunked_loss}/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/env_report.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623/src/liger_kernel/transformers/model → liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel/ops}/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/experimental/embedding.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/experimental/mm_int8int2.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/geglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/group_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/kl_div.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/layer_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/rms_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/rope.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/swiglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/ops/utils.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/auto_model.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/experimental/embedding.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/functional.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/fused_linear_cross_entropy.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/fused_linear_jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/geglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/group_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/jsd.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/kl_div.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/layer_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/gemma.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/gemma2.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/llama.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/mistral.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/mixtral.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/mllama.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/phi3.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/qwen2.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/model/qwen2_vl.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/monkey_patch.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/rms_norm.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/rope.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/swiglu.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/transformers/trainer_integration.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/triton/__init__.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel/triton/monkey_patch.py +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel_nightly.egg-info/dependency_links.txt +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel_nightly.egg-info/requires.txt +0 -0
- {liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel_nightly.egg-info/top_level.txt +0 -0
{liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/pyproject.toml CHANGED
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "liger_kernel_nightly"
-version = "0.4.1.dev20241113011623"
+version = "0.4.1.dev20241114155849"
 description = "Efficient Triton kernels for LLM Training"
 urls = { "Homepage" = "https://github.com/linkedin/Liger-Kernel" }
 readme = { file = "README.md", content-type = "text/markdown" }
liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel/chunked_loss/fused_linear_preference.py ADDED
@@ -0,0 +1,206 @@
+from abc import abstractmethod
+from functools import partial
+
+import torch
+from torch.nn import functional as F
+
+
+class LigerFusedLinearPreferenceBase(torch.autograd.Function):
+
+    @abstractmethod
+    def preference_loss_fn(chosen_logps, rejected_logps, beta=0.1):
+        """
+        Compute preference loss.
+        Args:
+            chosen_logps (torch.Tensor): Avg log probabilities of chosen tokens. Shape: (batch_size,).
+            rejected_logps (torch.Tensor): Avg log probabilities of rejected tokens. Shape: (batch_size,).
+            beta (float): Weight for the odds ratio loss.
+        """
+        raise NotImplementedError("Preference loss function must be implemented.")
+
+    @staticmethod
+    def forward(
+        ctx,
+        _input,
+        weight,
+        target,
+        bias=None,
+        loss_fn=None,
+        chunk_size=1,
+        compute_nll_loss=True,
+        ignore_index=-100,
+        beta=0.1,
+        compiled=True,
+    ):
+        """
+        Base class for fused linear layer with preference loss.
+        Expects _input to be stacked with chosen and rejected inputs on the batch dimension.
+
+        Args:
+            _input (torch.Tensor): Input tensor. Shape: (batch_size, seq_len, hidden_size).
+            weight (torch.Tensor): Weight tensor. Shape: (vocab_size, hidden_size).
+            target (torch.Tensor): Target tensor. Shape: (batch_size, seq_len).
+            bias (torch.Tensor, optional): Bias tensor. Shape: (vocab_size,).
+            loss_fn (callable): Loss function to compute the loss on a chunk of input/target.
+            chunk_size (int): Size of a chunk (# of batches of stacked chosen and rejected inputs).
+            compute_nll_loss (bool): Whether to compute NLL loss.
+            ignore_index (int): Index to ignore for loss computation.
+            beta (float): Weight for the odds ratio loss.
+            compiled (bool): Whether to use torch compile for chunk accumulation.
+        """
+        # TODO: Tune CHUNK_SIZE to fully utilize the GPU
+        CHUNK_SIZE = chunk_size
+
+        grad_weight = torch.zeros_like(weight)
+        grad_chosen_inputs = []
+        grad_rejected_inputs = []
+        grad_bias = torch.zeros_like(bias) if bias is not None else None
+        loss_acc = torch.zeros((), device=_input.device)
+
+        chunks = max(1, _input.shape[0] // (2 * CHUNK_SIZE))
+        loss_func_to_call = partial(
+            LigerFusedLinearPreferenceBase._compute_loss,
+            preference_loss_fn=loss_fn,
+            ignore_index=ignore_index,
+            beta=beta,
+            compute_nll_loss=compute_nll_loss,
+            full_target=target,
+        )
+
+        def accumulate_chunk(input_chunk, target_chunk):
+            if bias is not None:
+                (chunk_grad_input, chunk_grad_weight, chunk_grad_bias), (
+                    chunk_loss,
+                    (chunk_or_loss, chunk_chosen_logps, chunk_rejected_logps),
+                ) = torch.func.grad_and_value(
+                    loss_func_to_call, argnums=(0, 1, 3), has_aux=True
+                )(
+                    input_chunk, weight, target_chunk, bias
+                )
+                grad_bias.add_(chunk_grad_bias)
+            else:
+                (chunk_grad_input, chunk_grad_weight), (
+                    chunk_loss,
+                    (chunk_or_loss, chunk_chosen_logps, chunk_rejected_logps),
+                ) = torch.func.grad_and_value(
+                    loss_func_to_call, argnums=(0, 1), has_aux=True
+                )(
+                    input_chunk, weight, target_chunk
+                )
+            grad_weight.add_(chunk_grad_weight)
+            loss_acc.add_(chunk_loss)
+            return chunk_grad_input
+
+        len_chosen = target.shape[0] // 2
+        _chosen_input_chunks = torch.chunk(_input[:len_chosen], chunks=chunks, dim=0)
+        _chosen_target_chunks = torch.chunk(target[:len_chosen], chunks=chunks, dim=0)
+        _rejected_input_chunks = torch.chunk(_input[len_chosen:], chunks=chunks, dim=0)
+        _rejected_target_chunks = torch.chunk(target[len_chosen:], chunks=chunks, dim=0)
+
+        for (
+            chosen_input_chunk,
+            rejected_input_chunk,
+            chosen_target_chunk,
+            rejected_target_chunk,
+        ) in zip(
+            _chosen_input_chunks,
+            _rejected_input_chunks,
+            _chosen_target_chunks,
+            _rejected_target_chunks,
+        ):
+            input_chunk = torch.cat([chosen_input_chunk, rejected_input_chunk], dim=0)
+            target_chunk = torch.cat(
+                [chosen_target_chunk, rejected_target_chunk], dim=0
+            )
+
+            if compiled:
+                accumulate_chunk = torch.compile(accumulate_chunk)
+            grad_input = accumulate_chunk(input_chunk, target_chunk)
+
+            grad_chosen_inputs.append(grad_input[: chosen_target_chunk.shape[0]])
+            grad_rejected_inputs.append(grad_input[chosen_target_chunk.shape[0] :])
+
+        # combine grad_chosen_inputs and grad_rejected_inputs
+        grad_inputs = grad_chosen_inputs + grad_rejected_inputs
+
+        ctx.save_for_backward(
+            torch.cat(grad_inputs, dim=0),
+            grad_weight,
+            grad_bias,
+        )
+        return loss_acc
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        grad_input, grad_weight, grad_bias = ctx.saved_tensors
+        if torch.ne(grad_output, torch.tensor(1.0, device=grad_output.device)):
+            grad_input = grad_input * grad_output
+            grad_weight = grad_weight * grad_output
+            grad_bias = grad_bias * grad_output if grad_bias is not None else None
+
+        return grad_input, grad_weight, None, grad_bias, None, None, None
+
+    @staticmethod
+    def _compute_loss(
+        input_chunk,
+        weight,
+        target_chunk,
+        bias=None,
+        preference_loss_fn=None,
+        full_target=None,
+        ignore_index=-100,
+        beta=0.1,
+        compute_nll_loss=True,
+        **loss_kwargs,
+    ):
+        """
+        Compute the total loss for a chunk of input and target, while using an alignment/preference loss function.
+        Args:
+            preference_loss_fn (callable): Loss function to compute the loss on a chunk of input/target.
+            input_chunk (torch.Tensor): Chunk of input tensor. Shape: (2 * chunk_size, sequence_length, hidden_size).
+            weight (torch.Tensor): Weight tensor. Shape: (vocab_size, hidden_size).
+            target_chunk (torch.Tensor): Chunk of target tensor. Shape: (2 * chunk_size, sequence_length).
+            bias (torch.Tensor, optional): Bias tensor. Shape: (vocab_size,).
+            full_target (torch.Tensor): Full target tensor. Shape: (batch_size, sequence_length).
+            ignore_index (int): Index to ignore for loss computation.
+            beta (float): Weight for the odds ratio loss.
+            loss_kwargs (dict): Additional arguments for the loss function.
+        """
+        len_chosen_chunk = target_chunk.shape[0] // 2
+
+        logits_chunk = input_chunk @ weight.t()  # chunk_size x V
+        if bias is not None:
+            logits_chunk = logits_chunk + bias
+        log_probs_chunk = F.log_softmax(logits_chunk.float(), dim=-1)
+
+        chosen_nll_loss = 0.0
+        if compute_nll_loss:
+            chosen_nll_loss = F.nll_loss(
+                log_probs_chunk[:len_chosen_chunk].view(-1, log_probs_chunk.shape[-1]),
+                target_chunk[:len_chosen_chunk].view(-1),
+                reduction="sum",
+                ignore_index=ignore_index,
+            )
+            chosen_nll_loss = (
+                chosen_nll_loss
+                / (full_target[: full_target.shape[0] // 2] != ignore_index).sum()
+            )
+
+        loss_mask = target_chunk != ignore_index
+        label_chunk = torch.where(loss_mask, target_chunk, 0)
+
+        per_token_logps = log_probs_chunk.gather(-1, label_chunk.unsqueeze(-1)).squeeze(
+            -1
+        )
+        average_log_prob = (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
+
+        chosen_logps = average_log_prob[:len_chosen_chunk]
+        rejected_logps = average_log_prob[len_chosen_chunk:]
+
+        alignment_loss = preference_loss_fn(
+            chosen_logps, rejected_logps, beta=beta, **loss_kwargs
+        )
+        alignment_loss = alignment_loss / (full_target.shape[0] // 2)
+
+        loss = chosen_nll_loss - alignment_loss
+        return loss, (alignment_loss, chosen_logps, rejected_logps)
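The core trick in this new file is that gradients are computed eagerly inside `forward()`, one chunk at a time, via `torch.func.grad_and_value`, and accumulated into preallocated buffers; `backward()` then only rescales the stored results. This keeps the full (batch_size × seq_len × vocab_size) logits tensor from ever being materialized at once. A minimal stand-alone sketch of that accumulation pattern (not part of the diff; the toy loss and shapes are assumptions):

```python
import torch


def chunk_loss(input_chunk, weight):
    # Toy differentiable scalar loss over one chunk; stands in for _compute_loss.
    logits = input_chunk @ weight.t()
    loss = logits.mean()
    return loss, logits.detach()  # aux output, like (alignment_loss, logps)


_input = torch.randn(8, 16)  # assumed toy shapes
weight = torch.randn(32, 16)
grad_weight = torch.zeros_like(weight)
loss_acc = torch.zeros(())

for input_chunk in torch.chunk(_input, chunks=4, dim=0):
    # With has_aux=True, grad_and_value returns ((grads...), (loss, aux)),
    # the same unpacking shape used in accumulate_chunk above.
    (grad_in, grad_w), (loss, _aux) = torch.func.grad_and_value(
        chunk_loss, argnums=(0, 1), has_aux=True
    )(input_chunk, weight)
    grad_weight.add_(grad_w)  # accumulate weight grads across chunks
    loss_acc.add_(loss)
```

Because chunk losses are summed rather than averaged, any normalization has to live inside the per-chunk loss itself, which is why `_compute_loss` divides by token counts taken from `full_target` rather than from the chunk.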
liger_kernel_nightly-0.4.1.dev20241114155849/src/liger_kernel/chunked_loss/orpo_loss.py ADDED
@@ -0,0 +1,63 @@
+import torch
+import torch.nn.functional as F
+
+from liger_kernel.chunked_loss.fused_linear_preference import (
+    LigerFusedLinearPreferenceBase,
+)
+
+
+class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
+
+    @staticmethod
+    def preference_loss_fn(chosen_logps, rejected_logps, beta=0.1):
+        """
+        Compute odds-ratio loss.
+        Args:
+            chosen_logps (torch.Tensor): Avg log probabilities of chosen tokens. Shape: (batch_size,).
+            rejected_logps (torch.Tensor): Avg log probabilities of rejected tokens. Shape: (batch_size,).
+            beta (float): Weight for the odds ratio loss.
+        """
+        log_odds = (chosen_logps - rejected_logps) - (
+            torch.log1p(-torch.exp(chosen_logps))
+            - torch.log1p(-torch.exp(rejected_logps))
+        )
+        ratio = F.logsigmoid(log_odds)
+        return beta * ratio.sum()
+
+    @staticmethod
+    def forward(
+        ctx,
+        _input,
+        weight,
+        target,
+        bias=None,
+        ignore_index=-100,
+        beta=0.1,
+        compute_nll_loss=True,
+        compiled=True,
+    ):
+        """
+        Fused linear layer with ORPO (Odds-Ratio Preference Optimization) loss.
+        Handles both the forward and backward pass of the final linear layer with ORPO loss.
+        Inspired from LigerFusedLinearCrossEntropyFunction (https://arxiv.org/abs/2410.10989) which fuses final linear layer and CE loss.
+        """
+
+        return LigerFusedLinearPreferenceBase.forward(
+            ctx=ctx,
+            _input=_input,
+            weight=weight,
+            target=target,
+            bias=bias,
+            loss_fn=LigerFusedLinearORPOFunction.preference_loss_fn,
+            compute_nll_loss=compute_nll_loss,
+            ignore_index=ignore_index,
+            beta=beta,
+            compiled=compiled,
+        )
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        # Get gradients for _input, weight, bias, and target from the base class
+        grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
+        # Return these gradients, followed by None for the remaining inputs
+        return *grads, None, None, None, None
{liger_kernel_nightly-0.4.1.dev20241113011623 → liger_kernel_nightly-0.4.1.dev20241114155849}/src/liger_kernel_nightly.egg-info/SOURCES.txt CHANGED
@@ -3,6 +3,9 @@ NOTICE
 README.md
 pyproject.toml
 src/liger_kernel/env_report.py
+src/liger_kernel/chunked_loss/__init__.py
+src/liger_kernel/chunked_loss/fused_linear_preference.py
+src/liger_kernel/chunked_loss/orpo_loss.py
 src/liger_kernel/ops/__init__.py
 src/liger_kernel/ops/cross_entropy.py
 src/liger_kernel/ops/fused_linear_cross_entropy.py