liger-kernel-nightly 0.4.0.dev20241107052928__py3-none-any.whl → 0.6.3.dev20251121010306__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- liger_kernel/__init__.py +0 -0
- liger_kernel/chunked_loss/README.md +25 -0
- liger_kernel/chunked_loss/__init__.py +8 -0
- liger_kernel/chunked_loss/cosine_similarity_loss.py +136 -0
- liger_kernel/chunked_loss/cpo_loss.py +157 -0
- liger_kernel/chunked_loss/dpo_loss.py +229 -0
- liger_kernel/chunked_loss/functional.py +17 -0
- liger_kernel/chunked_loss/fused_linear_distillation.py +292 -0
- liger_kernel/chunked_loss/fused_linear_ppo.py +350 -0
- liger_kernel/chunked_loss/fused_linear_preference.py +433 -0
- liger_kernel/chunked_loss/fused_linear_unpaired_preference.py +341 -0
- liger_kernel/chunked_loss/grpo_loss.py +304 -0
- liger_kernel/chunked_loss/jsd_loss.py +200 -0
- liger_kernel/chunked_loss/kto_loss.py +210 -0
- liger_kernel/chunked_loss/orpo_loss.py +144 -0
- liger_kernel/chunked_loss/simpo_loss.py +165 -0
- liger_kernel/env_report.py +21 -4
- liger_kernel/ops/cross_entropy.py +235 -84
- liger_kernel/ops/dyt.py +157 -0
- liger_kernel/ops/experimental/embedding.py +1 -3
- liger_kernel/ops/experimental/mm_int8int2.py +3 -9
- liger_kernel/ops/fused_add_rms_norm.py +412 -0
- liger_kernel/ops/fused_linear_cross_entropy.py +197 -75
- liger_kernel/ops/fused_linear_jsd.py +17 -34
- liger_kernel/ops/fused_neighborhood_attention.py +1022 -0
- liger_kernel/ops/geglu.py +7 -18
- liger_kernel/ops/group_norm.py +305 -0
- liger_kernel/ops/grpo_loss.py +310 -0
- liger_kernel/ops/jsd.py +46 -21
- liger_kernel/ops/kl_div.py +23 -19
- liger_kernel/ops/layer_norm.py +150 -86
- liger_kernel/ops/llama4_rope.py +225 -0
- liger_kernel/ops/multi_token_attention.py +207 -0
- liger_kernel/ops/poly_norm.py +386 -0
- liger_kernel/ops/qwen2vl_mrope.py +222 -0
- liger_kernel/ops/rms_norm.py +314 -84
- liger_kernel/ops/rope.py +32 -34
- liger_kernel/ops/softmax.py +201 -0
- liger_kernel/ops/sparsemax.py +179 -0
- liger_kernel/ops/swiglu.py +5 -9
- liger_kernel/ops/tiled_mlp.py +136 -0
- liger_kernel/ops/tvd.py +207 -0
- liger_kernel/ops/utils.py +8 -4
- liger_kernel/transformers/__init__.py +199 -24
- liger_kernel/transformers/auto_model.py +6 -13
- liger_kernel/transformers/cross_entropy.py +33 -20
- liger_kernel/transformers/dyt.py +22 -0
- liger_kernel/transformers/experimental/__init__.py +5 -0
- liger_kernel/transformers/experimental/embedding.py +1 -3
- liger_kernel/transformers/fsdp.py +55 -0
- liger_kernel/transformers/functional.py +291 -13
- liger_kernel/transformers/fused_add_rms_norm.py +39 -0
- liger_kernel/transformers/fused_linear_cross_entropy.py +43 -14
- liger_kernel/transformers/fused_linear_jsd.py +1 -4
- liger_kernel/transformers/fused_neighborhood_attention.py +234 -0
- liger_kernel/transformers/geglu.py +1 -4
- liger_kernel/transformers/group_norm.py +50 -0
- liger_kernel/transformers/grpo_loss.py +98 -0
- liger_kernel/transformers/jsd.py +2 -7
- liger_kernel/transformers/kl_div.py +1 -3
- liger_kernel/transformers/layer_norm.py +3 -9
- liger_kernel/transformers/llama4_rope.py +93 -0
- liger_kernel/transformers/model/falcon_h1.py +122 -0
- liger_kernel/transformers/model/gemma.py +77 -77
- liger_kernel/transformers/model/gemma2.py +283 -0
- liger_kernel/transformers/model/gemma3.py +331 -0
- liger_kernel/transformers/model/glm4.py +141 -0
- liger_kernel/transformers/model/glm4v.py +163 -0
- liger_kernel/transformers/model/glm4v_moe.py +172 -0
- liger_kernel/transformers/model/internvl.py +157 -0
- liger_kernel/transformers/model/llama.py +128 -79
- liger_kernel/transformers/model/llama4.py +121 -0
- liger_kernel/transformers/model/llava.py +344 -0
- liger_kernel/transformers/model/loss_utils.py +95 -0
- liger_kernel/transformers/model/mistral.py +68 -64
- liger_kernel/transformers/model/mixtral.py +75 -91
- liger_kernel/transformers/model/mllama.py +63 -68
- liger_kernel/transformers/model/olmo2.py +141 -0
- liger_kernel/transformers/model/output_classes.py +147 -0
- liger_kernel/transformers/model/paligemma.py +432 -0
- liger_kernel/transformers/model/phi3.py +59 -213
- liger_kernel/transformers/model/qwen2.py +75 -72
- liger_kernel/transformers/model/qwen2_5_vl.py +163 -0
- liger_kernel/transformers/model/qwen2_vl.py +78 -98
- liger_kernel/transformers/model/qwen3.py +136 -0
- liger_kernel/transformers/model/qwen3_moe.py +152 -0
- liger_kernel/transformers/model/qwen3_next.py +146 -0
- liger_kernel/transformers/model/qwen3_vl.py +150 -0
- liger_kernel/transformers/model/qwen3_vl_moe.py +126 -0
- liger_kernel/transformers/model/smollm3.py +199 -0
- liger_kernel/transformers/model/smolvlm.py +158 -0
- liger_kernel/transformers/monkey_patch.py +2106 -289
- liger_kernel/transformers/multi_token_attention.py +64 -0
- liger_kernel/transformers/poly_norm.py +42 -0
- liger_kernel/transformers/qwen2vl_mrope.py +20 -0
- liger_kernel/transformers/rms_norm.py +57 -6
- liger_kernel/transformers/rope.py +45 -2
- liger_kernel/transformers/softmax.py +12 -0
- liger_kernel/transformers/sparsemax.py +16 -0
- liger_kernel/transformers/swiglu.py +23 -8
- liger_kernel/transformers/tiled_mlp.py +133 -0
- liger_kernel/transformers/trainer/__init__.py +4 -0
- liger_kernel/transformers/trainer/orpo_trainer.py +130 -0
- liger_kernel/transformers/tvd.py +13 -0
- liger_kernel/triton/__init__.py +1 -3
- liger_kernel/triton/monkey_patch.py +1 -3
- liger_kernel/utils.py +71 -0
- {liger_kernel_nightly-0.4.0.dev20241107052928.dist-info → liger_kernel_nightly-0.6.3.dev20251121010306.dist-info}/METADATA +150 -137
- liger_kernel_nightly-0.6.3.dev20251121010306.dist-info/RECORD +116 -0
- {liger_kernel_nightly-0.4.0.dev20241107052928.dist-info → liger_kernel_nightly-0.6.3.dev20251121010306.dist-info}/WHEEL +1 -1
- liger_kernel_nightly-0.4.0.dev20241107052928.dist-info/RECORD +0 -48
- {liger_kernel_nightly-0.4.0.dev20241107052928.dist-info → liger_kernel_nightly-0.6.3.dev20251121010306.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.4.0.dev20241107052928.dist-info → liger_kernel_nightly-0.6.3.dev20251121010306.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.4.0.dev20241107052928.dist-info → liger_kernel_nightly-0.6.3.dev20251121010306.dist-info}/top_level.txt +0 -0
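
The file list shows the two biggest themes of this jump: a new `liger_kernel/chunked_loss` package and fused-linear-cross-entropy forwards rolled out across the model patches. The Gemma hunks below illustrate the core idea: the loss is computed directly from the last hidden states and the `lm_head` weight, so the full logits tensor is never materialized. A minimal sketch of that call, reusing the import path and call signature visible in the diff (tensor shapes and the GPU requirement are assumptions; Liger's Triton kernels need a CUDA device):

```python
import torch

from liger_kernel.transformers.fused_linear_cross_entropy import LigerFusedLinearCrossEntropyLoss

# Toy shapes; the real forward passes the model's hidden states and lm_head weight.
batch, seq_len, hidden, vocab = 2, 16, 64, 128
hidden_states = torch.randn(batch, seq_len, hidden, device="cuda", requires_grad=True)
lm_head_weight = torch.randn(vocab, hidden, device="cuda", requires_grad=True)
labels = torch.randint(0, vocab, (batch, seq_len), device="cuda")

# Shift so that tokens < n predict n, then flatten, as in lce_forward_deprecated below.
shift_hidden = hidden_states[..., :-1, :].contiguous().view(-1, hidden)
shift_labels = labels[..., 1:].contiguous().view(-1)

lce = LigerFusedLinearCrossEntropyLoss()
loss = lce(lm_head_weight, shift_hidden, shift_labels)  # full logits are never stored
loss.backward()
```

Compared with computing `logits = hidden @ weight.T` followed by `CrossEntropyLoss`, this avoids storing a `(batch * seq_len, vocab)` logits tensor.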
liger_kernel/transformers/model/gemma.py

@@ -1,27 +1,21 @@
-from typing import List
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
 
 import torch
+
 from torch.nn import CrossEntropyLoss
 from transformers.cache_utils import Cache
 from transformers.modeling_outputs import CausalLMOutputWithPast
-from transformers.
-
-
-
-from transformers.
-
-
-
-
-from liger_kernel.transformers.fused_linear_cross_entropy import (
-    LigerFusedLinearCrossEntropyLoss,
-)
-
-
-@add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)
-@replace_return_docstrings(
-    output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
-)
+from transformers.utils.deprecation import deprecate_kwarg
+
+from liger_kernel.transformers.fused_linear_cross_entropy import LigerFusedLinearCrossEntropyLoss
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+
+
 def lce_forward_deprecated(
     self,
     input_ids: torch.LongTensor = None,
@@ -35,6 +29,7 @@ def lce_forward_deprecated(
     output_hidden_states: Optional[bool] = None,
     return_dict: Optional[bool] = None,
     cache_position: Optional[torch.LongTensor] = None,
+    skip_logits: Optional[bool] = None,
 ) -> Union[Tuple, CausalLMOutputWithPast]:
     r"""
 
@@ -64,19 +59,11 @@ def lce_forward_deprecated(
     >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     "What is your favorite condiment?"
     ```"""
-    output_attentions = (
-        output_attentions
-        if output_attentions is not None
-        else self.config.output_attentions
-    )
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
     output_hidden_states = (
-        output_hidden_states
-        if output_hidden_states is not None
-        else self.config.output_hidden_states
-    )
-    return_dict = (
-        return_dict if return_dict is not None else self.config.use_return_dict
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
     )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
     # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
     outputs = self.model(
@@ -97,7 +84,14 @@ def lce_forward_deprecated(
     loss = None
     logits = None
 
-    if
+    if skip_logits and labels is None:
+        raise ValueError("skip_logits is True, but labels is None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and labels is not None
+
+    if skip_logits:
         shift_hidden_states = hidden_states[..., :-1, :].contiguous()
         shift_labels = labels[..., 1:].contiguous()
 
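
The `skip_logits` argument introduced here is deliberately tri-state: `True` requires labels, `False` always materializes logits, and `None` falls back to the default in the hunk above. A tiny stand-alone sketch of how that default resolves (`training` and `labels` are plain placeholder variables, not model attributes):

```python
training = True
labels = [0, 1, 2]
skip_logits = None

if skip_logits and labels is None:
    raise ValueError("skip_logits is True, but labels is None")

if skip_logits is None:
    # Default: only skip logit materialization when training with labels present
    skip_logits = training and labels is not None

print(skip_logits)  # True
```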
@@ -138,10 +132,7 @@ def lce_forward_deprecated(
     )
 
 
-@add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING)
-@replace_return_docstrings(
-    output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
-)
+@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
 def lce_forward(
     self,
     input_ids: torch.LongTensor = None,
@@ -155,9 +146,10 @@ def lce_forward(
     output_hidden_states: Optional[bool] = None,
     return_dict: Optional[bool] = None,
     cache_position: Optional[torch.LongTensor] = None,
-
-
-
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
     r"""
     Args:
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
@@ -165,10 +157,12 @@ def lce_forward(
             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
 
-
-
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
             `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
             token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
 
     Returns:
 
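
The `logits_to_keep` docstring above corresponds one-to-one with the slicing the new `lce_forward` applies to the hidden states. A runnable sketch of that slicing with made-up tensor sizes:

```python
import torch

hidden_states = torch.randn(2, 10, 8)  # (batch, seq_len, hidden); sizes are illustrative

# int: 0 keeps every position, a positive value keeps only the trailing positions
logits_to_keep = 3
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
print(hidden_states[:, slice_indices, :].shape)  # torch.Size([2, 3, 8])

# tensor: explicit sequence indices, e.g. the last token of each packed sample
logits_to_keep = torch.tensor([0, 4, 9])
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
print(hidden_states[:, slice_indices, :].shape)  # torch.Size([2, 3, 8])
```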
@@ -188,19 +182,11 @@ def lce_forward(
     >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     "What is your favorite condiment?"
     ```"""
-    output_attentions = (
-        output_attentions
-        if output_attentions is not None
-        else self.config.output_attentions
-    )
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
     output_hidden_states = (
-        output_hidden_states
-        if output_hidden_states is not None
-        else self.config.output_hidden_states
-    )
-    return_dict = (
-        return_dict if return_dict is not None else self.config.use_return_dict
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
     )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
     # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
     outputs = self.model(
@@ -214,48 +200,62 @@ def lce_forward(
         output_hidden_states=output_hidden_states,
         return_dict=return_dict,
         cache_position=cache_position,
+        **kwargs,
     )
 
     hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
 
+    shift_labels = kwargs.pop("shift_labels", None)
     logits = None
     loss = None
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    token_accuracy = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    # Compute loss
+    if skip_logits:
+        result = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if labels is not None or shift_labels is not None:
            loss = self.loss_function(
                logits=logits,
                labels=labels,
+                shift_labels=shift_labels,
                vocab_size=self.config.vocab_size,
-                **
+                **kwargs,
            )
 
     if not return_dict:
-
-
-
-
+        output_tuple = (logits,) + outputs[1:]
+        if loss is not None:
+            output_tuple = (loss,) + output_tuple
+        if token_accuracy is not None:
+            output_tuple = output_tuple + (token_accuracy,)
+        return output_tuple
+
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
        loss=loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
    )
liger_kernel/transformers/model/gemma2.py

@@ -0,0 +1,283 @@
+import logging
+
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import torch
+
+from torch.nn import CrossEntropyLoss
+from transformers.cache_utils import HybridCache
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.utils.deprecation import deprecate_kwarg
+
+from liger_kernel.transformers.fused_linear_cross_entropy import LigerFusedLinearCrossEntropyLoss
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+
+logger = logging.getLogger(__name__)
+
+
+def lce_forward_deprecated(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[HybridCache] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    skip_logits: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, CausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+    Returns:
+
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, GemmaForCausalLM
+    >>> model = GemmaForCausalLM.from_pretrained("google/gemma-2-9b")
+    >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
+    >>> prompt = "What is your favorite condiment?"
+    >>> inputs = tokenizer(prompt, return_tensors="pt")
+    >>> # Generate
+    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    "What is your favorite condiment?"
+    ```"""
+
+    if self.training and self.config._attn_implementation != "eager":
+        logger.warning_once(
+            "It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
+            f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
+        )
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+
+    loss = None
+    logits = None
+
+    if skip_logits and labels is None:
+        raise ValueError("skip_logits is True, but labels is None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and labels is not None
+
+    if skip_logits:
+        shift_hidden_states = hidden_states[..., :-1, :].contiguous()
+        shift_labels = labels[..., 1:].contiguous()
+
+        # flatten
+
+        shift_hidden_states = shift_hidden_states.view(-1, self.config.hidden_size)
+        shift_labels = shift_labels.view(-1)
+
+        lce = LigerFusedLinearCrossEntropyLoss(softcap=self.config.final_logit_softcapping)
+        loss = lce(self.lm_head.weight, shift_hidden_states, shift_labels)
+
+    else:
+        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+        logits = self.lm_head(hidden_states)
+        if self.config.final_logit_softcapping is not None:
+            logits = logits / self.config.final_logit_softcapping
+            logits = torch.tanh(logits)
+            logits = logits * self.config.final_logit_softcapping
+
+        loss = None
+        if labels is not None:
+            # Upcast to float if we need to compute the loss to avoid potential precision issues
+            logits = logits.float()
+            # Shift so that tokens < n predict n
+            shift_logits = logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = CrossEntropyLoss()
+            shift_logits = shift_logits.view(-1, self.config.vocab_size)
+            shift_labels = shift_labels.view(-1)
+            # Enable model parallelism
+            shift_labels = shift_labels.to(shift_logits.device)
+            loss = loss_fct(shift_logits, shift_labels)
+
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        return (loss,) + output if loss is not None else output
+
+    return CausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+    )
+
+
+@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+def lce_forward(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[HybridCache] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, GemmaForCausalLM
+
+    >>> model = GemmaForCausalLM.from_pretrained("google/gemma-2-9b")
+    >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
+
+    >>> prompt = "What is your favorite condiment?"
+    >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+    >>> # Generate
+    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    "What is your favorite condiment?"
+    ```"""
+
+    if self.training and self.config._attn_implementation != "eager":
+        logger.warning_once(
+            "It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
+            f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
+        )
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
+    shift_labels = kwargs.pop("shift_labels", None)
+    logits = None
+    loss = None
+    token_accuracy = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    # Compute loss
+    if skip_logits:
+        result = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            final_logit_softcapping=self.config.final_logit_softcapping,
+            **kwargs,
+        )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if self.config.final_logit_softcapping is not None:
+            logits = logits / self.config.final_logit_softcapping
+            logits = torch.tanh(logits)
+            logits = logits * self.config.final_logit_softcapping
+
+        loss = None
+        if labels is not None or shift_labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                shift_labels=shift_labels,
+                vocab_size=self.vocab_size,
+                **kwargs,
+            )
+
+    if not return_dict:
+        output_tuple = (logits,) + outputs[1:]
+        output_tuple = (loss,) + output_tuple if loss is not None else output_tuple
+        output_tuple = output_tuple + (token_accuracy,) if token_accuracy is not None else output_tuple
+        return output_tuple
+
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
+    )
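
When the Gemma2 forwards above do materialize logits, they apply final-logit softcapping: divide by the cap, `tanh`, multiply back. A runnable sketch with an assumed cap of 30.0 (the real value is read from the model config):

```python
import torch

final_logit_softcapping = 30.0          # assumed cap; the forward reads it from self.config
logits = torch.randn(2, 5, 16) * 100    # deliberately large pre-cap values

logits = logits / final_logit_softcapping
logits = torch.tanh(logits)
logits = logits * final_logit_softcapping

assert logits.abs().max() <= final_logit_softcapping  # values are bounded by the cap
```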