liger-kernel-nightly 0.5.10.dev20250611191801__py3-none-any.whl → 0.6.4.dev20260112233432__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of liger-kernel-nightly might be problematic.
- liger_kernel/chunked_loss/__init__.py +1 -0
- liger_kernel/chunked_loss/cosine_similarity_loss.py +142 -0
- liger_kernel/chunked_loss/dpo_loss.py +54 -3
- liger_kernel/chunked_loss/functional.py +2 -0
- liger_kernel/chunked_loss/fused_linear_distillation.py +23 -5
- liger_kernel/chunked_loss/fused_linear_ppo.py +25 -5
- liger_kernel/chunked_loss/grpo_loss.py +46 -9
- liger_kernel/chunked_loss/jsd_loss.py +44 -13
- liger_kernel/ops/__init__.py +141 -0
- liger_kernel/ops/backends/README.md +151 -0
- liger_kernel/ops/backends/__init__.py +13 -0
- liger_kernel/ops/backends/_ascend/__init__.py +5 -0
- liger_kernel/ops/backends/_ascend/ascend-ub-manager-design.md +485 -0
- liger_kernel/ops/backends/_ascend/ops/__init__.py +49 -0
- liger_kernel/ops/backends/_ascend/ops/geglu.py +266 -0
- liger_kernel/ops/backends/_ascend/ops/qwen2vl_mrope.py +285 -0
- liger_kernel/ops/backends/_ascend/ops/rope.py +290 -0
- liger_kernel/ops/backends/_ascend/ops/swiglu.py +142 -0
- liger_kernel/ops/backends/_ascend/ops/tvd.py +221 -0
- liger_kernel/ops/backends/_ascend/ub_manager.py +349 -0
- liger_kernel/ops/backends/registry.py +61 -0
- liger_kernel/ops/cross_entropy.py +130 -64
- liger_kernel/ops/dyt.py +5 -4
- liger_kernel/ops/fused_add_rms_norm.py +416 -0
- liger_kernel/ops/fused_linear_cross_entropy.py +115 -22
- liger_kernel/ops/geglu.py +6 -4
- liger_kernel/ops/group_norm.py +7 -7
- liger_kernel/ops/grpo_loss.py +3 -1
- liger_kernel/ops/kl_div.py +8 -11
- liger_kernel/ops/layer_norm.py +135 -80
- liger_kernel/ops/llama4_rope.py +225 -0
- liger_kernel/ops/poly_norm.py +390 -0
- liger_kernel/ops/rms_norm.py +148 -71
- liger_kernel/ops/rope.py +1 -1
- liger_kernel/ops/swiglu.py +1 -1
- liger_kernel/ops/tiled_mlp.py +136 -0
- liger_kernel/ops/utils.py +14 -0
- liger_kernel/transformers/__init__.py +65 -0
- liger_kernel/transformers/auto_model.py +21 -0
- liger_kernel/transformers/cross_entropy.py +9 -4
- liger_kernel/transformers/dyt.py +1 -1
- liger_kernel/transformers/experimental/__init__.py +5 -0
- liger_kernel/transformers/experimental/embedding.py +1 -1
- liger_kernel/transformers/functional.py +56 -24
- liger_kernel/transformers/fused_add_rms_norm.py +39 -0
- liger_kernel/transformers/fused_linear_cross_entropy.py +17 -5
- liger_kernel/transformers/fused_linear_jsd.py +1 -1
- liger_kernel/transformers/fused_neighborhood_attention.py +1 -1
- liger_kernel/transformers/geglu.py +1 -1
- liger_kernel/transformers/group_norm.py +1 -1
- liger_kernel/transformers/grpo_loss.py +57 -2
- liger_kernel/transformers/jsd.py +1 -1
- liger_kernel/transformers/kl_div.py +1 -1
- liger_kernel/transformers/layer_norm.py +1 -1
- liger_kernel/transformers/llama4_rope.py +93 -0
- liger_kernel/transformers/model/exaone4.py +136 -0
- liger_kernel/transformers/model/falcon_h1.py +122 -0
- liger_kernel/transformers/model/gemma.py +28 -8
- liger_kernel/transformers/model/gemma2.py +34 -11
- liger_kernel/transformers/model/gemma3.py +102 -112
- liger_kernel/transformers/model/glm4.py +18 -5
- liger_kernel/transformers/model/glm4v.py +163 -0
- liger_kernel/transformers/model/glm4v_moe.py +172 -0
- liger_kernel/transformers/model/gpt_oss.py +211 -0
- liger_kernel/transformers/model/hunyuan_v1.py +134 -0
- liger_kernel/transformers/model/internvl.py +157 -0
- liger_kernel/transformers/model/llama.py +26 -7
- liger_kernel/transformers/model/llama4.py +121 -0
- liger_kernel/transformers/model/llava.py +18 -6
- liger_kernel/transformers/model/loss_utils.py +34 -3
- liger_kernel/transformers/model/mistral.py +17 -10
- liger_kernel/transformers/model/mixtral.py +24 -9
- liger_kernel/transformers/model/mllama.py +18 -7
- liger_kernel/transformers/model/olmo2.py +18 -5
- liger_kernel/transformers/model/olmo3.py +142 -0
- liger_kernel/transformers/model/output_classes.py +147 -0
- liger_kernel/transformers/model/paligemma.py +42 -5
- liger_kernel/transformers/model/phi3.py +24 -159
- liger_kernel/transformers/model/qwen2.py +26 -4
- liger_kernel/transformers/model/qwen2_5_vl.py +21 -8
- liger_kernel/transformers/model/qwen2_vl.py +24 -7
- liger_kernel/transformers/model/qwen3.py +22 -6
- liger_kernel/transformers/model/qwen3_moe.py +27 -7
- liger_kernel/transformers/model/qwen3_next.py +146 -0
- liger_kernel/transformers/model/qwen3_vl.py +150 -0
- liger_kernel/transformers/model/qwen3_vl_moe.py +126 -0
- liger_kernel/transformers/model/smollm3.py +199 -0
- liger_kernel/transformers/model/smolvlm.py +158 -0
- liger_kernel/transformers/monkey_patch.py +1423 -100
- liger_kernel/transformers/multi_token_attention.py +2 -2
- liger_kernel/transformers/poly_norm.py +42 -0
- liger_kernel/transformers/qwen2vl_mrope.py +1 -1
- liger_kernel/transformers/rms_norm.py +15 -5
- liger_kernel/transformers/rope.py +45 -1
- liger_kernel/transformers/softmax.py +1 -1
- liger_kernel/transformers/sparsemax.py +1 -1
- liger_kernel/transformers/swiglu.py +18 -1
- liger_kernel/transformers/tiled_mlp.py +125 -0
- liger_kernel/transformers/tvd.py +1 -1
- liger_kernel/utils.py +52 -0
- {liger_kernel_nightly-0.5.10.dev20250611191801.dist-info → liger_kernel_nightly-0.6.4.dev20260112233432.dist-info}/METADATA +37 -25
- liger_kernel_nightly-0.6.4.dev20260112233432.dist-info/RECORD +132 -0
- liger_kernel_nightly-0.5.10.dev20250611191801.dist-info/RECORD +0 -95
- {liger_kernel_nightly-0.5.10.dev20250611191801.dist-info → liger_kernel_nightly-0.6.4.dev20260112233432.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.5.10.dev20250611191801.dist-info → liger_kernel_nightly-0.6.4.dev20260112233432.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.5.10.dev20250611191801.dist-info → liger_kernel_nightly-0.6.4.dev20260112233432.dist-info}/WHEEL +0 -0
- {liger_kernel_nightly-0.5.10.dev20250611191801.dist-info → liger_kernel_nightly-0.6.4.dev20260112233432.dist-info}/top_level.txt +0 -0
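The dominant change in the model files below is token-accuracy plumbing: each patched forward now keeps the fused cross-entropy result, unpacks it via unpack_cross_entropy_result (added to liger_kernel/transformers/model/loss_utils.py, +34 -3 above), and threads an optional token_accuracy value through both the tuple and the dataclass return paths. The helper itself is not shown in this diff; what follows is only a minimal sketch of what the call sites (loss, _, token_accuracy = unpack_cross_entropy_result(result)) imply, with the meaning of the middle element assumed:

def unpack_cross_entropy_result(result):
    # Sketch only: normalize a fused cross-entropy result to a 3-tuple
    # (loss, aux, token_accuracy). The middle "aux" slot is an assumption;
    # the call sites below discard it.
    if isinstance(result, tuple):
        loss, *rest = result
        aux = rest[0] if len(rest) > 0 else None
        token_accuracy = rest[1] if len(rest) > 1 else None
        return loss, aux, token_accuracy
    return result, None, None  # bare loss tensor: no accuracy computed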
@@ -1,4 +1,3 @@
-from typing import List
 from typing import Optional
 from typing import Tuple
 from typing import Union
@@ -7,26 +6,23 @@ import torch
 import torch.nn as nn
 
 from transformers.cache_utils import Cache
-from transformers.cache_utils import HybridCache
-from transformers.modeling_outputs import CausalLMOutputWithPast
-from transformers.models.gemma3.modeling_gemma3 import Gemma3CausalLMOutputWithPast
-from transformers.utils import is_torchdynamo_compiling
 from transformers.utils import logging
-from transformers.utils.deprecation import deprecate_kwarg
 
 from liger_kernel.transformers.fused_linear_cross_entropy import LigerFusedLinearCrossEntropyLoss
 from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+from liger_kernel.transformers.model.output_classes import LigerGemma3CausalLMOutputWithPast
 
 logger = logging.get_logger(__name__)
 
 
-@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
 def causal_forward(
     self,
     input_ids: torch.LongTensor = None,
     attention_mask: Optional[torch.Tensor] = None,
     position_ids: Optional[torch.LongTensor] = None,
-    past_key_values: Optional[HybridCache] = None,
+    past_key_values: Optional[Cache] = None,
     inputs_embeds: Optional[torch.FloatTensor] = None,
     labels: Optional[torch.LongTensor] = None,
     use_cache: Optional[bool] = None,
@@ -37,7 +33,7 @@ def causal_forward(
     logits_to_keep: Union[int, torch.Tensor] = 0,
     skip_logits: Optional[bool] = None,
     **loss_kwargs,
-) -> Union[Tuple, CausalLMOutputWithPast]:
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
     r"""
     labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
         Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
@@ -102,12 +98,14 @@ def causal_forward(
     shift_labels = loss_kwargs.pop("shift_labels", None)
     loss = None
     logits = None
+    token_accuracy = None
 
     if skip_logits is None:
         skip_logits = self.training and (labels is not None or shift_labels is not None)
 
+    # Compute loss
     if skip_logits:
-        loss = LigerForCausalLMLoss(
+        result = LigerForCausalLMLoss(
             hidden_states=kept_hidden_states,
             lm_head_weight=self.lm_head.weight,
             labels=labels,
@@ -116,37 +114,46 @@ def causal_forward(
             final_logit_softcapping=self.config.final_logit_softcapping,
             **loss_kwargs,
         )
-
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
     else:
         logits = self.lm_head(kept_hidden_states)
         if self.config.final_logit_softcapping is not None:
             logits = logits / self.config.final_logit_softcapping
             logits = torch.tanh(logits)
             logits = logits * self.config.final_logit_softcapping
-        if labels is not None:
-            loss = self.loss_function(
+        if labels is not None or shift_labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                shift_labels=shift_labels,
+                vocab_size=self.vocab_size,
+                **loss_kwargs,
+            )
 
     if not return_dict:
-        output = (logits,) + outputs[1:]
-        return (loss,) + output if loss is not None else output
+        output_tuple = (logits,) + outputs[1:]
+        output_tuple = (loss,) + output_tuple if loss is not None else output_tuple
+        output_tuple = output_tuple + (token_accuracy,) if token_accuracy is not None else output_tuple
+        return output_tuple
 
-    return CausalLMOutputWithPast(
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
         loss=loss,
         logits=logits,
         past_key_values=outputs.past_key_values,
         hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
     )
 
 
-@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
 def multimodal_forward(
     self,
     input_ids: torch.LongTensor = None,
     pixel_values: torch.FloatTensor = None,
     attention_mask: Optional[torch.Tensor] = None,
     position_ids: Optional[torch.LongTensor] = None,
-    past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None,
+    past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None,
     token_type_ids: Optional[torch.LongTensor] = None,
     cache_position: Optional[torch.LongTensor] = None,
     inputs_embeds: Optional[torch.FloatTensor] = None,
@@ -158,21 +165,12 @@ def multimodal_forward(
     logits_to_keep: Union[int, torch.Tensor] = 0,
     skip_logits: Optional[bool] = None,
     **lm_kwargs,
-) -> Union[Tuple, Gemma3CausalLMOutputWithPast]:
+) -> Union[tuple, LigerGemma3CausalLMOutputWithPast]:
     r"""
-
-
-
-
-
-    logits_to_keep (`int` or `torch.Tensor`, *optional*):
-        If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
-        `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
-        token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
-        If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
-        This is useful when using packed tensor format (single dimension for batch and sequence length).
-
-    Returns:
+    labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+        Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+        config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+        (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
 
     Example:
 
@@ -181,23 +179,37 @@ def multimodal_forward(
     >>> import requests
     >>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration
 
-    >>> model = Gemma3ForConditionalGeneration.from_pretrained("google/
-    >>> processor = AutoProcessor.from_pretrained("google/
-
-    >>>
-
-
-
-
-
+    >>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma-3-4b-it")
+    >>> processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it")
+
+    >>> messages = [
+    ...     {
+    ...         "role": "system",
+    ...         "content": [
+    ...             {"type": "text", "text": "You are a helpful assistant."}
+    ...         ]
+    ...     },
+    ...     {
+    ...         "role": "user", "content": [
+    ...             {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
+    ...             {"type": "text", "text": "Where is the cat standing?"},
+    ...         ]
+    ...     },
+    ... ]
+
+    >>> inputs = processor.apply_chat_template(
+    ...     messages,
+    ...     tokenize=True,
+    ...     return_dict=True,
+    ...     return_tensors="pt",
+    ...     add_generation_prompt=True
+    ... )
     >>> # Generate
-    >>> generate_ids = model.generate(**inputs
+    >>> generate_ids = model.generate(**inputs)
     >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-    "
-    ```
-    """
-    if (input_ids is None) ^ (inputs_embeds is not None):
-        raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+    "user\nYou are a helpful assistant.\n\n\n\n\n\nWhere is the cat standing?\nmodel\nBased on the image, the cat is standing in a snowy area, likely outdoors. It appears to"
+    ```
+    """
 
     output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
     output_hidden_states = (
@@ -205,79 +217,32 @@ def multimodal_forward(
     )
     return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
-
-
-
-
-
-        llm_input_ids = input_ids.clone()
-        llm_input_ids[special_image_mask] = 0
-    else:
-        llm_input_ids = input_ids
-
-    if inputs_embeds is None:
-        inputs_embeds = self.get_input_embeddings()(llm_input_ids)
-
-    if cache_position is None:
-        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
-        cache_position = torch.arange(
-            past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
-        )
-
-    if position_ids is None:
-        position_ids = cache_position.unsqueeze(0) + 1  # Gemma3 positions are 1-indexed
-
-    # Merge text and images
-    if pixel_values is not None:
-        image_features = self.get_image_features(pixel_values)
-
-        if input_ids is None:
-            special_image_mask = inputs_embeds == self.get_input_embeddings()(
-                torch.tensor(self.config.image_token_index, dtype=torch.long, device=inputs_embeds.device)
-            )
-        else:
-            special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1)
-            special_image_mask = special_image_mask.expand_as(inputs_embeds).to(inputs_embeds.device)
-
-        if not is_torchdynamo_compiling() and inputs_embeds[special_image_mask].numel() != image_features.numel():
-            image_tokens_in_text = (special_image_mask).sum(dim=1).sum(dim=0)[0]
-            raise ValueError(
-                f"Number of images does not match number of special image tokens in the input text. "
-                f"Got {image_tokens_in_text} image tokens in the text but {image_features.shape[0] * image_features.shape[1]} "
-                "tokens from image embeddings."
-            )
-        image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
-        inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
-
-    # mask out pad-token-ids in labels for BC
-    if labels is not None and self.pad_token_id in labels:
-        logger.warning_once(
-            "`labels` contains `pad_token_id` which will be masked with `config.ignore_index`. "
-            "You have to mask out `pad_token_id` when preparing `labels`, this behavior will be removed in v.4.46.",
-        )
-        labels = torch.where(input_ids == self.pad_token_id, self.config.ignore_index, labels)
-
-    causal_mask = self._update_causal_mask(
-        attention_mask, token_type_ids, past_key_values, cache_position, inputs_embeds, is_training
-    )
-    outputs = self.language_model.model(
-        attention_mask=causal_mask,
+    outputs = self.model(
+        input_ids=input_ids,
+        pixel_values=pixel_values,
+        token_type_ids=token_type_ids,
+        attention_mask=attention_mask,
         position_ids=position_ids,
         past_key_values=past_key_values,
         inputs_embeds=inputs_embeds,
         use_cache=use_cache,
+        labels=labels,
         output_attentions=output_attentions,
         output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        cache_position=cache_position,
-        logits_to_keep=logits_to_keep,
        **lm_kwargs,
     )
 
+    shift_labels = lm_kwargs.pop("shift_labels", None)
    hidden_states = outputs[0]
+
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
    loss = None
    logits = None
-
+    token_accuracy = None
    if skip_logits and labels is None:
        raise ValueError("skip_logits is True, but labels is None")
 
@@ -285,7 +250,7 @@ def multimodal_forward(
         skip_logits = self.training and (labels is not None)
 
     if skip_logits:
-        shift_hidden_states = hidden_states[..., :-1, :]
+        shift_hidden_states = kept_hidden_states[..., :-1, :]
         shift_labels = labels[..., 1:]
 
         hidden_device = shift_hidden_states.device
@@ -304,9 +269,11 @@ def multimodal_forward(
         shift_labels = shift_labels.view(-1).to(hidden_device)
 
         lce = LigerFusedLinearCrossEntropyLoss()
-        loss = lce(self.language_model.lm_head.weight, shift_hidden_states, shift_labels)
+        result = lce(self.lm_head.weight, shift_hidden_states, shift_labels)
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
     else:
-        logits = self.
+        logits = self.lm_head(kept_hidden_states)
         if labels is not None:
             # Upcast to float if we need to compute the loss to avoid potential precision issues
             logits = logits.float()
@@ -327,15 +294,38 @@ def multimodal_forward(
             flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
             flat_labels = shift_labels.view(-1).to(shift_logits.device)
             loss = loss_fct(flat_logits, flat_labels)
+        elif shift_labels is not None:
+            # Upcast to float if we need to compute the loss to avoid potential precision issues
+            logits = logits.float()
+            shift_logits = logits[..., :-1, :]
+            if attention_mask is not None:
+                # we use the input attention mask to shift the logits and labels, because it is 2D.
+                # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft
+                shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device)
+                shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous()
+                shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous()
+            else:
+                shift_logits = shift_logits.contiguous()
+                shift_labels = shift_labels.contiguous()
+            # Flatten the tokens
+            loss_fct = nn.CrossEntropyLoss()
+
+            flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size)
+            flat_labels = shift_labels.view(-1).to(shift_logits.device)
+            loss = loss_fct(flat_logits, flat_labels)
+
     if not return_dict:
         output = (logits,) + outputs[1:]
-        return (loss,) + output if loss is not None else output
+        output = (loss,) + output if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
 
-    return Gemma3CausalLMOutputWithPast(
+    return LigerGemma3CausalLMOutputWithPast(
         loss=loss,
         logits=logits,
         past_key_values=outputs.past_key_values,
         hidden_states=outputs.hidden_states,
         attentions=outputs.attentions,
-        image_hidden_states=image_features if pixel_values is not None else None,
+        image_hidden_states=outputs.image_hidden_states,
+        token_accuracy=token_accuracy,
     )
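The hunks above evidently come from liger_kernel/transformers/model/gemma3.py (the Gemma3 symbols match, as do the +102 -112 counts in the file list). Both forwards now return classes from the new liger_kernel/transformers/model/output_classes.py (+147 -0), whose definitions are not included here. Judging from the return sites, a plausible sketch is a dataclass extending the stock transformers output with a single extra field:

from dataclasses import dataclass
from typing import Optional

import torch
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.gemma3.modeling_gemma3 import Gemma3CausalLMOutputWithPast


@dataclass
class LigerCausalLMOutputWithPast(CausalLMOutputWithPast):
    # Assumed semantics: fraction of supervised tokens predicted correctly.
    token_accuracy: Optional[torch.Tensor] = None


@dataclass
class LigerGemma3CausalLMOutputWithPast(Gemma3CausalLMOutputWithPast):
    token_accuracy: Optional[torch.Tensor] = None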
@@ -5,10 +5,11 @@ from typing import Union
 
 import torch
 
-from transformers.modeling_outputs import CausalLMOutputWithPast
 from transformers.utils.deprecation import deprecate_kwarg
 
 from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
 
 
 @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
@@ -28,7 +29,7 @@ def lce_forward(
     logits_to_keep: Union[int, torch.Tensor] = 0,
     skip_logits: Optional[bool] = None,
     **kwargs,
-) -> Union[Tuple, CausalLMOutputWithPast]:
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
     r"""
     Args:
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
@@ -91,6 +92,7 @@ def lce_forward(
     shift_labels = kwargs.pop("shift_labels", None)
     logits = None
     loss = None
+    token_accuracy = None
 
     if skip_logits and labels is None and shift_labels is None:
         raise ValueError("skip_logits is True, but labels and shift_labels are None")
@@ -99,8 +101,9 @@ def lce_forward(
         # By default, if in training mode, don't materialize logits
         skip_logits = self.training and (labels is not None or shift_labels is not None)
 
+    # Compute loss
     if skip_logits:
-        loss = LigerForCausalLMLoss(
+        result = LigerForCausalLMLoss(
             hidden_states=kept_hidden_states,
             lm_head_weight=self.lm_head.weight,
             labels=labels,
@@ -108,21 +111,31 @@ def lce_forward(
             hidden_size=self.config.hidden_size,
             **kwargs,
         )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
 
     else:
         logits = self.lm_head(kept_hidden_states)
-        if labels is not None:
+        if labels is not None or shift_labels is not None:
             loss = self.loss_function(
                 logits=logits,
                 labels=labels,
+                shift_labels=shift_labels,
                 vocab_size=self.config.vocab_size,
                 **kwargs,
             )
 
-    return CausalLMOutputWithPast(
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        output = ((loss,) + output) if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
+
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
         loss=loss,
         logits=logits,
         past_key_values=outputs.past_key_values,
         hidden_states=outputs.hidden_states,
         attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
     )
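The same refactor pattern repeats in the text-only lce_forward above (the +18 -5 counts suggest liger_kernel/transformers/model/glm4.py, though the file name is not shown at this point in the diff). One behavioral detail worth noting: with return_dict=False the tuple layout changes, since token_accuracy (when computed) is appended after the usual entries. A dependency-free illustration of the packing logic, with placeholder values standing in for real tensors:

def pack_output(logits, outputs_rest, loss, token_accuracy):
    # Mirrors the `if not return_dict:` branch in the patched forwards.
    output = (logits,) + outputs_rest
    output = ((loss,) + output) if loss is not None else output
    output = output + (token_accuracy,) if token_accuracy is not None else output
    return output


# Training-style call: loss comes first, token_accuracy is appended last.
assert pack_output("logits", ("pkv",), "loss", 0.97) == ("loss", "logits", "pkv", 0.97)
# Inference-style call: neither loss nor accuracy is present.
assert pack_output("logits", ("pkv",), None, None) == ("logits", "pkv")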
@@ -0,0 +1,163 @@
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import torch
+
+from transformers.utils.deprecation import deprecate_kwarg
+
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+
+
+@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+def lce_forward(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+
+    Example:
+
+    ```python
+    >>> from PIL import Image
+    >>> from transformers import AutoTokenizer, Glm4vForConditionalGeneration
+
+    >>> MODEL_PATH = "THUDM/GLM-4.1V-9B-Thinking"
+    >>> messages = [
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "image",
+                        "url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
+                    },
+                    {
+                        "type": "text",
+                        "text": "describe this image"
+                    }
+                ],
+            }
+        ]
+    >>> processor = AutoProcessor.from_pretrained(MODEL_PATH, use_fast=True)
+    >>> model = Glm4vForConditionalGeneration.from_pretrained(
+            pretrained_model_name_or_path=MODEL_PATH,
+            dtype=torch.bfloat16,
+            device_map="auto",
+        )
+    >>> inputs = processor.apply_chat_template(
+            messages,
+            tokenize=True,
+            add_generation_prompt=True,
+            return_dict=True,
+            return_tensors="pt"
+        ).to(model.device)
+    >>> generated_ids = model.generate(**inputs, max_new_tokens=8192)
+    output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
+    <think>Got it, let's describe the image. First, there's a vintage car, specifically a Volkswagen Beetle
+    ```"""
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
+    shift_labels = kwargs.pop("shift_labels", None)
+    logits = None
+    loss = None
+    token_accuracy = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    # Compute loss
+    if skip_logits:
+        result = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if labels is not None or shift_labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                shift_labels=shift_labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        output = ((loss,) + output) if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
+
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
+    )
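The all-new file above appears to be liger_kernel/transformers/model/glm4v.py (the Glm4v docstring and the +163 -0 count match). End to end, the user-visible effect of this release's refactor is that a patched, labeled, training-mode forward can report token accuracy next to the loss. A hedged usage sketch (the model choice is illustrative; apply_liger_kernel_to_qwen2 is one of the long-standing entry points, and whether token_accuracy is populated by default is an assumption):

import torch
from transformers import AutoModelForCausalLM

from liger_kernel.transformers import apply_liger_kernel_to_qwen2

apply_liger_kernel_to_qwen2()  # patch Qwen2 forwards with the Liger kernels
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B").train()

input_ids = torch.randint(0, model.config.vocab_size, (1, 16))
out = model(input_ids=input_ids, labels=input_ids)
print(out.loss)            # scalar training loss from the fused kernel
print(out.token_accuracy)  # new field; may be None if accuracy reporting is off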