liger-kernel-nightly 0.5.10.dev20250624183504__py3-none-any.whl → 0.6.4.dev20251121224847__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of liger-kernel-nightly might be problematic.
- liger_kernel/chunked_loss/__init__.py +1 -0
- liger_kernel/chunked_loss/cosine_similarity_loss.py +136 -0
- liger_kernel/chunked_loss/dpo_loss.py +54 -3
- liger_kernel/chunked_loss/functional.py +2 -0
- liger_kernel/chunked_loss/fused_linear_distillation.py +13 -2
- liger_kernel/chunked_loss/fused_linear_ppo.py +25 -5
- liger_kernel/chunked_loss/grpo_loss.py +46 -9
- liger_kernel/chunked_loss/jsd_loss.py +23 -7
- liger_kernel/ops/cross_entropy.py +118 -62
- liger_kernel/ops/fused_add_rms_norm.py +412 -0
- liger_kernel/ops/fused_linear_cross_entropy.py +113 -21
- liger_kernel/ops/geglu.py +1 -1
- liger_kernel/ops/grpo_loss.py +3 -1
- liger_kernel/ops/layer_norm.py +133 -79
- liger_kernel/ops/llama4_rope.py +225 -0
- liger_kernel/ops/poly_norm.py +386 -0
- liger_kernel/ops/rms_norm.py +2 -2
- liger_kernel/ops/rope.py +1 -1
- liger_kernel/ops/swiglu.py +1 -1
- liger_kernel/ops/tiled_mlp.py +136 -0
- liger_kernel/transformers/__init__.py +59 -0
- liger_kernel/transformers/cross_entropy.py +8 -3
- liger_kernel/transformers/experimental/__init__.py +5 -0
- liger_kernel/transformers/functional.py +38 -6
- liger_kernel/transformers/fused_add_rms_norm.py +39 -0
- liger_kernel/transformers/fused_linear_cross_entropy.py +16 -4
- liger_kernel/transformers/grpo_loss.py +56 -1
- liger_kernel/transformers/llama4_rope.py +93 -0
- liger_kernel/transformers/model/falcon_h1.py +122 -0
- liger_kernel/transformers/model/gemma.py +28 -8
- liger_kernel/transformers/model/gemma2.py +31 -8
- liger_kernel/transformers/model/gemma3.py +100 -110
- liger_kernel/transformers/model/glm4.py +18 -5
- liger_kernel/transformers/model/glm4v.py +163 -0
- liger_kernel/transformers/model/glm4v_moe.py +172 -0
- liger_kernel/transformers/model/hunyuan_v1.py +134 -0
- liger_kernel/transformers/model/internvl.py +157 -0
- liger_kernel/transformers/model/llama.py +26 -7
- liger_kernel/transformers/model/llama4.py +121 -0
- liger_kernel/transformers/model/llava.py +18 -6
- liger_kernel/transformers/model/loss_utils.py +34 -3
- liger_kernel/transformers/model/mistral.py +17 -10
- liger_kernel/transformers/model/mixtral.py +24 -9
- liger_kernel/transformers/model/mllama.py +18 -7
- liger_kernel/transformers/model/olmo2.py +18 -5
- liger_kernel/transformers/model/olmo3.py +142 -0
- liger_kernel/transformers/model/output_classes.py +147 -0
- liger_kernel/transformers/model/paligemma.py +41 -5
- liger_kernel/transformers/model/phi3.py +24 -159
- liger_kernel/transformers/model/qwen2.py +26 -4
- liger_kernel/transformers/model/qwen2_5_vl.py +21 -8
- liger_kernel/transformers/model/qwen2_vl.py +24 -7
- liger_kernel/transformers/model/qwen3.py +22 -6
- liger_kernel/transformers/model/qwen3_moe.py +27 -7
- liger_kernel/transformers/model/qwen3_next.py +146 -0
- liger_kernel/transformers/model/qwen3_vl.py +150 -0
- liger_kernel/transformers/model/qwen3_vl_moe.py +126 -0
- liger_kernel/transformers/model/smollm3.py +199 -0
- liger_kernel/transformers/model/smolvlm.py +158 -0
- liger_kernel/transformers/monkey_patch.py +1278 -116
- liger_kernel/transformers/multi_token_attention.py +1 -1
- liger_kernel/transformers/poly_norm.py +42 -0
- liger_kernel/transformers/rms_norm.py +7 -0
- liger_kernel/transformers/rope.py +43 -0
- liger_kernel/transformers/swiglu.py +17 -0
- liger_kernel/transformers/tiled_mlp.py +133 -0
- {liger_kernel_nightly-0.5.10.dev20250624183504.dist-info → liger_kernel_nightly-0.6.4.dev20251121224847.dist-info}/METADATA +29 -24
- liger_kernel_nightly-0.6.4.dev20251121224847.dist-info/RECORD +118 -0
- liger_kernel_nightly-0.5.10.dev20250624183504.dist-info/RECORD +0 -95
- {liger_kernel_nightly-0.5.10.dev20250624183504.dist-info → liger_kernel_nightly-0.6.4.dev20251121224847.dist-info}/LICENSE +0 -0
- {liger_kernel_nightly-0.5.10.dev20250624183504.dist-info → liger_kernel_nightly-0.6.4.dev20251121224847.dist-info}/NOTICE +0 -0
- {liger_kernel_nightly-0.5.10.dev20250624183504.dist-info → liger_kernel_nightly-0.6.4.dev20251121224847.dist-info}/WHEEL +0 -0
- {liger_kernel_nightly-0.5.10.dev20250624183504.dist-info → liger_kernel_nightly-0.6.4.dev20251121224847.dist-info}/top_level.txt +0 -0
@@ -5,10 +5,11 @@ from typing import Union
 
 import torch
 
-from transformers.modeling_outputs import CausalLMOutputWithPast
 from transformers.utils.deprecation import deprecate_kwarg
 
 from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
 
 
 @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
@@ -28,7 +29,7 @@ def lce_forward(
     logits_to_keep: Union[int, torch.Tensor] = 0,
     skip_logits: Optional[bool] = None,
     **kwargs,
-) -> Union[Tuple, CausalLMOutputWithPast]:
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
     r"""
     Args:
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
@@ -91,6 +92,7 @@ def lce_forward(
     shift_labels = kwargs.pop("shift_labels", None)
     logits = None
     loss = None
+    token_accuracy = None
 
     if skip_logits and labels is None and shift_labels is None:
         raise ValueError("skip_logits is True, but labels and shift_labels are None")
@@ -99,8 +101,9 @@
         # By default, if in training mode, don't materialize logits
         skip_logits = self.training and (labels is not None or shift_labels is not None)
 
+    # Compute loss
     if skip_logits:
-        loss = LigerForCausalLMLoss(
+        result = LigerForCausalLMLoss(
             hidden_states=kept_hidden_states,
             lm_head_weight=self.lm_head.weight,
             labels=labels,
@@ -108,21 +111,31 @@
             hidden_size=self.config.hidden_size,
             **kwargs,
         )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
 
     else:
         logits = self.lm_head(kept_hidden_states)
-        if labels is not None:
+        if labels is not None or shift_labels is not None:
             loss = self.loss_function(
                 logits=logits,
                 labels=labels,
+                shift_labels=shift_labels,
                 vocab_size=self.config.vocab_size,
                 **kwargs,
             )
 
-    return CausalLMOutputWithPast(
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        output = ((loss,) + output) if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
+
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
         loss=loss,
         logits=logits,
         past_key_values=outputs.past_key_values,
         hidden_states=outputs.hidden_states,
         attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
     )
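The hunks above reroute the fused-loss path through `unpack_cross_entropy_result` and surface a per-token accuracy on the new `LigerCausalLMOutputWithPast.token_accuracy` field. A minimal consumption sketch, assuming a Hugging Face causal LM already patched with this `lce_forward`; `model` and `batch` are placeholders, and `token_accuracy` is assumed to be a scalar tensor (it can be `None` when accuracy is not computed):

```python
# Hypothetical training-step sketch; `model` and `batch` are placeholders, not from the diff.
model.train()  # with labels present, the default skip_logits path avoids materializing logits
outputs = model(
    input_ids=batch["input_ids"],
    attention_mask=batch["attention_mask"],
    labels=batch["labels"],
)
loss = outputs.loss                  # fused linear cross-entropy loss
token_acc = outputs.token_accuracy   # new field in this release; may be None
if token_acc is not None:
    print(f"token_accuracy={float(token_acc):.4f}")
loss.backward()
```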
liger_kernel/transformers/model/glm4v.py (new file)
@@ -0,0 +1,163 @@
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import torch
+
+from transformers.utils.deprecation import deprecate_kwarg
+
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+
+
+@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+def lce_forward(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, LigerCausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+
+    Example:
+
+    ```python
+    >>> from PIL import Image
+    >>> from transformers import AutoTokenizer, Glm4vForConditionalGeneration
+
+    >>> MODEL_PATH = "THUDM/GLM-4.1V-9B-Thinking"
+    >>> messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image",
+                    "url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
+                },
+                {
+                    "type": "text",
+                    "text": "describe this image"
+                }
+            ],
+        }
+    ]
+    >>> processor = AutoProcessor.from_pretrained(MODEL_PATH, use_fast=True)
+    >>> model = Glm4vForConditionalGeneration.from_pretrained(
+        pretrained_model_name_or_path=MODEL_PATH,
+        dtype=torch.bfloat16,
+        device_map="auto",
+    )
+    >>> inputs = processor.apply_chat_template(
+        messages,
+        tokenize=True,
+        add_generation_prompt=True,
+        return_dict=True,
+        return_tensors="pt"
+    ).to(model.device)
+    >>> generated_ids = model.generate(**inputs, max_new_tokens=8192)
+    output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
+    <think>Got it, let's describe the image. First, there's a vintage car, specifically a Volkswagen Beetle
+    ```"""
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        return_dict=return_dict,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
+    shift_labels = kwargs.pop("shift_labels", None)
+    logits = None
+    loss = None
+    token_accuracy = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    # Compute loss
+    if skip_logits:
+        result = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if labels is not None or shift_labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                shift_labels=shift_labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        output = ((loss,) + output) if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
+
+    # Return custom output class with token_accuracy field
+    return LigerCausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
+    )
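When `return_dict=False`, these forwards fall back to a plain tuple: `loss` is prepended only when it was computed, and `token_accuracy` is appended last only when it is not `None`. A small sketch of reading that tuple, based on the `if not return_dict:` branch above; `model`, `ids`, and `labels` are placeholders:

```python
# Tuple-path sketch (placeholders only; ordering taken from the branch above).
out = model(input_ids=ids, labels=labels, return_dict=False)
loss = out[0]        # present because labels were passed
logits = out[1]      # None when the fused skip_logits path was taken
token_acc = out[-1]  # appended last when token accuracy was computed
```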
liger_kernel/transformers/model/glm4v_moe.py (new file)
@@ -0,0 +1,172 @@
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import torch
+
+from transformers.utils.deprecation import deprecate_kwarg
+
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerGlm4vMoeCausalLMOutputWithPast
+
+
+@deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+def lce_forward(
+    self,
+    input_ids: torch.LongTensor = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[list[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    pixel_values: Optional[torch.Tensor] = None,
+    pixel_values_videos: Optional[torch.FloatTensor] = None,
+    image_grid_thw: Optional[torch.LongTensor] = None,
+    video_grid_thw: Optional[torch.LongTensor] = None,
+    rope_deltas: Optional[torch.LongTensor] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    **kwargs,
+) -> Union[Tuple, LigerGlm4vMoeCausalLMOutputWithPast]:
+    r"""
+    Args:
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+            The temporal, height and width of feature shape of each image in LLM.
+        video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+            The temporal, height and width of feature shape of each video in LLM.
+        rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+            The rope index difference between sequence length and multimodal rope.
+
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Example:
+
+    ```python
+    >>> from transformers import AutoProcessor, Glm4vMoeForConditionalGeneration
+    >>> import torch
+
+    >>> MODEL_PATH = "zai-org/GLM-4.5V"
+    >>> messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image",
+                    "url": "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
+                },
+                {
+                    "type": "text",
+                    "text": "describe this image"
+                }
+            ],
+        }
+    ]
+    >>> processor = AutoProcessor.from_pretrained(MODEL_PATH)
+    >>> model = Glm4vMoeForConditionalGeneration.from_pretrained(
+        pretrained_model_name_or_path=MODEL_PATH,
+        dtype="auto",
+        device_map="auto",
+    )
+    >>> inputs = processor.apply_chat_template(
+        messages,
+        tokenize=True,
+        add_generation_prompt=True,
+        return_dict=True,
+        return_tensors="pt"
+    ).to(model.device)
+    >>> inputs.pop("token_type_ids", None)
+    >>> generated_ids = model.generate(**inputs, max_new_tokens=8192)
+    >>> output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
+    ```
+    """
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        pixel_values=pixel_values,
+        pixel_values_videos=pixel_values_videos,
+        image_grid_thw=image_grid_thw,
+        video_grid_thw=video_grid_thw,
+        position_ids=position_ids,
+        attention_mask=attention_mask,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
+    shift_labels = kwargs.pop("shift_labels", None)
+    logits = None
+    loss = None
+    token_accuracy = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    # Compute loss
+    if skip_logits:
+        result = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if labels is not None or shift_labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                shift_labels=shift_labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        output = ((loss,) + output) if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
+
+    # Build output kwargs and include aux_loss only if present (depends on transformers version)
+    output_kwargs = dict(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+        rope_deltas=outputs.rope_deltas,
+        token_accuracy=token_accuracy,
+    )
+    if hasattr(outputs, "aux_loss"):
+        output_kwargs["aux_loss"] = outputs.aux_loss
+
+    # Return GLM4V MoE output with accuracy
+    return LigerGlm4vMoeCausalLMOutputWithPast(**output_kwargs)
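The MoE variant builds its output kwargs dynamically and attaches `aux_loss` only when the decoder outputs expose one, since that attribute depends on the installed transformers version. A hedged, caller-side sketch of reading it defensively; `outputs` stands for the returned `LigerGlm4vMoeCausalLMOutputWithPast`:

```python
# Defensive access sketch: aux_loss may be absent depending on the transformers version.
aux_loss = getattr(outputs, "aux_loss", None)
token_acc = outputs.token_accuracy  # may be None when accuracy was not computed
if aux_loss is not None:
    print(f"router aux_loss={float(aux_loss):.4f}")
```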
liger_kernel/transformers/model/hunyuan_v1.py (new file)
@@ -0,0 +1,134 @@
+from typing import List
+from typing import Optional
+from typing import Union
+
+import torch
+
+from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
+from liger_kernel.transformers.model.loss_utils import unpack_cross_entropy_result
+from liger_kernel.transformers.model.output_classes import LigerCausalLMOutputWithPast
+
+
+def lce_forward(
+    self,
+    input_ids: Optional[torch.LongTensor] = None,
+    attention_mask: Optional[torch.Tensor] = None,
+    position_ids: Optional[torch.LongTensor] = None,
+    past_key_values: Optional[List[torch.FloatTensor]] = None,
+    inputs_embeds: Optional[torch.FloatTensor] = None,
+    labels: Optional[torch.LongTensor] = None,
+    use_cache: Optional[bool] = None,
+    output_attentions: Optional[bool] = None,
+    output_hidden_states: Optional[bool] = None,
+    cache_position: Optional[torch.LongTensor] = None,
+    logits_to_keep: Union[int, torch.Tensor] = 0,
+    skip_logits: Optional[bool] = None,
+    return_dict: Optional[bool] = None,
+    **kwargs,
+) -> LigerCausalLMOutputWithPast:
+    r"""
+    labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+        Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+        config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+        (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+    logits_to_keep (`int` or `torch.Tensor`, *optional*):
+        If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+        `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+        token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+        If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+        This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+    Returns:
+
+    Example:
+
+    ```python
+    >>> from transformers import AutoTokenizer, HunYuanDenseV1ForCausalLM
+
+    >>> model = HunYuanDenseV1ForCausalLM.from_pretrained("meta-hunyuan_v1_dense/HunYuanDenseV1-2-7b-hf")
+    >>> tokenizer = AutoTokenizer.from_pretrained("meta-hunyuan_v1_dense/HunYuanDenseV1-2-7b-hf")
+
+    >>> prompt = "Hey, are you conscious? Can you talk to me?"
+    >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+    >>> # Generate
+    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+    "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+    ```"""
+    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+    output_hidden_states = (
+        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+    )
+    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+    outputs = self.model(
+        input_ids=input_ids,
+        attention_mask=attention_mask,
+        position_ids=position_ids,
+        past_key_values=past_key_values,
+        inputs_embeds=inputs_embeds,
+        use_cache=use_cache,
+        output_attentions=output_attentions,
+        output_hidden_states=output_hidden_states,
+        cache_position=cache_position,
+        **kwargs,
+    )
+
+    hidden_states = outputs[0]
+    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+    slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+    kept_hidden_states = hidden_states[:, slice_indices, :]
+
+    shift_labels = kwargs.pop("shift_labels", None)
+    logits = None
+    loss = None
+    token_accuracy = None
+
+    if skip_logits and labels is None and shift_labels is None:
+        raise ValueError("skip_logits is True, but labels and shift_labels are None")
+
+    if skip_logits is None:
+        # By default, if in training mode, don't materialize logits
+        skip_logits = self.training and (labels is not None or shift_labels is not None)
+
+    # Compute loss
+    if skip_logits:
+        result = LigerForCausalLMLoss(
+            hidden_states=kept_hidden_states,
+            lm_head_weight=self.lm_head.weight,
+            labels=labels,
+            shift_labels=shift_labels,
+            hidden_size=self.config.hidden_size,
+            **kwargs,
+        )
+        loss, _, token_accuracy = unpack_cross_entropy_result(result)
+
+    else:
+        logits = self.lm_head(kept_hidden_states)
+        if labels is not None or shift_labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                shift_labels=shift_labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+    if not return_dict:
+        output = (logits,) + outputs[1:]
+        output = ((loss,) + output) if loss is not None else output
+        output = output + (token_accuracy,) if token_accuracy is not None else output
+        return output
+
+    # Return custom output class with accuracy field
+    return LigerCausalLMOutputWithPast(
+        loss=loss,
+        logits=logits,
+        past_key_values=outputs.past_key_values,
+        hidden_states=outputs.hidden_states,
+        attentions=outputs.attentions,
+        token_accuracy=token_accuracy,
+    )
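All of these forwards share the same `skip_logits` contract: it defaults to `self.training and (labels is not None or shift_labels is not None)`, and passing `skip_logits=True` without labels raises a `ValueError`. A short sketch of overriding the default to keep logits during training; hedged, with `model` and `batch` as placeholders:

```python
# Force logits materialization during a training forward (e.g. to log predictions).
outputs = model(
    input_ids=batch["input_ids"],
    labels=batch["labels"],
    skip_logits=False,
)
assert outputs.logits is not None

# Passing skip_logits=True without labels or shift_labels is rejected by these forwards:
# model(input_ids=batch["input_ids"], skip_logits=True)  # -> ValueError
```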