fusion-bench 0.2.13__py3-none-any.whl → 0.2.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. fusion_bench/method/ada_svd/clip_vision.py +4 -1
  2. fusion_bench/method/smile_upscaling/smile_mistral_upscaling.py +46 -145
  3. fusion_bench/method/smile_upscaling/smile_qwen2_upscaling.py +229 -0
  4. fusion_bench/method/smile_upscaling/smile_upscaling.py +6 -336
  5. fusion_bench/modelpool/causal_lm/causal_lm.py +73 -10
  6. fusion_bench/models/modeling_smile_mistral/modeling_smile_mistral.py +2 -203
  7. fusion_bench/models/modeling_smile_qwen2/__init__.py +8 -0
  8. fusion_bench/models/modeling_smile_qwen2/configuration_smile_qwen2.py +21 -0
  9. fusion_bench/models/modeling_smile_qwen2/modeling_smile_qwen2.py +922 -0
  10. fusion_bench/models/modeling_smile_qwen2/register.py +11 -0
  11. fusion_bench/models/rankone_moe.py +2 -88
  12. fusion_bench/models/smile_moe/linear_from_hf_config.py +373 -0
  13. fusion_bench/models/smile_moe/{linear.py → linear_from_module.py} +103 -33
  14. fusion_bench/models/smile_moe/utils/__init__.py +24 -0
  15. fusion_bench/models/smile_moe/utils/svd_utils.py +46 -0
  16. fusion_bench/taskpool/__init__.py +2 -0
  17. fusion_bench/taskpool/lm_eval_harness/__init__.py +3 -0
  18. fusion_bench/taskpool/lm_eval_harness/taskpool.py +87 -0
  19. {fusion_bench-0.2.13.dist-info → fusion_bench-0.2.15.dist-info}/METADATA +26 -3
  20. {fusion_bench-0.2.13.dist-info → fusion_bench-0.2.15.dist-info}/RECORD +36 -15
  21. {fusion_bench-0.2.13.dist-info → fusion_bench-0.2.15.dist-info}/WHEEL +1 -1
  22. fusion_bench_config/method/smile_upscaling/smile_mistral_upscaling.yaml +5 -2
  23. fusion_bench_config/method/smile_upscaling/smile_qwen2_upscaling.yaml +13 -0
  24. fusion_bench_config/modelpool/CausalLMPool/mergebench/Llama-3.1-8B-Instruct.yaml +11 -0
  25. fusion_bench_config/modelpool/CausalLMPool/mergebench/Llama-3.1-8B.yaml +11 -0
  26. fusion_bench_config/modelpool/CausalLMPool/mergebench/Llama-3.2-3B-Instruct.yaml +11 -0
  27. fusion_bench_config/modelpool/CausalLMPool/mergebench/Llama-3.2-3B.yaml +11 -0
  28. fusion_bench_config/modelpool/CausalLMPool/mergebench/gemma-2-2b-it.yaml +11 -0
  29. fusion_bench_config/modelpool/CausalLMPool/mergebench/gemma-2-2b.yaml +11 -0
  30. fusion_bench_config/modelpool/CausalLMPool/mergebench/gemma-2-9b-it.yaml +11 -0
  31. fusion_bench_config/modelpool/CausalLMPool/mergebench/gemma-2-9b.yaml +11 -0
  32. fusion_bench_config/modelpool/CausalLMPool/qwen2_math_1.5B_and_R1.yaml +17 -0
  33. fusion_bench_config/taskpool/LMEvalHarnessTaskPool/lm_eval.yaml +12 -0
  34. {fusion_bench-0.2.13.dist-info → fusion_bench-0.2.15.dist-info}/entry_points.txt +0 -0
  35. {fusion_bench-0.2.13.dist-info → fusion_bench-0.2.15.dist-info}/licenses/LICENSE +0 -0
  36. {fusion_bench-0.2.13.dist-info → fusion_bench-0.2.15.dist-info}/top_level.txt +0 -0
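The bulk of this release is the new `modeling_smile_qwen2.py` shown in the hunk below. It mirrors the upstream Hugging Face `Qwen2` implementation, but every `nn.Linear` projection in the attention and MLP blocks is replaced by a `SmileLinear` from the newly added `fusion_bench/models/smile_moe/linear_from_hf_config.py`. A minimal sketch of that substitution pattern, using only the constructor signature visible in the diff (the internals of `SmileLinear` and the SMILE-specific fields of `SmileQwen2Config` are not part of this hunk):

```python
# Sketch only: the SmileLinear(config, in_features, out_features, bias=...) call shape is
# taken from the diff below; `upcycle_qwen2_mlp` is a hypothetical helper, not part of
# fusion-bench, shown purely to illustrate the replacement performed in SmileQwen2MLP.
from torch import nn

from fusion_bench.models.smile_moe.linear_from_hf_config import SmileLinear


def upcycle_qwen2_mlp(mlp: nn.Module, config) -> nn.Module:
    """Swap the three Qwen2 MLP projections for SmileLinear modules (illustrative only)."""
    mlp.gate_proj = SmileLinear(config, config.hidden_size, config.intermediate_size, bias=False)
    mlp.up_proj = SmileLinear(config, config.hidden_size, config.intermediate_size, bias=False)
    mlp.down_proj = SmileLinear(config, config.intermediate_size, config.hidden_size, bias=False)
    return mlp
```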
fusion_bench/models/modeling_smile_qwen2/modeling_smile_qwen2.py (new file)
@@ -0,0 +1,922 @@
+ import logging
+ from functools import partial
+ from typing import Callable, Optional, Tuple, Union
+
+ import torch
+ from torch import nn
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import (
+     Cache,
+     DynamicCache,
+     SlidingWindowCache,
+     StaticCache,
+ )
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     QuestionAnsweringModelOutput,
+     SequenceClassifierOutputWithPast,
+     TokenClassifierOutput,
+ )
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.models.qwen2.modeling_qwen2 import (
+     QWEN2_INPUTS_DOCSTRING,
+     Qwen2RMSNorm,
+     Qwen2RotaryEmbedding,
+     apply_rotary_pos_emb,
+     eager_attention_forward,
+ )
+ from transformers.processing_utils import Unpack
+ from transformers.utils import (
+     LossKwargs,
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     can_return_tuple,
+     logging,
+     replace_return_docstrings,
+ )
+ from transformers.utils.deprecation import deprecate_kwarg
+
+ from fusion_bench.models.smile_moe.linear_from_hf_config import SmileLinear
+
+ from .configuration_smile_qwen2 import SmileQwen2Config
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "SmileQwen2Config"
+
+
+ class SmileQwen2MLP(nn.Module):
+     def __init__(self, config: SmileQwen2Config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         # --- replace the linear modules with SmileLinear ---
+         self.gate_proj = SmileLinear(
+             config, self.hidden_size, self.intermediate_size, bias=False
+         )
+         self.up_proj = SmileLinear(
+             config, self.hidden_size, self.intermediate_size, bias=False
+         )
+         self.down_proj = SmileLinear(
+             config, self.intermediate_size, self.hidden_size, bias=False
+         )
+         # --- end of replacement ---
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
+
+
+ class SmileQwen2Attention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: SmileQwen2Config, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.head_dim = getattr(
+             config, "head_dim", config.hidden_size // config.num_attention_heads
+         )
+         self.num_key_value_groups = (
+             config.num_attention_heads // config.num_key_value_heads
+         )
+         self.scaling = self.head_dim**-0.5
+         self.attention_dropout = config.attention_dropout
+         self.is_causal = True
+         # --- replace the linear modules with SmileLinear ---
+         self.q_proj = SmileLinear(
+             config,
+             config.hidden_size,
+             config.num_attention_heads * self.head_dim,
+             bias=True,
+         )
+         self.k_proj = SmileLinear(
+             config,
+             config.hidden_size,
+             config.num_key_value_heads * self.head_dim,
+             bias=True,
+         )
+         self.v_proj = SmileLinear(
+             config,
+             config.hidden_size,
+             config.num_key_value_heads * self.head_dim,
+             bias=True,
+         )
+         self.o_proj = SmileLinear(
+             config,
+             config.num_attention_heads * self.head_dim,
+             config.hidden_size,
+             bias=False,
+         )
+         # --- end of replacement ---
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor],
+         past_key_value: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         input_shape = hidden_states.shape[:-1]
+         hidden_shape = (*input_shape, -1, self.head_dim)
+
+         query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb(
+             query_states, key_states, cos, sin
+         )
+
+         if past_key_value is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_value.update(
+                 key_states, value_states, self.layer_idx, cache_kwargs
+             )
+
+         sliding_window = None
+         if (
+             self.config.use_sliding_window
+             and getattr(self.config, "sliding_window", None) is not None
+             and self.layer_idx >= self.config.max_window_layers
+         ):
+             sliding_window = self.config.sliding_window
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             if self.config._attn_implementation == "sdpa" and kwargs.get(
+                 "output_attentions", False
+             ):
+                 logger.warning_once(
+                     "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+                     'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+                 )
+             else:
+                 attention_interface = ALL_ATTENTION_FUNCTIONS[
+                     self.config._attn_implementation
+                 ]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             sliding_window=sliding_window,  # main diff with Llama
+             **kwargs,
+         )
+
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+
+ class SmileQwen2DecoderLayer(nn.Module):
+     def __init__(self, config: SmileQwen2Config, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         # --- replace attention and MLP with SmileQwen2Attention and SmileQwen2MLP ---
+         self.self_attn = SmileQwen2Attention(config=config, layer_idx=layer_idx)
+         self.mlp = SmileQwen2MLP(config)
+         # --- end of replacement ---
+         self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = Qwen2RMSNorm(
+             config.hidden_size, eps=config.rms_norm_eps
+         )
+         if config.sliding_window and config._attn_implementation != "flash_attention_2":
+             logger.warning_once(
+                 f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
+                 "unexpected results may be encountered."
+             )
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Cache] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         cache_position: Optional[torch.LongTensor] = None,
+         position_embeddings: Optional[
+             Tuple[torch.Tensor, torch.Tensor]
+         ] = None,  # necessary, but kept here for BC
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> Tuple[
+         torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
+     ]:
+         residual = hidden_states
+
+         hidden_states = self.input_layernorm(hidden_states)
+
+         # Self Attention
+         hidden_states, self_attn_weights = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_value=past_key_value,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             position_embeddings=position_embeddings,
+             **kwargs,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states,)
+         if output_attentions:
+             outputs += (self_attn_weights,)
+
+         return outputs
+
+
+ class SmileQwen2PreTrainedModel(PreTrainedModel):
+     config_class = SmileQwen2Config
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["SmileQwen2DecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_flash_attn_2 = True
+     _supports_sdpa = True
+     _supports_flex_attn = True
+     _supports_cache_class = True
+     _supports_quantized_cache = True
+     _supports_static_cache = True
+     _supports_attention_backend = True
+
+     def _init_weights(self, module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+
+ class SmileQwen2Model(SmileQwen2PreTrainedModel):
+     """
+     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`SmileQwen2DecoderLayer`]
+
+     Args:
+         config: SmileQwen2Config
+     """
+
+     def __init__(self, config: SmileQwen2Config):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(
+             config.vocab_size, config.hidden_size, self.padding_idx
+         )
+         self.layers = nn.ModuleList(
+             [
+                 SmileQwen2DecoderLayer(
+                     config, layer_idx
+                 )  # * replace Qwen2DecoderLayer with SmileQwen2DecoderLayer
+                 for layer_idx in range(config.num_hidden_layers)
+             ]
+         )
+         self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = Qwen2RotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.embed_tokens = value
+
+     @can_return_tuple
+     @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+     ) -> BaseModelOutputWithPast:
+         output_attentions = (
+             output_attentions
+             if output_attentions is not None
+             else self.config.output_attentions
+         )
+         output_hidden_states = (
+             output_hidden_states
+             if output_hidden_states is not None
+             else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError(
+                 "You must specify exactly one of input_ids or inputs_embeds"
+             )
+
+         if self.gradient_checkpointing and self.training and use_cache:
+             logger.warning_once(
+                 "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+             )
+             use_cache = False
+
+         # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
+         if not isinstance(past_key_values, (type(None), Cache)):
+             raise ValueError(
+                 "The `past_key_values` should be either a `Cache` object or `None`."
+             )
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+
+         if use_cache and past_key_values is None:
+             past_key_values = DynamicCache()
+
+         if cache_position is None:
+             past_seen_tokens = (
+                 past_key_values.get_seq_length() if past_key_values is not None else 0
+             )
+             cache_position = torch.arange(
+                 past_seen_tokens,
+                 past_seen_tokens + inputs_embeds.shape[1],
+                 device=inputs_embeds.device,
+             )
+
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         causal_mask = self._update_causal_mask(
+             attention_mask,
+             inputs_embeds,
+             cache_position,
+             past_key_values,
+             output_attentions,
+         )
+
+         hidden_states = inputs_embeds
+
+         # create position embeddings to be shared across the decoder layers
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+         # decoder layers
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attns = () if output_attentions else None
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = self._gradient_checkpointing_func(
+                     partial(decoder_layer.__call__, **flash_attn_kwargs),
+                     hidden_states,
+                     causal_mask,
+                     position_ids,
+                     past_key_values,
+                     output_attentions,
+                     use_cache,
+                     cache_position,
+                     position_embeddings,
+                 )
+             else:
+                 layer_outputs = decoder_layer(
+                     hidden_states,
+                     attention_mask=causal_mask,
+                     position_ids=position_ids,
+                     past_key_value=past_key_values,
+                     output_attentions=output_attentions,
+                     use_cache=use_cache,
+                     cache_position=cache_position,
+                     position_embeddings=position_embeddings,
+                     **flash_attn_kwargs,
+                 )
+
+             hidden_states = layer_outputs[0]
+
+             if output_attentions:
+                 all_self_attns += (layer_outputs[1],)
+
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values if use_cache else None,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attns,
+         )
+
+     def _update_causal_mask(
+         self,
+         attention_mask: torch.Tensor,
+         input_tensor: torch.Tensor,
+         cache_position: torch.Tensor,
+         past_key_values: Cache,
+         output_attentions: bool = False,
+     ):
+         if self.config._attn_implementation == "flash_attention_2":
+             if attention_mask is not None and past_key_values is not None:
+                 is_padding_right = (
+                     attention_mask[:, -1].sum().item() != input_tensor.size()[0]
+                 )
+                 if is_padding_right:
+                     raise ValueError(
+                         "You are attempting to perform batched generation with padding_side='right'"
+                         " this may lead to unexpected behaviour for Flash Attention version of Qwen2. Make sure to "
+                         " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
+                     )
+             if attention_mask is not None and 0.0 in attention_mask:
+                 return attention_mask
+             return None
+
+         # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+         # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+         # to infer the attention mask.
+         past_seen_tokens = (
+             past_key_values.get_seq_length() if past_key_values is not None else 0
+         )
+         using_static_cache = isinstance(past_key_values, StaticCache)
+         using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
+
+         # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+         if (
+             self.config._attn_implementation == "sdpa"
+             and not (using_static_cache or using_sliding_window_cache)
+             and not output_attentions
+         ):
+             if AttentionMaskConverter._ignore_causal_mask_sdpa(
+                 attention_mask,
+                 inputs_embeds=input_tensor,
+                 past_key_values_length=past_seen_tokens,
+                 sliding_window=self.config.sliding_window,
+                 is_training=self.training,
+             ):
+                 return None
+
+         dtype, device = input_tensor.dtype, input_tensor.device
+         min_dtype = torch.finfo(dtype).min
+         sequence_length = input_tensor.shape[1]
+         # SlidingWindowCache or StaticCache
+         if using_sliding_window_cache or using_static_cache:
+             target_length = past_key_values.get_max_cache_shape()
+         # DynamicCache or no cache
+         else:
+             target_length = (
+                 attention_mask.shape[-1]
+                 if isinstance(attention_mask, torch.Tensor)
+                 else past_seen_tokens + sequence_length + 1
+             )
+
+         # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
+         causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+             attention_mask,
+             sequence_length=sequence_length,
+             target_length=target_length,
+             dtype=dtype,
+             device=device,
+             cache_position=cache_position,
+             batch_size=input_tensor.shape[0],
+             config=self.config,
+             past_key_values=past_key_values,
+         )
+
+         if (
+             self.config._attn_implementation == "sdpa"
+             and attention_mask is not None
+             and attention_mask.device.type in ["cuda", "xpu"]
+             and not output_attentions
+         ):
+             # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+             # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+             # Details: https://github.com/pytorch/pytorch/issues/110213
+             causal_mask = AttentionMaskConverter._unmask_unattended(
+                 causal_mask, min_dtype
+             )
+
+         return causal_mask
+
+     @staticmethod
+     def _prepare_4d_causal_attention_mask_with_cache_position(
+         attention_mask: torch.Tensor,
+         sequence_length: int,
+         target_length: int,
+         dtype: torch.dtype,
+         device: torch.device,
+         cache_position: torch.Tensor,
+         batch_size: int,
+         config: SmileQwen2Config,
+         past_key_values: Cache,
+     ):
+         """
+         Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+         `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+         Args:
+             attention_mask (`torch.Tensor`):
+                 A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
+             sequence_length (`int`):
+                 The sequence length being processed.
+             target_length (`int`):
+                 The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
+             dtype (`torch.dtype`):
+                 The dtype to use for the 4D attention mask.
+             device (`torch.device`):
+                 The device to place the 4D attention mask on.
+             cache_position (`torch.Tensor`):
+                 Indices depicting the position of the input sequence tokens in the sequence.
+             batch_size (`torch.Tensor`):
+                 Batch size.
+             config (`Qwen2Config`):
+                 The model's configuration class
+             past_key_values (`Cache`):
+                 The cache class that is being used currently to generate
+         """
+         if attention_mask is not None and attention_mask.dim() == 4:
+             # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+             causal_mask = attention_mask
+         else:
+             min_dtype = torch.finfo(dtype).min
+             causal_mask = torch.full(
+                 (sequence_length, target_length),
+                 fill_value=min_dtype,
+                 dtype=dtype,
+                 device=device,
+             )
+             diagonal_attend_mask = torch.arange(
+                 target_length, device=device
+             ) > cache_position.reshape(-1, 1)
+             if config.sliding_window is not None:
+                 # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
+                 # the check is needed to verify is current checkpoint was trained with sliding window or not
+                 if (
+                     not isinstance(past_key_values, SlidingWindowCache)
+                     or sequence_length > target_length
+                 ):
+                     sliding_attend_mask = torch.arange(
+                         target_length, device=device
+                     ) <= (cache_position.reshape(-1, 1) - config.sliding_window)
+                     diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
+             causal_mask *= diagonal_attend_mask
+             causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+             if attention_mask is not None:
+                 causal_mask = (
+                     causal_mask.clone()
+                 )  # copy to contiguous memory for in-place edit
+                 if attention_mask.shape[-1] > target_length:
+                     attention_mask = attention_mask[:, :target_length]
+                 mask_length = attention_mask.shape[-1]
+                 padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[
+                     :, None, None, :
+                 ].to(causal_mask.device)
+                 padding_mask = padding_mask == 0
+                 causal_mask[:, :, :, :mask_length] = causal_mask[
+                     :, :, :, :mask_length
+                 ].masked_fill(padding_mask, min_dtype)
+         return causal_mask
+
+
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
+
+
+ class SmileQwen2ForCausalLM(SmileQwen2PreTrainedModel, GenerationMixin):
+     _tied_weights_keys = ["lm_head.weight"]
+     _tp_plan = {"lm_head": "colwise_rep"}
+     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = SmileQwen2Model(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     @can_return_tuple
+     @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+     @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
+     @replace_return_docstrings(
+         output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         logits_to_keep: Union[int, torch.Tensor] = 0,
+         **kwargs: Unpack[KwargsForCausalLM],
+     ) -> CausalLMOutputWithPast:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         logits_to_keep (`int` or `torch.Tensor`, *optional*):
+             If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+             `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+             token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+             If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+             This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, Qwen2ForCausalLM
+
+         >>> model = Qwen2ForCausalLM.from_pretrained("meta-qwen2/Qwen2-2-7b-hf")
+         >>> tokenizer = AutoTokenizer.from_pretrained("meta-qwen2/Qwen2-2-7b-hf")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+         output_attentions = (
+             output_attentions
+             if output_attentions is not None
+             else self.config.output_attentions
+         )
+         output_hidden_states = (
+             output_hidden_states
+             if output_hidden_states is not None
+             else self.config.output_hidden_states
+         )
+
+         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs: BaseModelOutputWithPast = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = (
+             slice(-logits_to_keep, None)
+             if isinstance(logits_to_keep, int)
+             else logits_to_keep
+         )
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(
+                 logits=logits,
+                 labels=labels,
+                 vocab_size=self.config.vocab_size,
+                 **kwargs,
+             )
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ class SmileQwen2ForSequenceClassification(SmileQwen2PreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.model = SmileQwen2Model(config)  # * replace Qwen2Model with SmileQwen2Model
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     @can_return_tuple
+     @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+     ) -> SequenceClassifierOutputWithPast:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+
+         transformer_outputs: BaseModelOutputWithPast = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+         )
+         hidden_states = transformer_outputs.last_hidden_state
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size = input_ids.shape[0]
+         else:
+             batch_size = inputs_embeds.shape[0]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError(
+                 "Cannot handle batch sizes > 1 if no padding token is defined."
+             )
+         if self.config.pad_token_id is None:
+             last_non_pad_token = -1
+         elif input_ids is not None:
+             # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
+             non_pad_mask = (input_ids != self.config.pad_token_id).to(
+                 logits.device, torch.int32
+             )
+             token_indices = torch.arange(
+                 input_ids.shape[-1], device=logits.device, dtype=torch.int32
+             )
+             last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
+         else:
+             last_non_pad_token = -1
+             logger.warning_once(
+                 f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+                 "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+             )
+
+         pooled_logits = logits[
+             torch.arange(batch_size, device=logits.device), last_non_pad_token
+         ]
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(
+                 logits=logits,
+                 labels=labels,
+                 pooled_logits=pooled_logits,
+                 config=self.config,
+             )
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
+
+ class SmileQwen2ForQuestionAnswering(SmileQwen2PreTrainedModel):
+     base_model_prefix = "transformer"
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.transformer = SmileQwen2Model(config)  # * replace Qwen2Model with SmileQwen2Model
+         self.qa_outputs = nn.Linear(config.hidden_size, 2)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.transformer.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.transformer.embed_tokens = value
+
+     @can_return_tuple
+     @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         start_positions: Optional[torch.LongTensor] = None,
+         end_positions: Optional[torch.LongTensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         **kwargs,
+     ) -> QuestionAnsweringModelOutput:
+         r"""
+         start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for position (index) of the start of the labelled span for computing the token classification loss.
+             Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
+             are not taken into account for computing the loss.
+         end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for position (index) of the end of the labelled span for computing the token classification loss.
+             Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
+             are not taken into account for computing the loss.
+         """
+
+         outputs: BaseModelOutputWithPast = self.transformer(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+         )
+
+         sequence_output = outputs.last_hidden_state
+
+         logits = self.qa_outputs(sequence_output)
+         start_logits, end_logits = logits.split(1, dim=-1)
+         start_logits = start_logits.squeeze(-1).contiguous()
+         end_logits = end_logits.squeeze(-1).contiguous()
+
+         loss = None
+         if start_positions is not None and end_positions is not None:
+             loss = self.loss_function(
+                 start_logits, end_logits, start_positions, end_positions, **kwargs
+             )
+
+         return QuestionAnsweringModelOutput(
+             loss=loss,
+             start_logits=start_logits,
+             end_logits=end_logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
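
For reference, the 4D mask construction in this file can be exercised on its own. A small sketch, assuming only what the static helper above consumes (a `SimpleNamespace` stands in for the config because the helper only reads its `sliding_window` attribute):

```python
# Illustrative check of SmileQwen2Model._prepare_4d_causal_attention_mask_with_cache_position,
# based solely on the code above. With sliding_window=None the past_key_values argument
# is never consulted, so None is passed.
import torch
from types import SimpleNamespace

from fusion_bench.models.modeling_smile_qwen2.modeling_smile_qwen2 import SmileQwen2Model

attention_mask = torch.tensor([[1, 1, 1, 0]])  # one sequence whose final position is padding
causal_mask = SmileQwen2Model._prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask,
    sequence_length=4,
    target_length=4,
    dtype=torch.float32,
    device=torch.device("cpu"),
    cache_position=torch.arange(4),
    batch_size=1,
    config=SimpleNamespace(sliding_window=None),  # stand-in; only `sliding_window` is read
    past_key_values=None,
)
print(causal_mask.shape)  # expected: torch.Size([1, 1, 4, 4])
```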