fusion-bench 0.2.19__py3-none-any.whl → 0.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (193)
  1. fusion_bench/__init__.py +1 -0
  2. fusion_bench/_get_started/__init__.py +3 -0
  3. fusion_bench/_get_started/greeting_program.py +49 -0
  4. fusion_bench/compat/method/base_algorithm.py +14 -0
  5. fusion_bench/constants/__init__.py +5 -0
  6. fusion_bench/constants/clip_vision.py +26 -2
  7. fusion_bench/constants/paths.py +4 -0
  8. fusion_bench/dataset/clip_dataset.py +2 -1
  9. fusion_bench/dataset/gpt2_glue.py +9 -9
  10. fusion_bench/dataset/image_corruption/__init__.py +0 -0
  11. fusion_bench/dataset/image_corruption/make_corruption.py +179 -0
  12. fusion_bench/dataset/image_dataset.py +1 -1
  13. fusion_bench/dataset/nyuv2.py +2 -2
  14. fusion_bench/method/__init__.py +16 -1
  15. fusion_bench/method/adamerging/clip_layer_wise_adamerging.py +1 -1
  16. fusion_bench/method/adamerging/clip_task_wise_adamerging.py +11 -7
  17. fusion_bench/method/adamerging/layer_wise_adamerging.py +11 -5
  18. fusion_bench/method/base_algorithm.py +195 -12
  19. fusion_bench/method/bitdelta/__init__.py +4 -0
  20. fusion_bench/method/bitdelta/bitdelta.py +156 -0
  21. fusion_bench/method/bitdelta/bitdelta_utils/__init__.py +0 -0
  22. fusion_bench/method/bitdelta/bitdelta_utils/binary_gemm_kernel.py +462 -0
  23. fusion_bench/method/bitdelta/bitdelta_utils/data.py +35 -0
  24. fusion_bench/method/bitdelta/bitdelta_utils/diff.py +129 -0
  25. fusion_bench/method/concrete_subspace/clip_concrete_adamerging.py +0 -1
  26. fusion_bench/method/depth_upscaling/depth_upscaling.py +4 -9
  27. fusion_bench/method/doge_ta/clip_layer_wise_adamerging.py +4 -5
  28. fusion_bench/method/doge_ta/doge_ta.py +1 -1
  29. fusion_bench/method/ensemble.py +12 -12
  30. fusion_bench/method/expert_sparsity/utils/calibration_data.py +1 -1
  31. fusion_bench/method/fisher_merging/clip_fisher_merging.py +2 -2
  32. fusion_bench/method/fisher_merging/fisher_merging.py +6 -15
  33. fusion_bench/method/fisher_merging/gpt2_fisher_merging.py +3 -10
  34. fusion_bench/method/fw_merging/fw_hard.py +1 -1
  35. fusion_bench/method/fw_merging/fw_soft.py +1 -1
  36. fusion_bench/method/gossip/clip_layer_wise_gossip.py +4 -5
  37. fusion_bench/method/linear/expo.py +2 -1
  38. fusion_bench/method/linear/linear_interpolation.py +6 -4
  39. fusion_bench/method/linear/simple_average_for_llama.py +16 -6
  40. fusion_bench/method/lm_finetune/bradley_terry_rm.py +2 -2
  41. fusion_bench/method/mixture_of_experts/mixtral_upcycling.py +9 -26
  42. fusion_bench/method/model_recombination.py +2 -5
  43. fusion_bench/method/moe_pruner/hooks/__init__.py +1 -2
  44. fusion_bench/method/moe_pruner/utils/data.py +2 -1
  45. fusion_bench/method/moe_pruner/utils/prune.py +6 -1
  46. fusion_bench/method/pruning/llama_magnitude_prune.py +1 -1
  47. fusion_bench/method/pruning/wanda_utils/data.py +1 -2
  48. fusion_bench/method/pwe_moe/clip_pwe_moe.py +12 -34
  49. fusion_bench/method/randes/modelsoup.py +1 -3
  50. fusion_bench/method/regmean/clip_regmean.py +2 -2
  51. fusion_bench/method/regmean/gpt2_regmean.py +3 -10
  52. fusion_bench/method/regmean/regmean.py +2 -11
  53. fusion_bench/method/regmean_plusplus/__init__.py +3 -0
  54. fusion_bench/method/regmean_plusplus/clip_regmean_plusplus.py +199 -0
  55. fusion_bench/method/regmean_plusplus/regmean_plusplus.py +383 -0
  56. fusion_bench/method/simple_average.py +16 -4
  57. fusion_bench/method/slerp/slerp.py +5 -2
  58. fusion_bench/method/smile_upscaling/error_accumulation.py +177 -0
  59. fusion_bench/method/smile_upscaling/projected_energy.py +145 -0
  60. fusion_bench/method/smile_upscaling/smile_qwen2_upscaling.py +39 -28
  61. fusion_bench/method/smile_upscaling/smile_upscaling.py +12 -5
  62. fusion_bench/method/tall_mask/task_arithmetic.py +3 -11
  63. fusion_bench/method/task_arithmetic/task_arithmetic.py +6 -10
  64. fusion_bench/method/ties_merging/ties_merging.py +13 -26
  65. fusion_bench/method/we_moe/clip_we_moe.py +5 -4
  66. fusion_bench/method/we_moe/we_moe.py +6 -6
  67. fusion_bench/method/weighted_average/llama.py +4 -16
  68. fusion_bench/metrics/continual_learning/__init__.py +1 -0
  69. fusion_bench/metrics/continual_learning/backward_transfer.py +1 -1
  70. fusion_bench/metrics/nyuv2/__init__.py +2 -2
  71. fusion_bench/metrics/nyuv2/segmentation.py +1 -1
  72. fusion_bench/mixins/__init__.py +10 -2
  73. fusion_bench/mixins/clip_classification.py +4 -3
  74. fusion_bench/mixins/hydra_config.py +105 -7
  75. fusion_bench/mixins/lightning_fabric.py +2 -0
  76. fusion_bench/mixins/serialization.py +265 -48
  77. fusion_bench/modelpool/__init__.py +2 -2
  78. fusion_bench/modelpool/base_pool.py +29 -9
  79. fusion_bench/modelpool/causal_lm/causal_lm.py +9 -0
  80. fusion_bench/modelpool/clip_vision/modelpool.py +43 -12
  81. fusion_bench/modelpool/seq_classification_lm/__init__.py +1 -1
  82. fusion_bench/modelpool/seq_classification_lm/seq_classification_lm.py +1 -1
  83. fusion_bench/models/__init__.py +2 -1
  84. fusion_bench/models/expert_sparsity/mixtral/__init__.py +1 -1
  85. fusion_bench/models/hf_utils.py +182 -0
  86. fusion_bench/models/linearized/linearized_model_utils.py +4 -4
  87. fusion_bench/models/linearized/vision_model.py +1 -1
  88. fusion_bench/models/modeling_deepseek_v2/__init__.py +1 -1
  89. fusion_bench/models/modeling_deepseek_v2/modeling_deepseek.py +4 -4
  90. fusion_bench/models/modeling_deepseek_v2/tokenization_deepseek_fast.py +0 -1
  91. fusion_bench/models/modeling_smile_gemma2/__init__.py +9 -0
  92. fusion_bench/models/modeling_smile_gemma2/configuration_smile_gemma2.py +20 -0
  93. fusion_bench/models/modeling_smile_gemma2/modeling_smile_gemma2.py +986 -0
  94. fusion_bench/models/modeling_smile_gemma2/register.py +26 -0
  95. fusion_bench/models/modeling_smile_llama/__init__.py +0 -0
  96. fusion_bench/models/modeling_smile_llama/configuration_smile_llama.py +20 -0
  97. fusion_bench/models/modeling_smile_llama/modeling_smile_llama.py +705 -0
  98. fusion_bench/models/modeling_smile_llama/register.py +8 -0
  99. fusion_bench/models/modeling_smile_mistral/__init__.py +5 -47
  100. fusion_bench/models/modeling_smile_qwen2/__init__.py +1 -1
  101. fusion_bench/models/modeling_smile_qwen2/modeling_smile_qwen2.py +6 -7
  102. fusion_bench/models/modeling_smile_qwen2/register.py +1 -4
  103. fusion_bench/models/parameter_dict.py +1 -1
  104. fusion_bench/models/sparse_we_moe.py +1 -53
  105. fusion_bench/models/utils.py +26 -0
  106. fusion_bench/models/we_moe.py +1 -53
  107. fusion_bench/models/wrappers/ensemble.py +6 -4
  108. fusion_bench/models/wrappers/layer_wise_fusion.py +1 -1
  109. fusion_bench/models/wrappers/task_wise_fusion.py +250 -72
  110. fusion_bench/programs/base_program.py +81 -2
  111. fusion_bench/programs/fabric_fusion_program.py +24 -8
  112. fusion_bench/scripts/cli.py +6 -6
  113. fusion_bench/taskpool/base_pool.py +4 -3
  114. fusion_bench/taskpool/clip_vision/taskpool.py +34 -18
  115. fusion_bench/taskpool/dummy.py +1 -1
  116. fusion_bench/taskpool/lm_eval_harness/taskpool.py +1 -2
  117. fusion_bench/tasks/clip_classification/__init__.py +6 -4
  118. fusion_bench/utils/__init__.py +6 -1
  119. fusion_bench/utils/devices.py +14 -4
  120. fusion_bench/utils/instantiate_utils.py +3 -1
  121. fusion_bench/utils/misc.py +48 -2
  122. fusion_bench/utils/modelscope.py +265 -0
  123. fusion_bench/utils/parameters.py +2 -2
  124. fusion_bench/utils/rich_utils.py +3 -0
  125. fusion_bench/utils/state_dict_arithmetic.py +34 -27
  126. {fusion_bench-0.2.19.dist-info → fusion_bench-0.2.21.dist-info}/METADATA +31 -24
  127. {fusion_bench-0.2.19.dist-info → fusion_bench-0.2.21.dist-info}/RECORD +189 -153
  128. fusion_bench_config/_get_started/clip_evaluate_single_model.yaml +21 -0
  129. fusion_bench_config/_get_started/clip_simple_average.yaml +23 -0
  130. fusion_bench_config/_get_started/clip_task_arithmetic.yaml +24 -0
  131. fusion_bench_config/_get_started/greeting_program.yaml +4 -0
  132. fusion_bench_config/fabric/loggers/csv_logger.yaml +3 -3
  133. fusion_bench_config/fabric/loggers/tensorboard_logger.yaml +3 -3
  134. fusion_bench_config/fabric_model_fusion.yaml +45 -17
  135. fusion_bench_config/hydra/default.yaml +6 -2
  136. fusion_bench_config/llama_full_finetune.yaml +1 -0
  137. fusion_bench_config/method/adamerging/clip.yaml +1 -1
  138. fusion_bench_config/method/bitdelta/bitdelta.yaml +12 -0
  139. fusion_bench_config/method/depth_upscaling.yaml +4 -1
  140. fusion_bench_config/method/regmean/clip_regmean.yaml +1 -1
  141. fusion_bench_config/method/regmean_plusplus/clip_regmean_plusplus.yaml +11 -0
  142. fusion_bench_config/method/smile_upscaling/error_accumulation.yaml +5 -0
  143. fusion_bench_config/method/smile_upscaling/projected_energy.yaml +2 -0
  144. fusion_bench_config/method/smile_upscaling/smile_qwen2_upscaling.yaml +1 -0
  145. fusion_bench_config/modelpool/CLIPVisionModelPool/_template.yaml +1 -4
  146. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20.yaml +73 -8
  147. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch16_TALL20_model_only.yaml +27 -7
  148. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TA8.yaml +34 -4
  149. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TA8_control_task.yaml +14 -17
  150. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TA8_model_only.yaml +14 -3
  151. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL10.yaml +39 -5
  152. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL12.yaml +49 -5
  153. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14.yaml +55 -5
  154. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL14_model_only.yaml +21 -4
  155. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL16.yaml +61 -5
  156. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL18.yaml +67 -5
  157. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20.yaml +73 -5
  158. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_TALL20_model_only.yaml +26 -3
  159. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_individual.yaml +4 -9
  160. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_finetuned.yaml +7 -5
  161. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_task_projection.yaml +6 -10
  162. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_sun397_and_cars.yaml +6 -7
  163. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_sun397_and_dtd.yaml +6 -7
  164. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_sun397_cars_and_dtd.yaml +7 -8
  165. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_svhn_and_mnist.yaml +8 -6
  166. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_two_tasks_control_task.yaml +4 -6
  167. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TA8.yaml +32 -7
  168. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TA8_model_only.yaml +14 -6
  169. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20.yaml +73 -8
  170. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TALL20_model_only.yaml +27 -7
  171. fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_individual.yaml +6 -10
  172. fusion_bench_config/modelpool/CausalLMPool/Qwen2.5-1.5B_math_and_coder.yaml +2 -2
  173. fusion_bench_config/modelpool/CausalLMPool/Qwen2.5-7B-math_and_coder.yaml +9 -0
  174. fusion_bench_config/modelpool/CausalLMPool/mistral-7b.yaml +6 -0
  175. fusion_bench_config/modelpool/CausalLMPool/mixtral_moe_merging.yaml +10 -0
  176. fusion_bench_config/modelpool/CausalLMPool/qwen2_math_1.5B_and_R1.yaml +4 -12
  177. fusion_bench_config/modelpool/CausalLMPool/simle_mixtral_exp_v4.yaml +6 -16
  178. fusion_bench_config/modelpool/CausalLMPool/vicuna-7b-v1.5.yaml +8 -0
  179. fusion_bench_config/modelpool/{SeqenceClassificationModelPool → SequenceClassificationModelPool}/llama_preference700k.yaml +1 -1
  180. fusion_bench_config/modelpool/{SeqenceClassificationModelPool → SequenceClassificationModelPool}/single_reward_model.yaml +1 -1
  181. fusion_bench_config/nyuv2_config.yaml +3 -1
  182. fusion_bench_config/nyuv2_mtl_train.yaml +1 -0
  183. fusion_bench_config/path/default.yaml +28 -0
  184. fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-base-patch32_svhn_and_mnist.yaml +24 -0
  185. fusion_bench_config/method/adamerging.yaml +0 -23
  186. fusion_bench_config/modelpool/mixtral_moe_merging.yaml +0 -14
  187. fusion_bench_config/modelpool/mixtral_moe_upscaling.yaml +0 -6
  188. fusion_bench_config/taskpool/clip-vit-base-patch32_svhn_and_mnist.yaml +0 -22
  189. {fusion_bench-0.2.19.dist-info → fusion_bench-0.2.21.dist-info}/WHEEL +0 -0
  190. {fusion_bench-0.2.19.dist-info → fusion_bench-0.2.21.dist-info}/entry_points.txt +0 -0
  191. {fusion_bench-0.2.19.dist-info → fusion_bench-0.2.21.dist-info}/licenses/LICENSE +0 -0
  192. {fusion_bench-0.2.19.dist-info → fusion_bench-0.2.21.dist-info}/top_level.txt +0 -0
  193. /fusion_bench_config/modelpool/{SeqenceClassificationModelPool → SequenceClassificationModelPool}/roberta-base_glue.yaml +0 -0
fusion_bench/models/modeling_smile_gemma2/modeling_smile_gemma2.py
@@ -0,0 +1,986 @@
+ from functools import partial
+ from typing import Callable, Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, HybridCache, StaticCache
+ from transformers.generation import GenerationMixin
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     SequenceClassifierOutputWithPast,
+     TokenClassifierOutput,
+ )
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.models.gemma2.modeling_gemma2 import (
+     _CHECKPOINT_FOR_DOC,
+     GEMMA2_INPUTS_DOCSTRING,
+     Gemma2RMSNorm,
+     Gemma2RotaryEmbedding,
+     apply_rotary_pos_emb,
+     eager_attention_forward,
+ )
+ from transformers.processing_utils import Unpack
+ from transformers.utils import (
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     can_return_tuple,
+     logging,
+     replace_return_docstrings,
+ )
+ from transformers.utils.deprecation import deprecate_kwarg
+
+ from fusion_bench.models.smile_moe.linear_from_hf_config import SmileLinear
+
+ from .configuration_smile_gemma2 import SmileGemma2Config
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "SmileGemma2Config"
+
+
+ class SmileGemma2MLP(nn.Module):
+     def __init__(self, config: SmileGemma2Config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         # * --- replace nn.Linear with SmileLinear ---
+         self.gate_proj = SmileLinear(config, self.hidden_size, self.intermediate_size)
+         self.up_proj = SmileLinear(config, self.hidden_size, self.intermediate_size)
+         self.down_proj = SmileLinear(config, self.intermediate_size, self.hidden_size)
+         # * --- end of replacement ---
+         self.act_fn = ACT2FN[config.hidden_activation]
+
+     def forward(self, x):
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
+
+
+ class SmileGemma2Attention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: SmileGemma2Config, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.head_dim = getattr(
+             config, "head_dim", config.hidden_size // config.num_attention_heads
+         )
+         self.num_key_value_groups = (
+             config.num_attention_heads // config.num_key_value_heads
+         )
+         self.scaling = config.query_pre_attn_scalar**-0.5
+         self.attention_dropout = self.config.attention_dropout
+         self.is_causal = True
+
+         # * --- replace nn.Linear with SmileLinear ---
+         self.q_proj = SmileLinear(
+             config,
+             config.hidden_size,
+             config.num_attention_heads * self.head_dim,
+             bias=config.attention_bias,
+         )
+         self.k_proj = SmileLinear(
+             config,
+             config.hidden_size,
+             config.num_key_value_heads * self.head_dim,
+             bias=config.attention_bias,
+         )
+         self.v_proj = SmileLinear(
+             config,
+             config.hidden_size,
+             config.num_key_value_heads * self.head_dim,
+             bias=config.attention_bias,
+         )
+         self.o_proj = SmileLinear(
+             config,
+             config.num_attention_heads * self.head_dim,
+             config.hidden_size,
+             bias=config.attention_bias,
+         )
+         # * --- end of replacement ---
+         self.attn_logit_softcapping = self.config.attn_logit_softcapping
+         self.sliding_window = config.sliding_window if not bool(layer_idx % 2) else None
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor],
+         past_key_value: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         input_shape = hidden_states.shape[:-1]
+         hidden_shape = (*input_shape, -1, self.head_dim)
+
+         query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb(
+             query_states, key_states, cos, sin
+         )
+
+         if past_key_value is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {
+                 "sin": sin,
+                 "cos": cos,
+                 "cache_position": cache_position,
+                 "sliding_window": self.sliding_window,
+             }
+             key_states, value_states = past_key_value.update(
+                 key_states, value_states, self.layer_idx, cache_kwargs
+             )
+
+         # Here we need to slice as we use a static cache by default, but FA2 does not support it
+         if (
+             attention_mask is not None
+             and self.config._attn_implementation == "flash_attention_2"
+         ):
+             seq_len = attention_mask.shape[-1]
+             key_states, value_states = (
+                 key_states[:, :, :seq_len, :],
+                 value_states[:, :, :seq_len, :],
+             )
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             if self.config._attn_implementation == "sdpa" and kwargs.get(
+                 "output_attentions", False
+             ):
+                 logger.warning_once(
+                     "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+                     'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+                 )
+             else:
+                 attention_interface = ALL_ATTENTION_FUNCTIONS[
+                     self.config._attn_implementation
+                 ]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=self.attention_dropout if self.training else 0.0,
+             scaling=self.scaling,
+             sliding_window=self.sliding_window,
+             softcap=self.attn_logit_softcapping,
+             **kwargs,
+         )
+
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+
+ class SmileGemma2DecoderLayer(nn.Module):
+
+     def __init__(self, config: SmileGemma2Config, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.config = config
+         self.is_sliding = not bool(layer_idx % 2)
+         self.self_attn = SmileGemma2Attention(config=config, layer_idx=layer_idx)
+         self.mlp = SmileGemma2MLP(config)
+         self.input_layernorm = Gemma2RMSNorm(
+             config.hidden_size, eps=config.rms_norm_eps
+         )
+         self.post_attention_layernorm = Gemma2RMSNorm(
+             config.hidden_size, eps=config.rms_norm_eps
+         )
+
+         self.pre_feedforward_layernorm = Gemma2RMSNorm(
+             config.hidden_size, eps=config.rms_norm_eps
+         )
+         self.post_feedforward_layernorm = Gemma2RMSNorm(
+             config.hidden_size, eps=config.rms_norm_eps
+         )
+         self.sliding_window = config.sliding_window
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Cache] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         cache_position: Optional[torch.LongTensor] = None,
+         last_cache_position: int = 0,
+         **kwargs,
+     ) -> Tuple[
+         torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
+     ]:
+         if (
+             self.is_sliding and attention_mask is not None
+         ):  # efficient SDPA and no padding
+             # In prefill, we may be larger than sliding window
+             effective_seq_len = max(cache_position.shape[0], self.sliding_window)
+             # For FA2, the mask is 2D and is of shape [bs, processed_tokens] (not [bs, max_cache_len]),
+             # thus we must slice from the right (at most `effective_seq_len` elements)
+             if self.config._attn_implementation == "flash_attention_2":
+                 attention_mask = attention_mask[:, -effective_seq_len:]
+             # Otherwise, the mask is 4D of shape [bs, 1, query_len, max_cache_len] thus we must slice
+             # from the left, with an offset if we are beyond the sliding window
+             else:
+                 min_dtype = torch.finfo(attention_mask.dtype).min
+                 sliding_window_mask = torch.tril(
+                     torch.ones_like(attention_mask, dtype=torch.bool),
+                     diagonal=-self.sliding_window,
+                 )
+                 attention_mask = torch.where(
+                     sliding_window_mask, min_dtype, attention_mask
+                 )
+                 # In case we are beyond the sliding window, we need to correctly offset the mask slicing
+                 # `last_cache_position` is equivalent to `cache_position[-1]` but without breaking dynamo
+                 offset = last_cache_position - effective_seq_len
+                 # Should only be used when beyond the sliding window (i.e. offset > 0)
+                 offset = max(0, offset)
+                 attention_mask = attention_mask[
+                     :, :, :, offset : offset + effective_seq_len
+                 ]
+
+         residual = hidden_states
+
+         hidden_states = self.input_layernorm(hidden_states)
+
+         # Self Attention
+         hidden_states, self_attn_weights = self.self_attn(
+             hidden_states=hidden_states,
+             position_embeddings=position_embeddings,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_value=past_key_value,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             **kwargs,
+         )
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = residual + hidden_states
+
+         residual = hidden_states
+         hidden_states = self.pre_feedforward_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = self.post_feedforward_layernorm(hidden_states)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states,)
+
+         if output_attentions:
+             outputs += (self_attn_weights,)
+
+         return outputs
+
+
+ class SmileGemma2PreTrainedModel(PreTrainedModel):
+     config_class = SmileGemma2Config
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["SmileGemma2DecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_flash_attn_2 = True
+     _supports_sdpa = True
+     _supports_flex_attn = True
+     _supports_cache_class = True
+     _supports_quantized_cache = True
+     _supports_static_cache = True
+     _supports_attention_backend = True
+
+     def _init_weights(self, module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+
+ class SmileGemma2Model(SmileGemma2PreTrainedModel):
+     """
+     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Gemma2DecoderLayer`]
+
+     Args:
+         config: Gemma2Config
+     """
+
+     def __init__(self, config: SmileGemma2Config):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(
+             config.vocab_size, config.hidden_size, self.padding_idx
+         )
+         self.layers = nn.ModuleList(
+             [
+                 SmileGemma2DecoderLayer(config, layer_idx)
+                 for layer_idx in range(config.num_hidden_layers)
+             ]
+         )
+         self.norm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = Gemma2RotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.embed_tokens = value
+
+     @can_return_tuple
+     @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[HybridCache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         last_cache_position: Optional[int] = None,
+         **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+     ) -> BaseModelOutputWithPast:
+         output_attentions = (
+             output_attentions
+             if output_attentions is not None
+             else self.config.output_attentions
+         )
+         output_hidden_states = (
+             output_hidden_states
+             if output_hidden_states is not None
+             else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError(
+                 "You must specify exactly one of input_ids or inputs_embeds"
+             )
+
+         if self.gradient_checkpointing and self.training and use_cache:
+             logger.warning_once(
+                 "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+             )
+             use_cache = False
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+
+         if use_cache and past_key_values is None and not self.training:
+             batch_size, seq_len, _ = inputs_embeds.shape
+             # NOTE: ideally, `HybridCache` should be initialized outside the model with `layer_device_map`
+             past_key_values = HybridCache(
+                 self.config,
+                 max_batch_size=batch_size,
+                 max_cache_len=seq_len,
+                 dtype=inputs_embeds.dtype,
+                 device=self.device,
+             )
+
+         if cache_position is None:
+             past_seen_tokens = (
+                 past_key_values.get_seq_length() if past_key_values is not None else 0
+             )
+             cache_position = torch.arange(
+                 past_seen_tokens,
+                 past_seen_tokens + inputs_embeds.shape[1],
+                 device=inputs_embeds.device,
+             )
+
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         # This is needed to correctly slice the mask without data-dependent slicing later on if using dynamo tracing
+         # (retrieving the same value from `cache_position` later on would crash dynamo)
+         if last_cache_position is None:
+             last_cache_position = 0
+             if attention_mask is not None:
+                 # In case a 4d mask is passed directly without using `generate`, we have to rely on cache_position
+                 # It will break dynamo tracing but there are no way around it (and it should never happen in practice)
+                 last_cache_position = (
+                     attention_mask.shape[-1]
+                     if attention_mask.dim() == 2
+                     else cache_position[-1].item()
+                 )
+         causal_mask = self._update_causal_mask(
+             attention_mask,
+             inputs_embeds,
+             cache_position,
+             past_key_values,
+             output_attentions,
+         )
+
+         # embed positions
+         hidden_states = inputs_embeds
+
+         # create position embeddings to be shared across the decoder layers
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+         # normalized
+         # Gemma2 downcasts the below to float16, causing sqrt(3072)=55.4256 to become 55.5
+         # See https://github.com/huggingface/transformers/pull/29402
+         normalizer = torch.tensor(
+             self.config.hidden_size**0.5, dtype=hidden_states.dtype
+         )
+         hidden_states = hidden_states * normalizer
+
+         # decoder layers
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attns = () if output_attentions else None
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+                 layer_outputs = self._gradient_checkpointing_func(
+                     partial(decoder_layer.__call__, **flash_attn_kwargs),
+                     hidden_states,
+                     position_embeddings,
+                     causal_mask,
+                     position_ids,
+                     past_key_values,
+                     output_attentions,
+                     use_cache,
+                     cache_position,
+                     last_cache_position,
+                 )
+             else:
+                 layer_outputs = decoder_layer(
+                     hidden_states,
+                     position_embeddings=position_embeddings,
+                     attention_mask=causal_mask,
+                     position_ids=position_ids,
+                     past_key_value=past_key_values,
+                     output_attentions=output_attentions,
+                     use_cache=use_cache,
+                     cache_position=cache_position,
+                     last_cache_position=last_cache_position,
+                     **flash_attn_kwargs,
+                 )
+
+             hidden_states = layer_outputs[0]
+
+             if output_attentions:
+                 all_self_attns += (layer_outputs[1],)
+
+         hidden_states = self.norm(hidden_states)
+
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attns,
+         )
+
+     @torch.no_grad()
+     def _update_causal_mask(
+         self,
+         attention_mask: torch.Tensor,
+         input_tensor: torch.Tensor,
+         cache_position: torch.Tensor,
+         past_key_values: HybridCache,
+         output_attentions: bool,
+     ):
+         # Flash Attention currently doesn't support static cache but Gemma2 work only with static cache.
+         # So we will pass in attention mask as is in any case, not only when ther's padding. Then we'll use its shape
+         # to cut out keys/values trailing 0 used in static cache. This workaround should be compile compatible
+         # as it doesn't cause dynamic control issues.
+         if self.config._attn_implementation == "flash_attention_2":
+             return attention_mask
+
+         dtype, device = input_tensor.dtype, input_tensor.device
+         sequence_length = input_tensor.shape[1]
+         if isinstance(past_key_values, (HybridCache, StaticCache)):
+             target_length = past_key_values.get_max_cache_shape()
+         else:
+             target_length = (
+                 attention_mask.shape[-1]
+                 if attention_mask is not None
+                 else input_tensor.shape[1]
+             )
+
+         # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
+         causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+             attention_mask,
+             sequence_length=sequence_length,
+             target_length=target_length,
+             dtype=dtype,
+             device=device,
+             cache_position=cache_position,
+             batch_size=input_tensor.shape[0],
+         )
+         return causal_mask
+
+     @staticmethod
+     def _prepare_4d_causal_attention_mask_with_cache_position(
+         attention_mask: torch.Tensor,
+         sequence_length: int,
+         target_length: int,
+         dtype: torch.dtype,
+         device: torch.device,
+         cache_position: torch.Tensor,
+         batch_size: int,
+         **kwargs,
+     ):
+         """
+         Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+         `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+         Args:
+             attention_mask (`torch.Tensor`):
+                 A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
+                 `(batch_size, 1, query_length, key_value_length)`.
+             sequence_length (`int`):
+                 The sequence length being processed.
+             target_length (`int`):
+                 The target length: when generating with static cache, the mask should be as long as the static cache,
+                 to account for the 0 padding, the part of the cache that is not filled yet.
+             dtype (`torch.dtype`):
+                 The dtype to use for the 4D attention mask.
+             device (`torch.device`):
+                 The device to place the 4D attention mask on.
+             cache_position (`torch.Tensor`):
+                 Indices depicting the position of the input sequence tokens in the sequence.
+             batch_size (`torch.Tensor`):
+                 Batch size.
+         """
+         if attention_mask is not None and attention_mask.dim() == 4:
+             # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+             causal_mask = attention_mask
+         else:
+             min_dtype = torch.finfo(dtype).min
+             causal_mask = torch.full(
+                 (sequence_length, target_length),
+                 fill_value=min_dtype,
+                 dtype=dtype,
+                 device=device,
+             )
+             if sequence_length != 1:
+                 causal_mask = torch.triu(causal_mask, diagonal=1)
+             causal_mask *= torch.arange(
+                 target_length, device=device
+             ) > cache_position.reshape(-1, 1)
+             causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+             if attention_mask is not None:
+                 causal_mask = (
+                     causal_mask.clone()
+                 )  # copy to contiguous memory for in-place edit
+                 mask_length = attention_mask.shape[-1]
+                 padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[
+                     :, None, None, :
+                 ].to(causal_mask.device)
+                 padding_mask = padding_mask == 0
+                 causal_mask[:, :, :, :mask_length] = causal_mask[
+                     :, :, :, :mask_length
+                 ].masked_fill(padding_mask, min_dtype)
+
+         return causal_mask
+
+
+ class SmileGemma2ForCausalLM(SmileGemma2PreTrainedModel, GenerationMixin):
+     _tied_weights_keys = ["lm_head.weight"]
+     _tp_plan = {"lm_head": "colwise_rep"}
+     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = SmileGemma2Model(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     @can_return_tuple
+     @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+     @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
+     @replace_return_docstrings(
+         output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[HybridCache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         logits_to_keep: Union[int, torch.Tensor] = 0,
+         **loss_kwargs,
+     ) -> CausalLMOutputWithPast:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         logits_to_keep (`int` or `torch.Tensor`, *optional*):
+             If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+             `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+             token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+             If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+             This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, Gemma2ForCausalLM
+
+         >>> model = Gemma2ForCausalLM.from_pretrained("google/gemma-2-9b")
+         >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
+
+         >>> prompt = "What is your favorite condiment?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "What is your favorite condiment?"
+         ```"""
+
+         if self.training and self.config._attn_implementation != "eager":
+             logger.warning_once(
+                 "It is strongly recommended to train Gemma2 models with the `eager` attention implementation "
+                 f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
+             )
+         output_attentions = (
+             output_attentions
+             if output_attentions is not None
+             else self.config.output_attentions
+         )
+         output_hidden_states = (
+             output_hidden_states
+             if output_hidden_states is not None
+             else self.config.output_hidden_states
+         )
+         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs: BaseModelOutputWithPast = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             cache_position=cache_position,
+             **loss_kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = (
+             slice(-logits_to_keep, None)
+             if isinstance(logits_to_keep, int)
+             else logits_to_keep
+         )
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+         if self.config.final_logit_softcapping is not None:
+             logits = logits / self.config.final_logit_softcapping
+             logits = torch.tanh(logits)
+             logits = logits * self.config.final_logit_softcapping
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+     def prepare_inputs_for_generation(
+         self,
+         input_ids,
+         past_key_values=None,
+         attention_mask=None,
+         inputs_embeds=None,
+         cache_position=None,
+         position_ids=None,
+         use_cache=True,
+         logits_to_keep=None,
+         **kwargs,
+     ):
+         # Overwritten: has a special cache type, `HybridCache`
+
+         model_inputs = super().prepare_inputs_for_generation(
+             input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             cache_position=cache_position,
+             position_ids=position_ids,
+             use_cache=use_cache,
+             logits_to_keep=logits_to_keep,
+             **kwargs,
+         )
+
+         # This is needed to correctly slice the mask without data-dependent slicing later on if using dynamo tracing
+         # (retrieving the same value from `cache_position` later on would crash dynamo)
+         model_inputs["last_cache_position"] = (
+             attention_mask.shape[-1] if attention_mask is not None else 0
+         )
+         if logits_to_keep is None:
+             _ = model_inputs.pop("logits_to_keep", None)
+
+         if (
+             isinstance(past_key_values, HybridCache)
+             and attention_mask.ndim == 2
+             and not self.config._attn_implementation == "flash_attention_2"
+         ):
+             if model_inputs["inputs_embeds"] is not None:
+                 batch_size, sequence_length, _ = model_inputs["inputs_embeds"].shape
+                 device = model_inputs["inputs_embeds"].device
+             else:
+                 batch_size, sequence_length = model_inputs["input_ids"].shape
+                 device = model_inputs["input_ids"].device
+
+             attention_mask = (
+                 self.model._prepare_4d_causal_attention_mask_with_cache_position(
+                     attention_mask,
+                     sequence_length=sequence_length,
+                     target_length=past_key_values.get_max_cache_shape(),
+                     dtype=self.lm_head.weight.dtype,
+                     device=device,
+                     cache_position=cache_position,
+                     batch_size=batch_size,
+                 )
+             )
+             model_inputs["attention_mask"] = attention_mask
+
+         return model_inputs
+
+
+ class SmileGemma2ForSequenceClassification(SmileGemma2PreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.model = SmileGemma2Model(config)
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     @can_return_tuple
+     @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+     ) -> SequenceClassifierOutputWithPast:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+
+         transformer_outputs: BaseModelOutputWithPast = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+         )
+         hidden_states = transformer_outputs.last_hidden_state
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size = input_ids.shape[0]
+         else:
+             batch_size = inputs_embeds.shape[0]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError(
+                 "Cannot handle batch sizes > 1 if no padding token is defined."
+             )
+         if self.config.pad_token_id is None:
+             last_non_pad_token = -1
+         elif input_ids is not None:
+             # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
+             non_pad_mask = (input_ids != self.config.pad_token_id).to(
+                 logits.device, torch.int32
+             )
+             token_indices = torch.arange(
+                 input_ids.shape[-1], device=logits.device, dtype=torch.int32
+             )
+             last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
+         else:
+             last_non_pad_token = -1
+             logger.warning_once(
+                 f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+                 "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+             )
+
+         pooled_logits = logits[
+             torch.arange(batch_size, device=logits.device), last_non_pad_token
+         ]
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(
+                 logits=logits,
+                 labels=labels,
+                 pooled_logits=pooled_logits,
+                 config=self.config,
+             )
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
+
+ class SmileGemma2ForTokenClassification(SmileGemma2PreTrainedModel):
+     def __init__(self, config: SmileGemma2Config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.model = SmileGemma2Model(config)
+         if getattr(config, "classifier_dropout", None) is not None:
+             classifier_dropout = config.classifier_dropout
+         elif getattr(config, "hidden_dropout", None) is not None:
+             classifier_dropout = config.hidden_dropout
+         else:
+             classifier_dropout = 0.1
+         self.dropout = nn.Dropout(classifier_dropout)
+         self.score = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     @can_return_tuple
+     @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=TokenClassifierOutput,
+         config_class=_CONFIG_FOR_DOC,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+     ) -> TokenClassifierOutput:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+
+         outputs: BaseModelOutputWithPast = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+         )
+         sequence_output = outputs.last_hidden_state
+         sequence_output = self.dropout(sequence_output)
+         logits = self.score(sequence_output)
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits, labels, self.config)
+
+         return TokenClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+
+ __all__ = [
+     "SmileGemma2PreTrainedModel",
+     "SmileGemma2Model",
+     "SmileGemma2ForCausalLM",
+     "SmileGemma2ForSequenceClassification",
+     "SmileGemma2ForTokenClassification",
+ ]
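
The module above swaps every dense projection in the Gemma2 architecture from nn.Linear to SmileLinear, so an upscaled checkpoint can be loaded through the usual transformers entry points once the config and model classes are registered. A minimal sketch of that registration is shown below; the "smile_gemma2" model-type string and the exact contents of register.py (not expanded in this diff) are assumptions, while AutoConfig.register and AutoModelForCausalLM.register are standard transformers APIs.

# Hedged sketch, not taken from register.py: wire the SmileGemma2 classes shipped in
# this wheel into the transformers Auto-class machinery so from_pretrained() can
# resolve them by config type.
from transformers import AutoConfig, AutoModelForCausalLM

from fusion_bench.models.modeling_smile_gemma2.configuration_smile_gemma2 import (
    SmileGemma2Config,
)
from fusion_bench.models.modeling_smile_gemma2.modeling_smile_gemma2 import (
    SmileGemma2ForCausalLM,
)

# "smile_gemma2" is assumed to match SmileGemma2Config.model_type; verify against the
# shipped configuration before relying on it.
AutoConfig.register("smile_gemma2", SmileGemma2Config)
AutoModelForCausalLM.register(SmileGemma2Config, SmileGemma2ForCausalLM)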