fusion-bench 0.2.20__py3-none-any.whl → 0.2.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fusion_bench/__init__.py +22 -2
- fusion_bench/_get_started/__init__.py +3 -0
- fusion_bench/_get_started/greeting_program.py +49 -0
- fusion_bench/compat/method/base_algorithm.py +14 -0
- fusion_bench/constants/__init__.py +6 -0
- fusion_bench/constants/clip_vision.py +26 -2
- fusion_bench/constants/paths.py +4 -0
- fusion_bench/constants/runtime.py +57 -0
- fusion_bench/dataset/clip_dataset.py +2 -1
- fusion_bench/dataset/gpt2_glue.py +9 -9
- fusion_bench/dataset/image_corruption/__init__.py +0 -0
- fusion_bench/dataset/image_corruption/make_corruption.py +179 -0
- fusion_bench/dataset/image_dataset.py +1 -1
- fusion_bench/dataset/nyuv2.py +2 -2
- fusion_bench/method/__init__.py +24 -5
- fusion_bench/method/adamerging/clip_layer_wise_adamerging.py +1 -1
- fusion_bench/method/adamerging/clip_task_wise_adamerging.py +11 -7
- fusion_bench/method/adamerging/layer_wise_adamerging.py +11 -5
- fusion_bench/method/base_algorithm.py +195 -12
- fusion_bench/method/bitdelta/__init__.py +5 -0
- fusion_bench/method/bitdelta/bitdelta.py +156 -0
- fusion_bench/method/bitdelta/bitdelta_utils/__init__.py +0 -0
- fusion_bench/method/bitdelta/bitdelta_utils/binary_gemm_kernel.py +462 -0
- fusion_bench/method/bitdelta/bitdelta_utils/data.py +35 -0
- fusion_bench/method/bitdelta/bitdelta_utils/diff.py +129 -0
- fusion_bench/method/classification/clip_finetune.py +1 -1
- fusion_bench/method/concrete_subspace/clip_concrete_adamerging.py +0 -1
- fusion_bench/method/depth_upscaling/depth_upscaling.py +4 -9
- fusion_bench/method/doge_ta/clip_layer_wise_adamerging.py +4 -5
- fusion_bench/method/doge_ta/doge_ta.py +1 -1
- fusion_bench/method/ensemble.py +12 -12
- fusion_bench/method/expert_sparsity/utils/calibration_data.py +1 -1
- fusion_bench/method/fisher_merging/clip_fisher_merging.py +2 -6
- fusion_bench/method/fisher_merging/fisher_merging.py +6 -15
- fusion_bench/method/fisher_merging/gpt2_fisher_merging.py +3 -10
- fusion_bench/method/fw_merging/fw_hard.py +1 -1
- fusion_bench/method/fw_merging/fw_soft.py +1 -1
- fusion_bench/method/gossip/clip_layer_wise_gossip.py +4 -5
- fusion_bench/method/linear/expo.py +2 -1
- fusion_bench/method/linear/linear_interpolation.py +6 -4
- fusion_bench/method/linear/simple_average_for_llama.py +17 -13
- fusion_bench/method/lm_finetune/bradley_terry_rm.py +2 -2
- fusion_bench/method/mixture_of_experts/mixtral_upcycling.py +9 -26
- fusion_bench/method/model_recombination.py +2 -5
- fusion_bench/method/moe_pruner/hooks/__init__.py +1 -2
- fusion_bench/method/moe_pruner/utils/data.py +2 -1
- fusion_bench/method/moe_pruner/utils/prune.py +6 -1
- fusion_bench/method/pruning/llama_magnitude_prune.py +1 -1
- fusion_bench/method/pruning/wanda_utils/data.py +1 -2
- fusion_bench/method/pwe_moe/clip_pwe_moe.py +12 -34
- fusion_bench/method/randes/modelsoup.py +1 -3
- fusion_bench/method/regmean/clip_regmean.py +2 -2
- fusion_bench/method/regmean/gpt2_regmean.py +3 -10
- fusion_bench/method/regmean/regmean.py +2 -11
- fusion_bench/method/regmean_plusplus/__init__.py +1 -1
- fusion_bench/method/regmean_plusplus/clip_regmean_plusplus.py +24 -17
- fusion_bench/method/regmean_plusplus/regmean_plusplus.py +56 -38
- fusion_bench/method/simple_average.py +12 -16
- fusion_bench/method/slerp/slerp.py +5 -2
- fusion_bench/method/smile_upscaling/causal_lm_upscaling.py +371 -0
- fusion_bench/method/smile_upscaling/error_accumulation.py +177 -0
- fusion_bench/method/smile_upscaling/projected_energy.py +144 -0
- fusion_bench/method/smile_upscaling/smile_mistral_upscaling.py +5 -1
- fusion_bench/method/smile_upscaling/smile_qwen2_upscaling.py +71 -51
- fusion_bench/method/smile_upscaling/smile_upscaling.py +12 -5
- fusion_bench/method/tall_mask/task_arithmetic.py +3 -11
- fusion_bench/method/task_arithmetic/task_arithmetic.py +6 -10
- fusion_bench/method/ties_merging/ties_merging.py +13 -26
- fusion_bench/method/we_moe/__init__.py +1 -0
- fusion_bench/method/we_moe/clip_we_moe.py +5 -4
- fusion_bench/method/we_moe/entropy_loss.py +25 -0
- fusion_bench/method/we_moe/flan_t5_we_moe.py +331 -0
- fusion_bench/method/we_moe/utils.py +15 -0
- fusion_bench/method/we_moe/we_moe.py +6 -6
- fusion_bench/method/weighted_average/llama.py +4 -16
- fusion_bench/metrics/continual_learning/__init__.py +1 -0
- fusion_bench/metrics/continual_learning/backward_transfer.py +1 -1
- fusion_bench/metrics/nyuv2/__init__.py +2 -2
- fusion_bench/metrics/nyuv2/segmentation.py +1 -1
- fusion_bench/mixins/__init__.py +10 -2
- fusion_bench/mixins/clip_classification.py +15 -45
- fusion_bench/mixins/hydra_config.py +105 -7
- fusion_bench/mixins/lightning_fabric.py +2 -0
- fusion_bench/mixins/serialization.py +275 -48
- fusion_bench/modelpool/__init__.py +2 -2
- fusion_bench/modelpool/base_pool.py +29 -9
- fusion_bench/modelpool/causal_lm/causal_lm.py +41 -33
- fusion_bench/modelpool/clip_vision/modelpool.py +1 -3
- fusion_bench/modelpool/seq_classification_lm/__init__.py +1 -1
- fusion_bench/modelpool/seq_classification_lm/seq_classification_lm.py +1 -1
- fusion_bench/models/__init__.py +7 -1
- fusion_bench/models/expert_sparsity/mixtral/__init__.py +1 -1
- fusion_bench/models/hf_utils.py +160 -0
- fusion_bench/models/linearized/linearized_model_utils.py +4 -4
- fusion_bench/models/linearized/vision_model.py +1 -1
- fusion_bench/models/model_card_templates/default.md +46 -0
- fusion_bench/models/modeling_deepseek_v2/__init__.py +1 -1
- fusion_bench/models/modeling_deepseek_v2/modeling_deepseek.py +4 -4
- fusion_bench/models/modeling_deepseek_v2/tokenization_deepseek_fast.py +0 -1
- fusion_bench/models/modeling_smile_gemma2/__init__.py +9 -0
- fusion_bench/models/modeling_smile_gemma2/configuration_smile_gemma2.py +20 -0
- fusion_bench/models/modeling_smile_gemma2/modeling_smile_gemma2.py +986 -0
- fusion_bench/models/modeling_smile_gemma2/register.py +26 -0
- fusion_bench/models/modeling_smile_llama/__init__.py +7 -0
- fusion_bench/models/modeling_smile_llama/configuration_smile_llama.py +20 -0
- fusion_bench/models/modeling_smile_llama/modeling_smile_llama.py +698 -0
- fusion_bench/models/modeling_smile_llama/register.py +8 -0
- fusion_bench/models/modeling_smile_mistral/__init__.py +5 -47
- fusion_bench/models/modeling_smile_qwen2/__init__.py +1 -1
- fusion_bench/models/modeling_smile_qwen2/modeling_smile_qwen2.py +7 -12
- fusion_bench/models/modeling_smile_qwen2/register.py +1 -4
- fusion_bench/models/parameter_dict.py +1 -1
- fusion_bench/models/sparse_we_moe.py +1 -53
- fusion_bench/models/utils.py +26 -0
- fusion_bench/models/we_moe.py +1 -53
- fusion_bench/models/wrappers/ensemble.py +6 -4
- fusion_bench/models/wrappers/layer_wise_fusion.py +1 -1
- fusion_bench/models/wrappers/task_wise_fusion.py +250 -72
- fusion_bench/programs/base_program.py +81 -2
- fusion_bench/programs/fabric_fusion_program.py +46 -61
- fusion_bench/scripts/cli.py +38 -5
- fusion_bench/taskpool/base_pool.py +4 -3
- fusion_bench/taskpool/clip_vision/taskpool.py +43 -22
- fusion_bench/taskpool/dummy.py +1 -1
- fusion_bench/taskpool/lm_eval_harness/taskpool.py +1 -2
- fusion_bench/tasks/clip_classification/__init__.py +6 -4
- fusion_bench/utils/__init__.py +7 -1
- fusion_bench/utils/cache_utils.py +101 -1
- fusion_bench/utils/devices.py +14 -4
- fusion_bench/utils/fabric.py +2 -2
- fusion_bench/utils/instantiate_utils.py +3 -1
- fusion_bench/utils/lazy_imports.py +23 -0
- fusion_bench/utils/lazy_state_dict.py +38 -3
- fusion_bench/utils/modelscope.py +127 -8
- fusion_bench/utils/parameters.py +2 -2
- fusion_bench/utils/path.py +56 -0
- fusion_bench/utils/pylogger.py +1 -1
- fusion_bench/utils/rich_utils.py +3 -0
- fusion_bench/utils/state_dict_arithmetic.py +25 -23
- {fusion_bench-0.2.20.dist-info → fusion_bench-0.2.22.dist-info}/METADATA +24 -47
- {fusion_bench-0.2.20.dist-info → fusion_bench-0.2.22.dist-info}/RECORD +184 -145
- fusion_bench_config/_get_started/clip_evaluate_single_model.yaml +21 -0
- fusion_bench_config/_get_started/clip_simple_average.yaml +23 -0
- fusion_bench_config/_get_started/clip_task_arithmetic.yaml +24 -0
- fusion_bench_config/_get_started/greeting_program.yaml +4 -0
- fusion_bench_config/fabric/loggers/csv_logger.yaml +3 -3
- fusion_bench_config/fabric/loggers/tensorboard_logger.yaml +3 -3
- fusion_bench_config/fabric_model_fusion.yaml +45 -17
- fusion_bench_config/hydra/default.yaml +6 -2
- fusion_bench_config/llama_full_finetune.yaml +1 -0
- fusion_bench_config/method/adamerging/clip.yaml +1 -1
- fusion_bench_config/method/bitdelta/bitdelta.yaml +12 -0
- fusion_bench_config/method/depth_upscaling.yaml +4 -1
- fusion_bench_config/method/fisher_merging/clip_fisher_merging.yaml +0 -1
- fusion_bench_config/method/linear/simple_average_for_llama.yaml +3 -2
- fusion_bench_config/method/smile_upscaling/causal_lm_upscaling.yaml +21 -0
- fusion_bench_config/method/smile_upscaling/error_accumulation.yaml +5 -0
- fusion_bench_config/method/smile_upscaling/projected_energy.yaml +2 -0
- fusion_bench_config/method/smile_upscaling/smile_qwen2_upscaling.yaml +2 -1
- fusion_bench_config/method/wemoe/flan_t5_weight_ensembling_moe.yaml +20 -0
- fusion_bench_config/modelpool/CLIPVisionModelPool/_template.yaml +1 -4
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_individual.yaml +4 -9
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_single_finetuned.yaml +1 -1
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-base-patch32_svhn_and_mnist.yaml +0 -6
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TA8.yaml +1 -1
- fusion_bench_config/modelpool/CLIPVisionModelPool/clip-vit-large-patch14_TA8_model_only.yaml +1 -1
- fusion_bench_config/modelpool/CausalLMPool/Qwen2.5-1.5B_math_and_coder.yaml +3 -3
- fusion_bench_config/modelpool/CausalLMPool/Qwen2.5-7B-math_and_coder.yaml +9 -0
- fusion_bench_config/modelpool/CausalLMPool/mistral-7b.yaml +6 -0
- fusion_bench_config/modelpool/CausalLMPool/mixtral_moe_merging.yaml +10 -0
- fusion_bench_config/modelpool/CausalLMPool/qwen2_math_1.5B_and_R1.yaml +4 -12
- fusion_bench_config/modelpool/CausalLMPool/simle_mixtral_exp_v4.yaml +6 -16
- fusion_bench_config/modelpool/CausalLMPool/vicuna-7b-v1.5.yaml +8 -0
- fusion_bench_config/modelpool/{SeqenceClassificationModelPool → SequenceClassificationModelPool}/llama_preference700k.yaml +1 -1
- fusion_bench_config/modelpool/{SeqenceClassificationModelPool → SequenceClassificationModelPool}/single_reward_model.yaml +1 -1
- fusion_bench_config/nyuv2_config.yaml +3 -1
- fusion_bench_config/nyuv2_mtl_train.yaml +1 -0
- fusion_bench_config/path/default.yaml +28 -0
- fusion_bench_config/taskpool/CLIPVisionModelTaskPool/clip-vit-base-patch32_svhn_and_mnist.yaml +24 -0
- fusion_bench_config/method/adamerging.yaml +0 -23
- fusion_bench_config/modelpool/mixtral_moe_merging.yaml +0 -14
- fusion_bench_config/modelpool/mixtral_moe_upscaling.yaml +0 -6
- fusion_bench_config/taskpool/clip-vit-base-patch32_svhn_and_mnist.yaml +0 -22
- {fusion_bench-0.2.20.dist-info → fusion_bench-0.2.22.dist-info}/WHEEL +0 -0
- {fusion_bench-0.2.20.dist-info → fusion_bench-0.2.22.dist-info}/entry_points.txt +0 -0
- {fusion_bench-0.2.20.dist-info → fusion_bench-0.2.22.dist-info}/licenses/LICENSE +0 -0
- {fusion_bench-0.2.20.dist-info → fusion_bench-0.2.22.dist-info}/top_level.txt +0 -0
- /fusion_bench_config/modelpool/{SeqenceClassificationModelPool → SequenceClassificationModelPool}/roberta-base_glue.yaml +0 -0

fusion_bench/models/modeling_smile_llama/modeling_smile_llama.py
@@ -0,0 +1,698 @@
+import logging
+from functools import partial
+from typing import Callable, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from transformers import PreTrainedModel
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache, StaticCache
+from transformers.generation import GenerationMixin
+from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+)
+from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
+from transformers.models.llama.modeling_llama import (
+    LlamaRMSNorm,
+    LlamaRotaryEmbedding,
+    apply_rotary_pos_emb,
+    eager_attention_forward,
+)
+from transformers.processing_utils import Unpack
+from transformers.utils import (
+    add_start_docstrings_to_model_forward,
+    can_return_tuple,
+    is_torch_flex_attn_available,
+    replace_return_docstrings,
+)
+from transformers.utils.deprecation import deprecate_kwarg
+
+from fusion_bench.models.smile_moe.linear_from_hf_config import SmileLinear
+
+from .configuration_smile_llama import SmileLlamaConfig
+
+if is_torch_flex_attn_available():
+    from torch.nn.attention.flex_attention import BlockMask
+    from transformers.integrations.flex_attention import make_flex_block_causal_mask
+
+
+logger = logging.getLogger(__name__)
+
+_CONFIG_FOR_DOC = "SmileLlamaConfig"
+
+
+class SmileLlamaMLP(nn.Module):
+    def __init__(self, config: SmileLlamaConfig):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+        # * --- replace nn.Linear with SmileLinear ---
+        self.gate_proj = SmileLinear(
+            config, self.hidden_size, self.intermediate_size, bias=config.mlp_bias
+        )
+        self.up_proj = SmileLinear(
+            config, self.hidden_size, self.intermediate_size, bias=config.mlp_bias
+        )
+        self.down_proj = SmileLinear(
+            config, self.intermediate_size, self.hidden_size, bias=config.mlp_bias
+        )
+        # * --- end of replacement ---
+        self.act_fn = ACT2FN[config.hidden_act]
+
+    def forward(self, x):
+        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+        return down_proj
+
+
+class SmileLlamaAttention(nn.Module):
+    """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+    def __init__(self, config: SmileLlamaConfig, layer_idx: int):
+        super().__init__()
+        self.config = config
+        self.layer_idx = layer_idx
+        self.head_dim = getattr(
+            config, "head_dim", config.hidden_size // config.num_attention_heads
+        )
+        self.num_key_value_groups = (
+            config.num_attention_heads // config.num_key_value_heads
+        )
+        self.scaling = self.head_dim**-0.5
+        self.attention_dropout = config.attention_dropout
+        self.is_causal = True
+
+        # * --- replace nn.Linear with SmileLinear ---
+        self.q_proj = SmileLinear(
+            config,
+            config.hidden_size,
+            config.num_attention_heads * self.head_dim,
+            bias=config.attention_bias,
+        )
+        self.k_proj = SmileLinear(
+            config,
+            config.hidden_size,
+            config.num_key_value_heads * self.head_dim,
+            bias=config.attention_bias,
+        )
+        self.v_proj = SmileLinear(
+            config,
+            config.hidden_size,
+            config.num_key_value_heads * self.head_dim,
+            bias=config.attention_bias,
+        )
+        self.o_proj = SmileLinear(
+            config,
+            config.num_attention_heads * self.head_dim,
+            config.hidden_size,
+            bias=config.attention_bias,
+        )
+        # * --- end of replacement ---
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
+        attention_mask: Optional[torch.Tensor],
+        past_key_value: Optional[Cache] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        **kwargs: Unpack[FlashAttentionKwargs],
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+        input_shape = hidden_states.shape[:-1]
+        hidden_shape = (*input_shape, -1, self.head_dim)
+
+        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+        cos, sin = position_embeddings
+        query_states, key_states = apply_rotary_pos_emb(
+            query_states, key_states, cos, sin
+        )
+
+        if past_key_value is not None:
+            # sin and cos are specific to RoPE models; cache_position needed for the static cache
+            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+            key_states, value_states = past_key_value.update(
+                key_states, value_states, self.layer_idx, cache_kwargs
+            )
+
+        attention_interface: Callable = eager_attention_forward
+        if self.config._attn_implementation != "eager":
+            if self.config._attn_implementation == "sdpa" and kwargs.get(
+                "output_attentions", False
+            ):
+                logger.warning_once(
+                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
+                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+                )
+            else:
+                attention_interface = ALL_ATTENTION_FUNCTIONS[
+                    self.config._attn_implementation
+                ]
+
+        attn_output, attn_weights = attention_interface(
+            self,
+            query_states,
+            key_states,
+            value_states,
+            attention_mask,
+            dropout=0.0 if not self.training else self.attention_dropout,
+            scaling=self.scaling,
+            **kwargs,
+        )
+
+        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+        attn_output = self.o_proj(attn_output)
+        return attn_output, attn_weights
+
+
+class SmileLlamaDecoderLayer(nn.Module):
+
+    def __init__(self, config: SmileLlamaConfig, layer_idx: int):
+        super().__init__()
+        self.hidden_size = config.hidden_size
+
+        self.self_attn = SmileLlamaAttention(config=config, layer_idx=layer_idx)
+
+        self.mlp = SmileLlamaMLP(config)
+        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.post_attention_layernorm = LlamaRMSNorm(
+            config.hidden_size, eps=config.rms_norm_eps
+        )
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Cache] = None,
+        output_attentions: Optional[bool] = False,
+        use_cache: Optional[bool] = False,
+        cache_position: Optional[torch.LongTensor] = None,
+        position_embeddings: Optional[
+            Tuple[torch.Tensor, torch.Tensor]
+        ] = None,  # necessary, but kept here for BC
+        **kwargs: Unpack[FlashAttentionKwargs],
+    ) -> Tuple[
+        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
+    ]:
+        residual = hidden_states
+
+        hidden_states = self.input_layernorm(hidden_states)
+
+        # Self Attention
+        hidden_states, self_attn_weights = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_value=past_key_value,
+            output_attentions=output_attentions,
+            use_cache=use_cache,
+            cache_position=cache_position,
+            position_embeddings=position_embeddings,
+            **kwargs,
+        )
+        hidden_states = residual + hidden_states
+
+        # Fully Connected
+        residual = hidden_states
+        hidden_states = self.post_attention_layernorm(hidden_states)
+        hidden_states = self.mlp(hidden_states)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states,)
+        if output_attentions:
+            outputs += (self_attn_weights,)
+
+        return outputs
+
+
+class SmileLlamaPreTrainedModel(PreTrainedModel):
+    config_class = SmileLlamaConfig
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _no_split_modules = ["SmileLlamaDecoderLayer"]
+    _skip_keys_device_placement = ["past_key_values"]
+    _supports_flash_attn_2 = True
+    _supports_sdpa = True
+    _supports_flex_attn = True
+    _supports_cache_class = True
+    _supports_quantized_cache = True
+    _supports_static_cache = True
+    _supports_attention_backend = True
+
+    def _init_weights(self, module):
+        std = self.config.initializer_range
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+
+
+class SmileLlamaModel(SmileLlamaPreTrainedModel):
+    """
+    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
+
+    Args:
+        config: LlamaConfig
+    """
+
+    def __init__(self, config: SmileLlamaConfig):
+        super().__init__(config)
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embed_tokens = nn.Embedding(
+            config.vocab_size, config.hidden_size, self.padding_idx
+        )
+        self.layers = nn.ModuleList(
+            [
+                SmileLlamaDecoderLayer(config, layer_idx)
+                for layer_idx in range(config.num_hidden_layers)
+            ]
+        )
+        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.rotary_emb = LlamaRotaryEmbedding(config=config)
+        self.gradient_checkpointing = False
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.embed_tokens = value
+
+    @can_return_tuple
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
+    ) -> BaseModelOutputWithPast:
+        output_attentions = (
+            output_attentions
+            if output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+        if (input_ids is None) ^ (inputs_embeds is not None):
+            raise ValueError(
+                "You must specify exactly one of input_ids or inputs_embeds"
+            )
+
+        if self.gradient_checkpointing and self.training and use_cache:
+            logger.warning_once(
+                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+            )
+            use_cache = False
+
+        # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
+        if not isinstance(past_key_values, (type(None), Cache)):
+            raise ValueError(
+                "The `past_key_values` should be either a `Cache` object or `None`."
+            )
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+
+        if use_cache and past_key_values is None:
+            past_key_values = DynamicCache()
+
+        if cache_position is None:
+            past_seen_tokens = (
+                past_key_values.get_seq_length() if past_key_values is not None else 0
+            )
+            cache_position = torch.arange(
+                past_seen_tokens,
+                past_seen_tokens + inputs_embeds.shape[1],
+                device=inputs_embeds.device,
+            )
+
+        if position_ids is None:
+            position_ids = cache_position.unsqueeze(0)
+
+        causal_mask = self._update_causal_mask(
+            attention_mask,
+            inputs_embeds,
+            cache_position,
+            past_key_values,
+            output_attentions,
+        )
+
+        hidden_states = inputs_embeds
+
+        # create position embeddings to be shared across the decoder layers
+        position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+
+        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            if self.gradient_checkpointing and self.training:
+                layer_outputs = self._gradient_checkpointing_func(
+                    partial(decoder_layer.__call__, **flash_attn_kwargs),
+                    hidden_states,
+                    causal_mask,
+                    position_ids,
+                    past_key_values,
+                    output_attentions,
+                    use_cache,
+                    cache_position,
+                    position_embeddings,
+                )
+            else:
+                layer_outputs = decoder_layer(
+                    hidden_states,
+                    attention_mask=causal_mask,
+                    position_ids=position_ids,
+                    past_key_value=past_key_values,
+                    output_attentions=output_attentions,
+                    use_cache=use_cache,
+                    cache_position=cache_position,
+                    position_embeddings=position_embeddings,
+                    **flash_attn_kwargs,
+                )
+
+            hidden_states = layer_outputs[0]
+
+            if output_attentions:
+                all_self_attns += (layer_outputs[1],)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=past_key_values if use_cache else None,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attns,
+        )
+
+    def _update_causal_mask(
+        self,
+        attention_mask: torch.Tensor,
+        input_tensor: torch.Tensor,
+        cache_position: torch.Tensor,
+        past_key_values: Cache,
+        output_attentions: bool = False,
+    ):
+        if self.config._attn_implementation == "flash_attention_2":
+            if attention_mask is not None and (attention_mask == 0.0).any():
+                return attention_mask
+            return None
+        if self.config._attn_implementation == "flex_attention":
+            if isinstance(attention_mask, torch.Tensor):
+                attention_mask = make_flex_block_causal_mask(attention_mask)
+            if isinstance(attention_mask, BlockMask):
+                return attention_mask
+
+        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+        # to infer the attention mask.
+        past_seen_tokens = (
+            past_key_values.get_seq_length() if past_key_values is not None else 0
+        )
+        using_static_cache = isinstance(past_key_values, StaticCache)
+
+        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+        if (
+            self.config._attn_implementation == "sdpa"
+            and not using_static_cache
+            and not output_attentions
+        ):
+            if AttentionMaskConverter._ignore_causal_mask_sdpa(
+                attention_mask,
+                inputs_embeds=input_tensor,
+                past_key_values_length=past_seen_tokens,
+                is_training=self.training,
+            ):
+                return None
+
+        dtype, device = input_tensor.dtype, input_tensor.device
+        sequence_length = input_tensor.shape[1]
+        if using_static_cache:
+            target_length = past_key_values.get_max_cache_shape()
+        else:
+            target_length = (
+                attention_mask.shape[-1]
+                if isinstance(attention_mask, torch.Tensor)
+                else past_seen_tokens + sequence_length + 1
+            )
+
+        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
+        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
+            attention_mask,
+            sequence_length=sequence_length,
+            target_length=target_length,
+            dtype=dtype,
+            device=device,
+            cache_position=cache_position,
+            batch_size=input_tensor.shape[0],
+        )
+
+        if (
+            self.config._attn_implementation == "sdpa"
+            and attention_mask is not None
+            and attention_mask.device.type in ["cuda", "xpu"]
+            and not output_attentions
+        ):
+            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+            # Details: https://github.com/pytorch/pytorch/issues/110213
+            min_dtype = torch.finfo(dtype).min
+            causal_mask = AttentionMaskConverter._unmask_unattended(
+                causal_mask, min_dtype
+            )
+
+        return causal_mask
+
+    @staticmethod
+    def _prepare_4d_causal_attention_mask_with_cache_position(
+        attention_mask: torch.Tensor,
+        sequence_length: int,
+        target_length: int,
+        dtype: torch.dtype,
+        device: torch.device,
+        cache_position: torch.Tensor,
+        batch_size: int,
+        **kwargs,
+    ):
+        """
+        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+        Args:
+            attention_mask (`torch.Tensor`):
+                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
+                `(batch_size, 1, query_length, key_value_length)`.
+            sequence_length (`int`):
+                The sequence length being processed.
+            target_length (`int`):
+                The target length: when generating with static cache, the mask should be as long as the static cache,
+                to account for the 0 padding, the part of the cache that is not filled yet.
+            dtype (`torch.dtype`):
+                The dtype to use for the 4D attention mask.
+            device (`torch.device`):
+                The device to place the 4D attention mask on.
+            cache_position (`torch.Tensor`):
+                Indices depicting the position of the input sequence tokens in the sequence.
+            batch_size (`torch.Tensor`):
+                Batch size.
+        """
+        if attention_mask is not None and attention_mask.dim() == 4:
+            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+            causal_mask = attention_mask
+        else:
+            min_dtype = torch.finfo(dtype).min
+            causal_mask = torch.full(
+                (sequence_length, target_length),
+                fill_value=min_dtype,
+                dtype=dtype,
+                device=device,
+            )
+            if sequence_length != 1:
+                causal_mask = torch.triu(causal_mask, diagonal=1)
+            causal_mask *= torch.arange(
+                target_length, device=device
+            ) > cache_position.reshape(-1, 1)
+            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+            if attention_mask is not None:
+                causal_mask = (
+                    causal_mask.clone()
+                )  # copy to contiguous memory for in-place edit
+                mask_length = attention_mask.shape[-1]
+                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[
+                    :, None, None, :
+                ].to(causal_mask.device)
+                padding_mask = padding_mask == 0
+                causal_mask[:, :, :, :mask_length] = causal_mask[
+                    :, :, :, :mask_length
+                ].masked_fill(padding_mask, min_dtype)
+
+        return causal_mask
+
+
+class SmileLlamaForCausalLM(SmileLlamaPreTrainedModel, GenerationMixin):
+    _tied_weights_keys = ["lm_head.weight"]
+    _tp_plan = {"lm_head": "colwise_rep"}
+    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+    def __init__(self, config: SmileLlamaConfig):
+        super().__init__(config)
+        self.model = SmileLlamaModel(config)
+        self.vocab_size = config.vocab_size
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    @can_return_tuple
+    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
+    @replace_return_docstrings(
+        output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
+    )
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        logits_to_keep: Union[int, torch.Tensor] = 0,
+        **kwargs,
+    ) -> CausalLMOutputWithPast:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+        logits_to_keep (`int` or `torch.Tensor`, *optional*):
+            If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
+            `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
+            token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
+            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
+            This is useful when using packed tensor format (single dimension for batch and sequence length).
+
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, LlamaForCausalLM
+
+        >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
+        >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
+
+        >>> prompt = "Hey, are you conscious? Can you talk to me?"
+        >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+        >>> # Generate
+        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+        ```"""
+        output_attentions = (
+            output_attentions
+            if output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+
+        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+        outputs: BaseModelOutputWithPast = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            cache_position=cache_position,
+            **kwargs,
+        )
+
+        hidden_states = outputs.last_hidden_state
+        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+        slice_indices = (
+            slice(-logits_to_keep, None)
+            if isinstance(logits_to_keep, int)
+            else logits_to_keep
+        )
+        logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+        loss = None
+        if labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                vocab_size=self.config.vocab_size,
+                **kwargs,
+            )
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
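
The static helper `_prepare_4d_causal_attention_mask_with_cache_position` defined above can be exercised on its own. The following is a minimal illustrative sketch, not code from the package, assuming fusion-bench 0.2.22 and PyTorch are installed; the input values are arbitrary and only the shape and fill pattern of the returned 4D mask are of interest:

    import torch
    from fusion_bench.models.modeling_smile_llama.modeling_smile_llama import SmileLlamaModel

    # Batch of 2, a chunk of 3 query tokens, and a target (cache) length of 5.
    mask = SmileLlamaModel._prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask=torch.tensor([[1, 1, 1], [0, 1, 1]]),  # second sample is left-padded
        sequence_length=3,
        target_length=5,
        dtype=torch.float32,
        device=torch.device("cpu"),
        cache_position=torch.arange(3),
        batch_size=2,
    )
    print(mask.shape)  # torch.Size([2, 1, 3, 5]); disallowed positions hold torch.finfo(torch.float32).min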

fusion_bench/models/modeling_smile_llama/register.py
@@ -0,0 +1,8 @@
+from transformers import AutoConfig, AutoModel, AutoModelForCausalLM
+
+from .configuration_smile_llama import SmileLlamaConfig
+from .modeling_smile_llama import SmileLlamaForCausalLM, SmileLlamaModel
+
+AutoConfig.register("smile_llama", SmileLlamaConfig)
+AutoModel.register(SmileLlamaConfig, SmileLlamaModel)
+AutoModelForCausalLM.register(SmileLlamaConfig, SmileLlamaForCausalLM)
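
The registration above makes the "smile_llama" model type resolvable through the Hugging Face Auto classes. The following is a minimal usage sketch, not code from the package; it assumes the register module is importable as `fusion_bench.models.modeling_smile_llama.register` and that `ckpt_dir` is a hypothetical local directory previously written by `SmileLlamaForCausalLM.save_pretrained`:

    from transformers import AutoConfig, AutoModelForCausalLM

    # Importing the module runs the AutoConfig/AutoModel registration shown above.
    import fusion_bench.models.modeling_smile_llama.register  # noqa: F401

    ckpt_dir = "path/to/smile_llama_checkpoint"  # hypothetical checkpoint directory
    config = AutoConfig.from_pretrained(ckpt_dir)  # model_type "smile_llama" resolves to SmileLlamaConfig
    model = AutoModelForCausalLM.from_pretrained(ckpt_dir)  # builds SmileLlamaForCausalLM with SmileLinear projections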