vllm_npu-0.4.2-py3-none-any.whl
- vllm/__init__.py +23 -0
- vllm/_custom_ops.py +251 -0
- vllm/attention/__init__.py +13 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +127 -0
- vllm/attention/backends/flash_attn.py +271 -0
- vllm/attention/backends/flashinfer.py +220 -0
- vllm/attention/backends/rocm_flash_attn.py +374 -0
- vllm/attention/backends/torch_sdpa.py +250 -0
- vllm/attention/backends/xformers.py +393 -0
- vllm/attention/layer.py +56 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/paged_attn.py +216 -0
- vllm/attention/ops/prefix_prefill.py +792 -0
- vllm/attention/ops/triton_flash_attention.py +810 -0
- vllm/attention/selector.py +91 -0
- vllm/block.py +84 -0
- vllm/config.py +1225 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +295 -0
- vllm/core/block/common.py +199 -0
- vllm/core/block/cpu_gpu_block_allocator.py +228 -0
- vllm/core/block/interfaces.py +205 -0
- vllm/core/block/naive_block.py +318 -0
- vllm/core/block/prefix_caching_block.py +606 -0
- vllm/core/block_manager_v1.py +625 -0
- vllm/core/block_manager_v2.py +258 -0
- vllm/core/evictor_v1.py +105 -0
- vllm/core/evictor_v2.py +127 -0
- vllm/core/interfaces.py +113 -0
- vllm/core/policy.py +45 -0
- vllm/core/scheduler.py +1163 -0
- vllm/distributed/__init__.py +3 -0
- vllm/distributed/communication_op.py +237 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
- vllm/distributed/device_communicators/pynccl.py +287 -0
- vllm/distributed/device_communicators/pynccl_utils.py +66 -0
- vllm/distributed/parallel_state.py +339 -0
- vllm/distributed/utils.py +136 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +649 -0
- vllm/engine/async_llm_engine.py +737 -0
- vllm/engine/llm_engine.py +784 -0
- vllm/engine/metrics.py +368 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +76 -0
- vllm/engine/output_processor/multi_step.py +142 -0
- vllm/engine/output_processor/single_step.py +284 -0
- vllm/engine/output_processor/stop_checker.py +101 -0
- vllm/engine/output_processor/util.py +19 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +119 -0
- vllm/entrypoints/llm.py +259 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +186 -0
- vllm/entrypoints/openai/cli_args.py +115 -0
- vllm/entrypoints/openai/protocol.py +460 -0
- vllm/entrypoints/openai/serving_chat.py +392 -0
- vllm/entrypoints/openai/serving_completion.py +347 -0
- vllm/entrypoints/openai/serving_engine.py +234 -0
- vllm/envs.py +217 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/cpu_executor.py +152 -0
- vllm/executor/distributed_gpu_executor.py +115 -0
- vllm/executor/executor_base.py +115 -0
- vllm/executor/gpu_executor.py +150 -0
- vllm/executor/multiproc_worker_utils.py +263 -0
- vllm/executor/neuron_executor.py +91 -0
- vllm/executor/ray_gpu_executor.py +327 -0
- vllm/executor/ray_utils.py +119 -0
- vllm/logger.py +153 -0
- vllm/logging/__init__.py +5 -0
- vllm/logging/formatter.py +15 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +262 -0
- vllm/lora/layers.py +1181 -0
- vllm/lora/lora.py +167 -0
- vllm/lora/models.py +645 -0
- vllm/lora/punica.py +213 -0
- vllm/lora/request.py +32 -0
- vllm/lora/utils.py +98 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +7 -0
- vllm/model_executor/guided_decoding/__init__.py +25 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +173 -0
- vllm/model_executor/layers/fused_moe/__init__.py +7 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
- vllm/model_executor/layers/layernorm.py +71 -0
- vllm/model_executor/layers/linear.py +709 -0
- vllm/model_executor/layers/logits_processor.py +115 -0
- vllm/model_executor/layers/ops/__init__.py +0 -0
- vllm/model_executor/layers/ops/rand.py +157 -0
- vllm/model_executor/layers/ops/sample.py +406 -0
- vllm/model_executor/layers/quantization/__init__.py +35 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/awq.py +175 -0
- vllm/model_executor/layers/quantization/base_config.py +97 -0
- vllm/model_executor/layers/quantization/fp8.py +265 -0
- vllm/model_executor/layers/quantization/gptq.py +224 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
- vllm/model_executor/layers/quantization/marlin.py +227 -0
- vllm/model_executor/layers/quantization/schema.py +84 -0
- vllm/model_executor/layers/quantization/squeezellm.py +137 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/rotary_embedding.py +525 -0
- vllm/model_executor/layers/sampler.py +1051 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
- vllm/model_executor/model_loader/__init__.py +30 -0
- vllm/model_executor/model_loader/loader.py +362 -0
- vllm/model_executor/model_loader/neuron.py +136 -0
- vllm/model_executor/model_loader/tensorizer.py +368 -0
- vllm/model_executor/model_loader/utils.py +41 -0
- vllm/model_executor/model_loader/weight_utils.py +372 -0
- vllm/model_executor/models/__init__.py +119 -0
- vllm/model_executor/models/baichuan.py +410 -0
- vllm/model_executor/models/bloom.py +327 -0
- vllm/model_executor/models/chatglm.py +386 -0
- vllm/model_executor/models/commandr.py +373 -0
- vllm/model_executor/models/dbrx.py +413 -0
- vllm/model_executor/models/decilm.py +122 -0
- vllm/model_executor/models/deepseek.py +438 -0
- vllm/model_executor/models/falcon.py +444 -0
- vllm/model_executor/models/gemma.py +393 -0
- vllm/model_executor/models/gpt2.py +266 -0
- vllm/model_executor/models/gpt_bigcode.py +274 -0
- vllm/model_executor/models/gpt_j.py +281 -0
- vllm/model_executor/models/gpt_neox.py +295 -0
- vllm/model_executor/models/internlm2.py +323 -0
- vllm/model_executor/models/jais.py +333 -0
- vllm/model_executor/models/llama.py +442 -0
- vllm/model_executor/models/llava.py +239 -0
- vllm/model_executor/models/minicpm.py +531 -0
- vllm/model_executor/models/mixtral.py +583 -0
- vllm/model_executor/models/mixtral_quant.py +404 -0
- vllm/model_executor/models/mpt.py +295 -0
- vllm/model_executor/models/olmo.py +356 -0
- vllm/model_executor/models/opt.py +349 -0
- vllm/model_executor/models/orion.py +319 -0
- vllm/model_executor/models/phi.py +300 -0
- vllm/model_executor/models/qwen.py +284 -0
- vllm/model_executor/models/qwen2.py +367 -0
- vllm/model_executor/models/qwen2_moe.py +447 -0
- vllm/model_executor/models/stablelm.py +301 -0
- vllm/model_executor/models/starcoder2.py +302 -0
- vllm/model_executor/models/xverse.py +366 -0
- vllm/model_executor/sampling_metadata.py +588 -0
- vllm/model_executor/utils.py +35 -0
- vllm/outputs.py +150 -0
- vllm/py.typed +2 -0
- vllm/sampling_params.py +340 -0
- vllm/sequence.py +766 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +397 -0
- vllm/spec_decode/interfaces.py +73 -0
- vllm/spec_decode/metrics.py +191 -0
- vllm/spec_decode/multi_step_worker.py +203 -0
- vllm/spec_decode/ngram_worker.py +176 -0
- vllm/spec_decode/spec_decode_worker.py +472 -0
- vllm/spec_decode/top1_proposer.py +200 -0
- vllm/spec_decode/util.py +228 -0
- vllm/test_utils.py +41 -0
- vllm/transformers_utils/__init__.py +0 -0
- vllm/transformers_utils/config.py +58 -0
- vllm/transformers_utils/configs/__init__.py +16 -0
- vllm/transformers_utils/configs/chatglm.py +68 -0
- vllm/transformers_utils/configs/dbrx.py +278 -0
- vllm/transformers_utils/configs/falcon.py +87 -0
- vllm/transformers_utils/configs/jais.py +236 -0
- vllm/transformers_utils/configs/mpt.py +178 -0
- vllm/transformers_utils/detokenizer.py +313 -0
- vllm/transformers_utils/tokenizer.py +149 -0
- vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
- vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
- vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
- vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
- vllm/transformers_utils/tokenizers/__init__.py +5 -0
- vllm/transformers_utils/tokenizers/baichuan.py +255 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +209 -0
- vllm/utils.py +677 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +105 -0
- vllm/worker/cpu_model_runner.py +346 -0
- vllm/worker/cpu_worker.py +321 -0
- vllm/worker/model_runner.py +1168 -0
- vllm/worker/neuron_model_runner.py +196 -0
- vllm/worker/neuron_worker.py +98 -0
- vllm/worker/worker.py +345 -0
- vllm/worker/worker_base.py +146 -0
- vllm_npu-0.4.2.dist-info/LICENSE +201 -0
- vllm_npu-0.4.2.dist-info/METADATA +173 -0
- vllm_npu-0.4.2.dist-info/RECORD +219 -0
- vllm_npu-0.4.2.dist-info/WHEEL +5 -0
- vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/model_executor/models/mixtral.py
@@ -0,0 +1,583 @@
# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Mixtral model."""
from typing import Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import MixtralConfig

from vllm import _custom_ops as ops
from vllm.attention import Attention, AttentionMetadata
from vllm.config import LoRAConfig
from vllm.distributed import (get_tensor_model_parallel_rank,
                              get_tensor_model_parallel_world_size,
                              tensor_model_parallel_all_reduce)
from vllm.model_executor.layers.fused_moe import fused_moe
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (QKVParallelLinear,
                                               ReplicatedLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig)
from vllm.model_executor.layers.quantization.fp8 import Fp8Config
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.model_executor.utils import set_weight_attrs
from vllm.sequence import SamplerOutput
from vllm.utils import print_warning_once


class MixtralMoE(nn.Module):
    """A tensor-parallel MoE implementation for Mixtral that shards each
    expert across all ranks.

    Each expert's weights are sharded across all ranks, a fused MoE
    kernel is used for the forward pass, and finally we reduce the outputs
    across ranks.
    """

    def __init__(
        self,
        num_experts: int,
        top_k: int,
        hidden_size: int,
        intermediate_size: int,
        params_dtype: Optional[torch.dtype] = None,
        tp_size: Optional[int] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.tp_size = tp_size or get_tensor_model_parallel_world_size()
        self.num_total_experts = num_experts
        self.top_k = top_k
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size // self.tp_size
        self.quant_config = quant_config

        # FIXME(pcmoritz): Make this more general to support different
        # quantization schemes
        self.use_fp8 = isinstance(quant_config, Fp8Config)

        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype

        # Gate always runs at half / full precision for now.
        self.gate = ReplicatedLinear(self.hidden_size,
                                     self.num_total_experts,
                                     bias=False,
                                     params_dtype=self.params_dtype,
                                     quant_config=None)

        if self.use_fp8:
            params_dtype = torch.float8_e4m3fn

        self.w13_weight = nn.Parameter(
            torch.empty(self.num_total_experts,
                        2 * self.intermediate_size,
                        self.hidden_size,
                        dtype=params_dtype))
        self.w2_weight = nn.Parameter(
            torch.empty(self.num_total_experts,
                        self.hidden_size,
                        self.intermediate_size,
                        dtype=params_dtype))

        set_weight_attrs(self.w13_weight, {
            "weight_loader": self.weight_loader,
        })
        set_weight_attrs(self.w2_weight, {
            "weight_loader": self.weight_loader,
        })

        # Used for fp8.
        self.w13_scale = None
        self.w2_scale = None
        self.a13_scale = None
        self.a2_scale = None

        if self.use_fp8:
            # WEIGHT_SCALE (for fp8)
            self.w13_scale = nn.Parameter(torch.ones(self.num_total_experts,
                                                     dtype=torch.float32),
                                          requires_grad=False)
            self.w2_scale = nn.Parameter(torch.ones(self.num_total_experts,
                                                    dtype=torch.float32),
                                         requires_grad=False)

            # If loading an fp8 checkpoint, pass the weight loaders.
            # If loading an fp16 checkpoint, do not (we will quantize in
            #   process_weights_after_loading()).
            if quant_config.is_checkpoint_fp8_serialized:
                set_weight_attrs(self.w13_scale, {
                    "weight_loader": self.weight_loader,
                })
                set_weight_attrs(self.w2_scale, {
                    "weight_loader": self.weight_loader,
                })

            # ACT_SCALE (for fp8)
            if quant_config.activation_scheme == "static":
                if not quant_config.is_checkpoint_fp8_serialized:
                    raise ValueError(
                        "Found static activation scheme for checkpoint that "
                        "was not serialized fp8.")
                self.a13_scale = nn.Parameter(torch.zeros(
                    self.num_total_experts, dtype=torch.float32),
                                              requires_grad=False)
                self.a2_scale = nn.Parameter(torch.zeros(
                    self.num_total_experts, dtype=torch.float32),
                                             requires_grad=False)

                set_weight_attrs(self.a13_scale, {
                    "weight_loader": self.weight_loader,
                })
                set_weight_attrs(self.a2_scale, {
                    "weight_loader": self.weight_loader,
                })

    def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor,
                      weight_name: str, expert_id: int):
        tp_rank = get_tensor_model_parallel_rank()
        param_data = param.data
        shard_size = self.intermediate_size
        shard = slice(tp_rank * shard_size, (tp_rank + 1) * shard_size)
        if weight_name.endswith("w1.weight"):
            param_data[expert_id, 0:shard_size, :] = loaded_weight[shard, :]
        if weight_name.endswith("w3.weight"):
            param_data[expert_id,
                       shard_size:2 * shard_size, :] = loaded_weight[shard, :]
        if weight_name.endswith("w2.weight"):
            param_data[expert_id, :, :] = loaded_weight[:, shard]
        if "act_scale" in weight_name or "weight_scale" in weight_name:
            param_data[expert_id] = loaded_weight

    def process_weights_after_loading(self):
        # Fp8 is the only case where we need to process after loading.
        if not self.use_fp8:
            return

        # If checkpoint is fp16, quantize here.
        if not self.quant_config.is_checkpoint_fp8_serialized:
            w13_weight = torch.empty_like(self.w13_weight.data,
                                          dtype=torch.float8_e4m3fn)
            w2_weight = torch.empty_like(self.w2_weight.data,
                                         dtype=torch.float8_e4m3fn)
            for expert in range(self.num_total_experts):
                w13_weight[expert, :, :], self.w13_scale[
                    expert] = ops.scaled_fp8_quant(
                        self.w13_weight.data[expert, :, :])
                w2_weight[expert, :, :], self.w2_scale[
                    expert] = ops.scaled_fp8_quant(
                        self.w2_weight.data[expert, :, :])
            self.w13_weight = nn.Parameter(w13_weight, requires_grad=False)
            self.w2_weight = nn.Parameter(w2_weight, requires_grad=False)

        # If checkpoint is fp8 + static, clean up act_scales,
        #   since the state_dict has an act_scale per expert but our kernels
        #   are passed one act_scale shared across all experts.
        elif self.quant_config.activation_scheme == "static":
            if self.a13_scale is None or self.a2_scale is None:
                raise ValueError(
                    "QuantConfig has static quantization, but found "
                    "activation scales are None.")

            if (not all_close_1d(self.a13_scale)
                    or not all_close_1d(self.a2_scale)):
                print_warning_once(
                    "Found act_scales that are not equal for fp8 MoE layer. "
                    "Using the maximum across experts for each layer.")

            self.a13_scale = nn.Parameter(self.a13_scale.max(),
                                          requires_grad=False)
            self.a2_scale = nn.Parameter(self.a2_scale.max(),
                                         requires_grad=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        num_tokens, hidden_size = hidden_states.shape
        hidden_states = hidden_states.view(-1, self.hidden_size)
        # router_logits: (num_tokens, n_experts)
        router_logits, _ = self.gate(hidden_states)
        final_hidden_states = fused_moe(hidden_states,
                                        self.w13_weight,
                                        self.w2_weight,
                                        router_logits,
                                        self.top_k,
                                        renormalize=True,
                                        inplace=True,
                                        use_fp8=self.use_fp8,
                                        w1_scale=self.w13_scale,
                                        w2_scale=self.w2_scale,
                                        a1_scale=self.a13_scale,
                                        a2_scale=self.a2_scale)

        if self.tp_size > 1:
            final_hidden_states = tensor_model_parallel_all_reduce(
                final_hidden_states)

        return final_hidden_states.view(num_tokens, hidden_size)


class MixtralAttention(nn.Module):

    def __init__(self,
                 hidden_size: int,
                 num_heads: int,
                 num_kv_heads: int,
                 max_position: int = 4096 * 32,
                 rope_theta: float = 10000,
                 quant_config: Optional[QuantizationConfig] = None,
                 sliding_window: Optional[int] = None) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.sliding_window = sliding_window

        if isinstance(
                quant_config,
                Fp8Config) and not quant_config.is_checkpoint_fp8_serialized:
            print_warning_once(
                "For Mixtral FP8 quantization, we currently do not quantize "
                "the attention layers until their FP8 performance is improved."
            )
            quant_config = None

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )
        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position,
            base=int(self.rope_theta),
            is_neox_style=True,
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            sliding_window=self.sliding_window,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class MixtralDecoderLayer(nn.Module):

    def __init__(
        self,
        config: MixtralConfig,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        # Requires transformers > 4.32.0
        rope_theta = getattr(config, "rope_theta", 10000)
        self.self_attn = MixtralAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            max_position=config.max_position_embeddings,
            num_kv_heads=config.num_key_value_heads,
            rope_theta=rope_theta,
            sliding_window=config.sliding_window,
            quant_config=quant_config)
        self.block_sparse_moe = MixtralMoE(
            num_experts=config.num_local_experts,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.intermediate_size,
            quant_config=quant_config)
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> torch.Tensor:
        # Self Attention
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.block_sparse_moe(hidden_states)
        return hidden_states, residual


class MixtralModel(nn.Module):

    def __init__(
        self,
        config: MixtralConfig,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        super().__init__()
        self.padding_idx = config.pad_token_id
        lora_vocab = (lora_config.lora_extra_vocab_size *
                      (lora_config.max_loras or 1)) if lora_config else 0
        self.vocab_size = config.vocab_size + lora_vocab
        self.org_vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
        )
        self.layers = nn.ModuleList([
            MixtralDecoderLayer(config, quant_config=quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        for i in range(len(self.layers)):
            layer = self.layers[i]
            hidden_states, residual = layer(positions, hidden_states,
                                            kv_caches[i], attn_metadata,
                                            residual)
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states


class MixtralForCausalLM(nn.Module):
    fall_back_to_pt_during_load = False

    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
    }

    # LoRA specific attributes
    supported_lora_modules = [
        "qkv_proj",
        "o_proj",
        "embed_tokens",
        "lm_head",
    ]
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }
    embedding_padding_modules = ["lm_head"]

    def __init__(
        self,
        config: MixtralConfig,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.model = MixtralModel(config,
                                  quant_config,
                                  lora_config=lora_config)
        self.unpadded_vocab_size = config.vocab_size
        if lora_config:
            self.unpadded_vocab_size += lora_config.lora_extra_vocab_size
        self.lm_head = ParallelLMHead(
            self.unpadded_vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
            padding_size=DEFAULT_VOCAB_PADDING_SIZE
            # We need bigger padding if using lora for kernel
            # compatibility
            if not lora_config else lora_config.lora_vocab_padding_size,
        )
        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata)
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        logits = self.logits_processor(self.lm_head.weight, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: Optional[torch.Tensor],
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]

        expert_params_mapping = [
            # These are the weight scales for the experts
            # (param_name, weight_name, expert_id)
            ("w13_scale" if weight_name in ["w1", "w3"] else "w2_scale",
             f"experts.{expert_id}.{weight_name}.weight_scale", expert_id)
            for expert_id in range(self.config.num_local_experts)
            for weight_name in ["w1", "w2", "w3"]
        ] + [
            # These are the weights for the experts
            # (param_name, weight_name, expert_id)
            ("w13_weight" if weight_name in ["w1", "w3"] else "w2_weight",
             f"experts.{expert_id}.{weight_name}.weight", expert_id)
            for expert_id in range(self.config.num_local_experts)
            for weight_name in ["w1", "w2", "w3"]
        ] + [
            # These are the activation scales for the experts
            # (param_name, weight_name, expert_id)
            ("a13_scale" if weight_name in ["w1", "w3"] else "a2_scale",
             f"experts.{expert_id}.{weight_name}.act_scale", expert_id)
            for expert_id in range(self.config.num_local_experts)
            for weight_name in ["w1", "w2", "w3"]
        ]

        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue

            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                for param_name, weight_name, expert_id in expert_params_mapping:
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)
                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    weight_loader(param,
                                  loaded_weight,
                                  weight_name,
                                  expert_id=expert_id)
                    break
                else:
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(param, "weight_loader",
                                            default_weight_loader)
                    weight_loader(param, loaded_weight)


def all_close_1d(x: torch.Tensor) -> bool:
    assert len(x.shape) == 1
    return all(torch.allclose(x[0], x[i]) for i in range(x.shape[0]))