vllm-npu 0.4.2__py3-none-any.whl
- vllm/__init__.py +23 -0
- vllm/_custom_ops.py +251 -0
- vllm/attention/__init__.py +13 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +127 -0
- vllm/attention/backends/flash_attn.py +271 -0
- vllm/attention/backends/flashinfer.py +220 -0
- vllm/attention/backends/rocm_flash_attn.py +374 -0
- vllm/attention/backends/torch_sdpa.py +250 -0
- vllm/attention/backends/xformers.py +393 -0
- vllm/attention/layer.py +56 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/paged_attn.py +216 -0
- vllm/attention/ops/prefix_prefill.py +792 -0
- vllm/attention/ops/triton_flash_attention.py +810 -0
- vllm/attention/selector.py +91 -0
- vllm/block.py +84 -0
- vllm/config.py +1225 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +295 -0
- vllm/core/block/common.py +199 -0
- vllm/core/block/cpu_gpu_block_allocator.py +228 -0
- vllm/core/block/interfaces.py +205 -0
- vllm/core/block/naive_block.py +318 -0
- vllm/core/block/prefix_caching_block.py +606 -0
- vllm/core/block_manager_v1.py +625 -0
- vllm/core/block_manager_v2.py +258 -0
- vllm/core/evictor_v1.py +105 -0
- vllm/core/evictor_v2.py +127 -0
- vllm/core/interfaces.py +113 -0
- vllm/core/policy.py +45 -0
- vllm/core/scheduler.py +1163 -0
- vllm/distributed/__init__.py +3 -0
- vllm/distributed/communication_op.py +237 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +274 -0
- vllm/distributed/device_communicators/pynccl.py +287 -0
- vllm/distributed/device_communicators/pynccl_utils.py +66 -0
- vllm/distributed/parallel_state.py +339 -0
- vllm/distributed/utils.py +136 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +649 -0
- vllm/engine/async_llm_engine.py +737 -0
- vllm/engine/llm_engine.py +784 -0
- vllm/engine/metrics.py +368 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +76 -0
- vllm/engine/output_processor/multi_step.py +142 -0
- vllm/engine/output_processor/single_step.py +284 -0
- vllm/engine/output_processor/stop_checker.py +101 -0
- vllm/engine/output_processor/util.py +19 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +119 -0
- vllm/entrypoints/llm.py +259 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +186 -0
- vllm/entrypoints/openai/cli_args.py +115 -0
- vllm/entrypoints/openai/protocol.py +460 -0
- vllm/entrypoints/openai/serving_chat.py +392 -0
- vllm/entrypoints/openai/serving_completion.py +347 -0
- vllm/entrypoints/openai/serving_engine.py +234 -0
- vllm/envs.py +217 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/cpu_executor.py +152 -0
- vllm/executor/distributed_gpu_executor.py +115 -0
- vllm/executor/executor_base.py +115 -0
- vllm/executor/gpu_executor.py +150 -0
- vllm/executor/multiproc_worker_utils.py +263 -0
- vllm/executor/neuron_executor.py +91 -0
- vllm/executor/ray_gpu_executor.py +327 -0
- vllm/executor/ray_utils.py +119 -0
- vllm/logger.py +153 -0
- vllm/logging/__init__.py +5 -0
- vllm/logging/formatter.py +15 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +262 -0
- vllm/lora/layers.py +1181 -0
- vllm/lora/lora.py +167 -0
- vllm/lora/models.py +645 -0
- vllm/lora/punica.py +213 -0
- vllm/lora/request.py +32 -0
- vllm/lora/utils.py +98 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +7 -0
- vllm/model_executor/guided_decoding/__init__.py +25 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +70 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +130 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +184 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +173 -0
- vllm/model_executor/layers/fused_moe/__init__.py +7 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +140 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +479 -0
- vllm/model_executor/layers/layernorm.py +71 -0
- vllm/model_executor/layers/linear.py +709 -0
- vllm/model_executor/layers/logits_processor.py +115 -0
- vllm/model_executor/layers/ops/__init__.py +0 -0
- vllm/model_executor/layers/ops/rand.py +157 -0
- vllm/model_executor/layers/ops/sample.py +406 -0
- vllm/model_executor/layers/quantization/__init__.py +35 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/awq.py +175 -0
- vllm/model_executor/layers/quantization/base_config.py +97 -0
- vllm/model_executor/layers/quantization/fp8.py +265 -0
- vllm/model_executor/layers/quantization/gptq.py +224 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +438 -0
- vllm/model_executor/layers/quantization/marlin.py +227 -0
- vllm/model_executor/layers/quantization/schema.py +84 -0
- vllm/model_executor/layers/quantization/squeezellm.py +137 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/rotary_embedding.py +525 -0
- vllm/model_executor/layers/sampler.py +1051 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +155 -0
- vllm/model_executor/model_loader/__init__.py +30 -0
- vllm/model_executor/model_loader/loader.py +362 -0
- vllm/model_executor/model_loader/neuron.py +136 -0
- vllm/model_executor/model_loader/tensorizer.py +368 -0
- vllm/model_executor/model_loader/utils.py +41 -0
- vllm/model_executor/model_loader/weight_utils.py +372 -0
- vllm/model_executor/models/__init__.py +119 -0
- vllm/model_executor/models/baichuan.py +410 -0
- vllm/model_executor/models/bloom.py +327 -0
- vllm/model_executor/models/chatglm.py +386 -0
- vllm/model_executor/models/commandr.py +373 -0
- vllm/model_executor/models/dbrx.py +413 -0
- vllm/model_executor/models/decilm.py +122 -0
- vllm/model_executor/models/deepseek.py +438 -0
- vllm/model_executor/models/falcon.py +444 -0
- vllm/model_executor/models/gemma.py +393 -0
- vllm/model_executor/models/gpt2.py +266 -0
- vllm/model_executor/models/gpt_bigcode.py +274 -0
- vllm/model_executor/models/gpt_j.py +281 -0
- vllm/model_executor/models/gpt_neox.py +295 -0
- vllm/model_executor/models/internlm2.py +323 -0
- vllm/model_executor/models/jais.py +333 -0
- vllm/model_executor/models/llama.py +442 -0
- vllm/model_executor/models/llava.py +239 -0
- vllm/model_executor/models/minicpm.py +531 -0
- vllm/model_executor/models/mixtral.py +583 -0
- vllm/model_executor/models/mixtral_quant.py +404 -0
- vllm/model_executor/models/mpt.py +295 -0
- vllm/model_executor/models/olmo.py +356 -0
- vllm/model_executor/models/opt.py +349 -0
- vllm/model_executor/models/orion.py +319 -0
- vllm/model_executor/models/phi.py +300 -0
- vllm/model_executor/models/qwen.py +284 -0
- vllm/model_executor/models/qwen2.py +367 -0
- vllm/model_executor/models/qwen2_moe.py +447 -0
- vllm/model_executor/models/stablelm.py +301 -0
- vllm/model_executor/models/starcoder2.py +302 -0
- vllm/model_executor/models/xverse.py +366 -0
- vllm/model_executor/sampling_metadata.py +588 -0
- vllm/model_executor/utils.py +35 -0
- vllm/outputs.py +150 -0
- vllm/py.typed +2 -0
- vllm/sampling_params.py +340 -0
- vllm/sequence.py +766 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +397 -0
- vllm/spec_decode/interfaces.py +73 -0
- vllm/spec_decode/metrics.py +191 -0
- vllm/spec_decode/multi_step_worker.py +203 -0
- vllm/spec_decode/ngram_worker.py +176 -0
- vllm/spec_decode/spec_decode_worker.py +472 -0
- vllm/spec_decode/top1_proposer.py +200 -0
- vllm/spec_decode/util.py +228 -0
- vllm/test_utils.py +41 -0
- vllm/transformers_utils/__init__.py +0 -0
- vllm/transformers_utils/config.py +58 -0
- vllm/transformers_utils/configs/__init__.py +16 -0
- vllm/transformers_utils/configs/chatglm.py +68 -0
- vllm/transformers_utils/configs/dbrx.py +278 -0
- vllm/transformers_utils/configs/falcon.py +87 -0
- vllm/transformers_utils/configs/jais.py +236 -0
- vllm/transformers_utils/configs/mpt.py +178 -0
- vllm/transformers_utils/detokenizer.py +313 -0
- vllm/transformers_utils/tokenizer.py +149 -0
- vllm/transformers_utils/tokenizer_group/__init__.py +33 -0
- vllm/transformers_utils/tokenizer_group/base_tokenizer_group.py +55 -0
- vllm/transformers_utils/tokenizer_group/ray_tokenizer_group.py +169 -0
- vllm/transformers_utils/tokenizer_group/tokenizer_group.py +78 -0
- vllm/transformers_utils/tokenizers/__init__.py +5 -0
- vllm/transformers_utils/tokenizers/baichuan.py +255 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +209 -0
- vllm/utils.py +677 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +105 -0
- vllm/worker/cpu_model_runner.py +346 -0
- vllm/worker/cpu_worker.py +321 -0
- vllm/worker/model_runner.py +1168 -0
- vllm/worker/neuron_model_runner.py +196 -0
- vllm/worker/neuron_worker.py +98 -0
- vllm/worker/worker.py +345 -0
- vllm/worker/worker_base.py +146 -0
- vllm_npu-0.4.2.dist-info/LICENSE +201 -0
- vllm_npu-0.4.2.dist-info/METADATA +173 -0
- vllm_npu-0.4.2.dist-info/RECORD +219 -0
- vllm_npu-0.4.2.dist-info/WHEEL +5 -0
- vllm_npu-0.4.2.dist-info/top_level.txt +1 -0
vllm/model_executor/layers/quantization/fp8.py
@@ -0,0 +1,265 @@
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch.nn import Module
from torch.nn.parameter import Parameter

from vllm import _custom_ops as ops
from vllm.logger import init_logger
from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig)
from vllm.model_executor.utils import set_weight_attrs

ACTIVATION_SCHEMES = ["static", "dynamic"]

logger = init_logger(__name__)


class Fp8Config(QuantizationConfig):
    """Config class for FP8."""

    def __init__(
        self,
        is_checkpoint_fp8_serialized: bool = False,
        activation_scheme: str = "dynamic",
    ) -> None:
        self.is_checkpoint_fp8_serialized = is_checkpoint_fp8_serialized
        if is_checkpoint_fp8_serialized:
            logger.warning("Detected fp8 checkpoint. Please note that the "
                           "format is experimental and subject to change.")
        if activation_scheme not in ACTIVATION_SCHEMES:
            raise ValueError(
                f"Unsupported activation scheme {activation_scheme}")
        self.activation_scheme = activation_scheme

    @classmethod
    def get_name(cls) -> str:
        return "fp8"

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.bfloat16, torch.half]

    @classmethod
    def get_min_capability(cls) -> int:
        return 89

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return []

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "Fp8Config":
        quant_method = cls.get_from_keys(config, ["quant_method"])
        is_checkpoint_fp8_serialized = ("fp8" in quant_method)
        activation_scheme = cls.get_from_keys(config, ["activation_scheme"])
        return cls(is_checkpoint_fp8_serialized=is_checkpoint_fp8_serialized,
                   activation_scheme=activation_scheme)

    def get_quant_method(
            self, layer: torch.nn.Module) -> Optional["Fp8LinearMethod"]:
        if isinstance(layer, LinearBase):
            return Fp8LinearMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []


class Fp8LinearMethod(LinearMethodBase):
    """Linear method for FP8.
    Supports loading FP8 checkpoints with static weight scale and
    dynamic/static activation scale.

    Also supports loading quantized FP16/BF16 model checkpoints with dynamic
    activation scaling. The weight scaling factor will be initialized after
    the model weights are loaded.

    Limitations:
    1. Only support per-tensor quantization due to torch._scaled_mm support.
    2. Only support float8_e4m3fn data type due to the limitation of
       torch._scaled_mm (https://github.com/pytorch/pytorch/blob/2e48b39603411a41c5025efbe52f89560b827825/aten/src/ATen/native/cuda/Blas.cpp#L854-L856)

    Args:
        quant_config: The quantization config.
    """

    def __init__(self, quant_config: Fp8Config):
        self.quant_config = quant_config

    def _create_scale_param(
        self,
        scale_name: str,
        layer: torch.nn.Module,
        output_partition_sizes: List[int],
        **extra_weight_attrs,
    ) -> None:
        scale = Parameter(torch.empty(len(output_partition_sizes),
                                      dtype=torch.float32),
                          requires_grad=False)
        layer.register_parameter(scale_name, scale)
        set_weight_attrs(
            scale, {
                **extra_weight_attrs,
                "fp8_scales_shard_indexer":
                self.scales_shard_indexer,
            })

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        del input_size, output_size
        output_size_per_partition = sum(output_partition_sizes)

        layer.process_after_load = True
        layer.logical_widths = output_partition_sizes

        # WEIGHT
        weight_dtype = (torch.float8_e4m3fn
                        if self.quant_config.is_checkpoint_fp8_serialized else
                        params_dtype)
        weight = Parameter(torch.empty(output_size_per_partition,
                                       input_size_per_partition,
                                       dtype=weight_dtype),
                           requires_grad=False)
        layer.register_parameter("weight", weight)
        set_weight_attrs(weight, {
            **extra_weight_attrs,
            "input_dim": 1,
            "output_dim": 0,
        })

        # If the checkpoint is serialized fp8, load the scales.
        # Otherwise, wait until process_weights_after_loading.
        if self.quant_config.is_checkpoint_fp8_serialized:
            # WEIGHT SCALE
            self._create_scale_param(
                scale_name="weight_scale",
                layer=layer,
                output_partition_sizes=output_partition_sizes,
                **extra_weight_attrs)

            # ACTIVATION SCALE
            if self.quant_config.activation_scheme == "static":
                self._create_scale_param(
                    scale_name="act_scale",
                    layer=layer,
                    output_partition_sizes=output_partition_sizes,
                    **extra_weight_attrs)

    def scales_shard_indexer(
            self, param: torch.Tensor, loaded_weight: torch.Tensor,
            shard_id: Union[str, int]) -> Tuple[torch.Tensor, torch.Tensor]:
        qkv_idxs = {"q": 0, "k": 1, "v": 2}

        if isinstance(shard_id, int):
            pass
        elif isinstance(shard_id, str):
            if shard_id not in qkv_idxs:
                raise ValueError(f"Unknown shard_id: {shard_id}")
            shard_id = qkv_idxs[shard_id]
        else:
            raise ValueError(
                f"Shard id must be int or str but got {type(shard_id)}")

        return param[shard_id], loaded_weight

    def process_weights_after_loading(self, layer: Module) -> None:
        if (not hasattr(layer, "process_after_load")
                or not layer.process_after_load):
            return

        # If checkpoint is fp/bf16 (not serialized fp8), quantize the weights.
        if not self.quant_config.is_checkpoint_fp8_serialized:
            qweight, weight_scale = ops.scaled_fp8_quant(layer.weight,
                                                         scale=None)
            layer.weight = Parameter(qweight.t(), requires_grad=False)
            layer.weight_scale = Parameter(weight_scale, requires_grad=False)
            layer.logical_widths = None
            layer.act_scale = None
            return

        # If checkpoint is fp8, requantize the separately quantized logical
        # weights into a single fp8 weight with a single weight scale.
        else:
            # WEIGHT_SCALE / WEIGHT
            # Loop over logical weights, requantizing with a single scale.
            max_w_scale = layer.weight_scale.max()
            start = 0
            for idx, logical_width in enumerate(layer.logical_widths):
                end = start + logical_width
                weight_dq = per_tensor_dequantize(layer.weight[start:end, :],
                                                  layer.weight_scale[idx])

                layer.weight[start:end, :] = per_tensor_quantize(
                    weight_dq, layer.weight_scale.max())
                start = end
            layer.weight_scale = Parameter(max_w_scale, requires_grad=False)

            # WEIGHT
            # Transpose weight for passing to torch._scaled_mm
            weight = layer.weight
            layer.weight = Parameter(weight.t(), requires_grad=False)

            # ACT_SCALE
            # Dynamic: set to None (required input to ops.scaled_fp8_quant).
            # Static: set to max of the act_scales (since they are equal).
            if self.quant_config.activation_scheme == "dynamic":
                layer.act_scale = None
            elif self.quant_config.activation_scheme == "static":
                if not all_close_1d(layer.act_scale):
                    raise ValueError(
                        "All the act_scales for the logical weights of a "
                        f"layer must be equal. But got {layer.act_scale}")
                layer.act_scale = Parameter(layer.act_scale.max(),
                                            requires_grad=False)
            else:
                raise ValueError(
                    f"Unknown scheme {self.quant_config.activation_scheme}")

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        # ops.scaled_fp8_quant supports both dynamic and static quant.
        #   If dynamic, layer.act_scale is None and x_scale is computed from x.
        #   If static, layer.act_scale is a scalar and x_scale is set to it.
        qinput, x_scale = ops.scaled_fp8_quant(x, layer.act_scale)

        # Fused GEMM_DQ
        output, _ = torch._scaled_mm(
            qinput,
            layer.weight,
            out_dtype=x.dtype,
            scale_a=x_scale,
            scale_b=layer.weight_scale,
            bias=bias,
        )

        return output


def all_close_1d(x: torch.Tensor) -> bool:
    assert len(x.shape) == 1
    return all(torch.allclose(x[0], x[i]) for i in range(x.shape[0]))


def per_tensor_quantize(tensor: torch.Tensor,
                        inv_scale: float) -> torch.Tensor:
    finfo = torch.finfo(torch.float8_e4m3fn)
    qweight = (tensor / inv_scale).clamp(min=finfo.min, max=finfo.max)
    return qweight.to(torch.float8_e4m3fn)


def per_tensor_dequantize(tensor: torch.Tensor,
                          inv_scale: float) -> torch.Tensor:
    fake_qweight = tensor.to(torch.float16)
    dq_weight = fake_qweight * inv_scale
    return dq_weight
vllm/model_executor/layers/quantization/gptq.py
@@ -0,0 +1,224 @@
import enum
from enum import Enum
from fractions import Fraction
from typing import Any, Dict, List, Optional

import torch
from torch.nn.parameter import Parameter

from vllm import _custom_ops as ops
from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig)
from vllm.model_executor.utils import set_weight_attrs


class GPTQConfig(QuantizationConfig):
    """Config class for GPTQ.

    Reference: https://arxiv.org/abs/2210.17323
    """

    def __init__(
        self,
        weight_bits: int,
        group_size: int,
        desc_act: bool,
    ) -> None:
        self.weight_bits = weight_bits
        self.group_size = group_size
        self.desc_act = desc_act
        self.pack_factor = Fraction(32, self.weight_bits)
        if self.weight_bits not in [2, 3, 4, 8]:
            raise ValueError(
                "Currently, only 2/3/4/8-bit weight quantization is "
                f"supported for GPTQ, but got {self.weight_bits} bits.")

    def __repr__(self) -> str:
        return (f"GPTQConfig(weight_bits={self.weight_bits}, "
                f"group_size={self.group_size}, "
                f"desc_act={self.desc_act})")

    @classmethod
    def get_name(cls) -> str:
        return "gptq"

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.half]

    @classmethod
    # Need to figure it out
    def get_min_capability(cls) -> int:
        return 60

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return ["quantize_config.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "GPTQConfig":
        weight_bits = cls.get_from_keys(config, ["bits"])
        group_size = cls.get_from_keys(config, ["group_size"])
        desc_act = cls.get_from_keys(config, ["desc_act"])
        return cls(weight_bits, group_size, desc_act)

    def get_quant_method(
            self, layer: torch.nn.Module) -> Optional["GPTQLinearMethod"]:
        if isinstance(layer, LinearBase):
            return GPTQLinearMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []


class ExllamaState(Enum):

    UNUSED = enum.auto()
    UNINITIALIZED = enum.auto()
    READY = enum.auto()


class GPTQLinearMethod(LinearMethodBase):
    """Linear method for GPTQ.

    Args:
        quant_config: The GPTQ quantization config.
    """

    def __init__(self, quant_config: GPTQConfig):
        self.quant_config = quant_config

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        del output_size  # Unused.
        if input_size_per_partition % self.quant_config.group_size != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")
        output_size_per_partition = sum(output_partition_sizes)
        if (output_size_per_partition % self.quant_config.pack_factor.numerator
                != 0):
            raise ValueError(
                "The output size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        if self.quant_config.group_size != -1:
            group_size = self.quant_config.group_size
        else:
            group_size = input_size
        exllama_state = ExllamaState.UNINITIALIZED
        scale_and_zero_size = input_size // group_size
        scale_and_zero_input_dim = None
        if (input_size != input_size_per_partition
                and self.quant_config.group_size != -1):
            # For act-order models, we cannot use Exllama for row parallel layer
            if self.quant_config.desc_act:
                exllama_state = ExllamaState.UNUSED
            else:
                # we need to partition qzeros and scales for exllama kernel
                scale_and_zero_size = input_size_per_partition // group_size
                scale_and_zero_input_dim = 0

        qweight = Parameter(
            torch.empty(
                input_size_per_partition // self.quant_config.pack_factor,
                output_size_per_partition,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qweight, {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 0,
                "pack_factor": self.quant_config.pack_factor,
            })
        g_idx = Parameter(
            torch.tensor(
                [
                    i // self.quant_config.group_size
                    for i in range(input_size_per_partition)
                ],
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        # Ignore warning from fused linear layers such as QKVParallelLinear.
        set_weight_attrs(g_idx, {"input_dim": 0, "ignore_warning": True})
        qzeros = Parameter(
            torch.empty(
                scale_and_zero_size,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qzeros, {
                "input_dim": scale_and_zero_input_dim,
                "output_dim": 1,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
            })
        scales = Parameter(
            torch.empty(
                scale_and_zero_size,
                output_size_per_partition,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(scales, {
            "input_dim": scale_and_zero_input_dim,
            "output_dim": 1,
        })

        layer.register_parameter("qweight", qweight)
        set_weight_attrs(qweight, extra_weight_attrs)
        layer.register_parameter("g_idx", g_idx)
        set_weight_attrs(g_idx, extra_weight_attrs)
        layer.register_parameter("qzeros", qzeros)
        set_weight_attrs(qzeros, extra_weight_attrs)
        layer.register_parameter("scales", scales)
        set_weight_attrs(scales, extra_weight_attrs)

        layer.exllama_state = exllama_state

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        qweight = layer.qweight
        out_shape = x.shape[:-1] + (qweight.shape[-1], )
        reshaped_x = x.reshape(-1, x.shape[-1])
        # exllama needs to shuffle the weight after the weight is loaded
        # here we do the shuffle on first forward pass
        if layer.exllama_state == ExllamaState.UNINITIALIZED:
            if self.quant_config.desc_act:
                layer.g_idx.data = torch.argsort(layer.g_idx).to(torch.int)
            else:
                layer.g_idx.data = torch.empty((0, ),
                                               device=layer.g_idx.device)
            layer.exllama_state = ExllamaState.READY
            ops.gptq_shuffle(layer.qweight, layer.g_idx,
                             self.quant_config.weight_bits)
        output = ops.gptq_gemm(reshaped_x, layer.qweight, layer.qzeros,
                               layer.scales, layer.g_idx,
                               layer.exllama_state == ExllamaState.READY,
                               self.quant_config.weight_bits)
        if bias is not None:
            output.add_(bias)
        return output.reshape(out_shape)