sglang 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- sglang/__init__.py +8 -0
- sglang/api.py +10 -2
- sglang/bench_latency.py +151 -40
- sglang/bench_serving.py +46 -22
- sglang/check_env.py +24 -2
- sglang/global_config.py +0 -1
- sglang/lang/backend/base_backend.py +3 -1
- sglang/lang/backend/openai.py +8 -3
- sglang/lang/backend/runtime_endpoint.py +46 -29
- sglang/lang/choices.py +164 -0
- sglang/lang/compiler.py +2 -2
- sglang/lang/interpreter.py +6 -13
- sglang/lang/ir.py +14 -5
- sglang/srt/constrained/base_tool_cache.py +1 -1
- sglang/srt/constrained/fsm_cache.py +12 -2
- sglang/srt/layers/activation.py +33 -0
- sglang/srt/layers/{token_attention.py → decode_attention.py} +9 -5
- sglang/srt/layers/extend_attention.py +6 -1
- sglang/srt/layers/layernorm.py +65 -0
- sglang/srt/layers/logits_processor.py +6 -1
- sglang/srt/layers/pooler.py +50 -0
- sglang/srt/layers/{context_flashattention_nopad.py → prefill_attention.py} +5 -0
- sglang/srt/layers/radix_attention.py +4 -7
- sglang/srt/managers/detokenizer_manager.py +31 -9
- sglang/srt/managers/io_struct.py +63 -0
- sglang/srt/managers/policy_scheduler.py +173 -25
- sglang/srt/managers/schedule_batch.py +174 -380
- sglang/srt/managers/tokenizer_manager.py +197 -112
- sglang/srt/managers/tp_worker.py +299 -364
- sglang/srt/mem_cache/{base_cache.py → base_prefix_cache.py} +9 -4
- sglang/srt/mem_cache/chunk_cache.py +43 -20
- sglang/srt/mem_cache/memory_pool.py +10 -15
- sglang/srt/mem_cache/radix_cache.py +74 -40
- sglang/srt/model_executor/cuda_graph_runner.py +27 -12
- sglang/srt/model_executor/forward_batch_info.py +319 -0
- sglang/srt/model_executor/model_runner.py +30 -47
- sglang/srt/models/chatglm.py +1 -1
- sglang/srt/models/commandr.py +1 -1
- sglang/srt/models/dbrx.py +1 -1
- sglang/srt/models/deepseek.py +1 -1
- sglang/srt/models/deepseek_v2.py +1 -1
- sglang/srt/models/gemma.py +1 -1
- sglang/srt/models/gemma2.py +1 -2
- sglang/srt/models/gpt_bigcode.py +1 -1
- sglang/srt/models/grok.py +1 -1
- sglang/srt/models/internlm2.py +3 -8
- sglang/srt/models/llama2.py +5 -5
- sglang/srt/models/llama_classification.py +1 -1
- sglang/srt/models/llama_embedding.py +88 -0
- sglang/srt/models/llava.py +1 -2
- sglang/srt/models/llavavid.py +1 -2
- sglang/srt/models/minicpm.py +1 -1
- sglang/srt/models/mixtral.py +1 -1
- sglang/srt/models/mixtral_quant.py +1 -1
- sglang/srt/models/qwen.py +1 -1
- sglang/srt/models/qwen2.py +1 -1
- sglang/srt/models/qwen2_moe.py +1 -12
- sglang/srt/models/stablelm.py +1 -1
- sglang/srt/openai_api/adapter.py +189 -39
- sglang/srt/openai_api/protocol.py +43 -1
- sglang/srt/sampling/penaltylib/__init__.py +13 -0
- sglang/srt/sampling/penaltylib/orchestrator.py +357 -0
- sglang/srt/sampling/penaltylib/penalizers/frequency_penalty.py +80 -0
- sglang/srt/sampling/penaltylib/penalizers/min_new_tokens.py +105 -0
- sglang/srt/sampling/penaltylib/penalizers/presence_penalty.py +79 -0
- sglang/srt/sampling/penaltylib/penalizers/repetition_penalty.py +83 -0
- sglang/srt/sampling_params.py +31 -4
- sglang/srt/server.py +93 -21
- sglang/srt/server_args.py +30 -19
- sglang/srt/utils.py +31 -13
- sglang/test/run_eval.py +10 -1
- sglang/test/runners.py +63 -63
- sglang/test/simple_eval_humaneval.py +2 -8
- sglang/test/simple_eval_mgsm.py +203 -0
- sglang/test/srt/sampling/penaltylib/utils.py +337 -0
- sglang/test/test_layernorm.py +60 -0
- sglang/test/test_programs.py +4 -2
- sglang/test/test_utils.py +21 -3
- sglang/utils.py +0 -1
- sglang/version.py +1 -1
- {sglang-0.2.10.dist-info → sglang-0.2.12.dist-info}/METADATA +50 -31
- sglang-0.2.12.dist-info/RECORD +112 -0
- sglang/srt/layers/linear.py +0 -884
- sglang/srt/layers/quantization/__init__.py +0 -64
- sglang/srt/layers/quantization/fp8.py +0 -677
- sglang-0.2.10.dist-info/RECORD +0 -100
- {sglang-0.2.10.dist-info → sglang-0.2.12.dist-info}/LICENSE +0 -0
- {sglang-0.2.10.dist-info → sglang-0.2.12.dist-info}/WHEEL +0 -0
- {sglang-0.2.10.dist-info → sglang-0.2.12.dist-info}/top_level.txt +0 -0
sglang/srt/layers/quantization/fp8.py (deleted, 677 lines)
@@ -1,677 +0,0 @@
-"""
-Copyright 2023-2024 SGLang Team
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-# adapted from https://github.com/vllm-project/vllm/blob/e76466dde2bc9525d55165ceaa600d298c7bf773/vllm/model_executor/layers/quantization/fp8.py
-# FIXME refactor in progress
-from typing import Any, Dict, List, Optional, Union
-
-import torch
-from torch.nn import Module
-from torch.nn.parameter import Parameter
-from vllm import _custom_ops as ops
-from vllm.logger import init_logger
-from vllm.model_executor.layers.fused_moe import FusedMoE, FusedMoEMethodBase, fused_moe
-from vllm.model_executor.layers.quantization.base_config import (
-    QuantizationConfig,
-    QuantizeMethodBase,
-)
-from vllm.model_executor.layers.quantization.gptq_marlin import (
-    GPTQ_MARLIN_MAX_PARALLEL,
-    GPTQ_MARLIN_MIN_THREAD_N,
-    GPTQMarlinState,
-    marlin_permute_scales,
-)
-from vllm.model_executor.layers.quantization.utils.marlin_utils import pack_fp8_to_int32
-from vllm.model_executor.utils import set_weight_attrs
-from vllm.platforms import current_platform
-from vllm.utils import print_warning_once
-
-from sglang.srt.layers.linear import LinearBase, LinearMethodBase
-
-ACTIVATION_SCHEMES = ["static", "dynamic"]
-
-logger = init_logger(__name__)
-
-
-def cutlass_fp8_supported() -> bool:
-    capability = current_platform.get_device_capability()
-    capability = capability[0] * 10 + capability[1]
-
-    return ops.cutlass_scaled_mm_supports_fp8(capability)
-
-
-class Fp8Config(QuantizationConfig):
-    """Config class for FP8."""
-
-    def __init__(
-        self,
-        is_checkpoint_fp8_serialized: bool = False,
-        activation_scheme: str = "dynamic",
-    ) -> None:
-        self.is_checkpoint_fp8_serialized = is_checkpoint_fp8_serialized
-        if is_checkpoint_fp8_serialized:
-            logger.warning(
-                "Detected fp8 checkpoint. Please note that the "
-                "format is experimental and subject to change."
-            )
-        if activation_scheme not in ACTIVATION_SCHEMES:
-            raise ValueError(f"Unsupported activation scheme {activation_scheme}")
-        self.activation_scheme = activation_scheme
-
-    @classmethod
-    def get_name(cls) -> str:
-        return "fp8"
-
-    @classmethod
-    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
-        return [torch.bfloat16, torch.half]
-
-    @classmethod
-    def get_min_capability(cls) -> int:
-        return 80
-
-    @classmethod
-    def get_config_filenames(cls) -> List[str]:
-        return []
-
-    @classmethod
-    def from_config(cls, config: Dict[str, Any]) -> "Fp8Config":
-        quant_method = cls.get_from_keys(config, ["quant_method"])
-        is_checkpoint_fp8_serialized = "fp8" in quant_method
-        activation_scheme = cls.get_from_keys(config, ["activation_scheme"])
-        return cls(
-            is_checkpoint_fp8_serialized=is_checkpoint_fp8_serialized,
-            activation_scheme=activation_scheme,
-        )
-
-    def get_quant_method(
-        self, layer: torch.nn.Module
-    ) -> Optional["QuantizeMethodBase"]:
-
-        if isinstance(layer, LinearBase):
-            return Fp8LinearMethod(self)
-        elif isinstance(layer, FusedMoE):
-            return Fp8MoEMethod(self)
-        return None
-
-    def get_scaled_act_names(self) -> List[str]:
-        return []
-
-
-class Fp8LinearMethod(LinearMethodBase):
-    """Linear method for FP8.
-    Supports loading FP8 checkpoints with static weight scale and
-    dynamic/static activation scale.
-
-    Also supports loading quantized FP16/BF16 model checkpoints with dynamic
-    activation scaling. The weight scaling factor will be initialized after
-    the model weights are loaded.
-
-    Limitations:
-    1. Only support per-tensor quantization due to torch._scaled_mm support.
-    2. Only support float8_e4m3fn data type due to the limitation of
-       torch._scaled_mm (https://github.com/pytorch/pytorch/blob/2e48b39603411a41c5025efbe52f89560b827825/aten/src/ATen/native/cuda/Blas.cpp#L854-L856)
-
-    Args:
-        quant_config: The quantization config.
-    """
-
-    def __init__(self, quant_config: Fp8Config):
-        self.quant_config = quant_config
-        self.cutlass_fp8_supported = cutlass_fp8_supported()
-
-        # For GPUs that lack FP8 hardware support, we can leverage the Marlin
-        # kernel for fast weight-only FP8 quantization
-        capability = current_platform.get_device_capability()
-        capability = capability[0] * 10 + capability[1]
-        self.use_marlin = capability < 89
-
-    def _create_scale_param(
-        self,
-        scale_name: str,
-        layer: torch.nn.Module,
-        output_partition_sizes: List[int],
-        **extra_weight_attrs,
-    ) -> None:
-        scale = Parameter(
-            torch.empty(len(output_partition_sizes), dtype=torch.float32),
-            requires_grad=False,
-        )
-        scale[:] = torch.finfo(torch.float8_e4m3fn).min
-        layer.register_parameter(scale_name, scale)
-        set_weight_attrs(
-            scale,
-            {
-                **extra_weight_attrs,
-                "needs_scalar_to_array": True,
-            },
-        )
-
-    def create_weights(
-        self,
-        layer: torch.nn.Module,
-        input_size_per_partition: int,
-        output_partition_sizes: List[int],
-        input_size: int,
-        output_size: int,
-        params_dtype: torch.dtype,
-        **extra_weight_attrs,
-    ):
-        del input_size, output_size
-        output_size_per_partition = sum(output_partition_sizes)
-
-        layer.process_after_load = True
-        layer.logical_widths = output_partition_sizes
-
-        layer.input_size_per_partition = input_size_per_partition
-        layer.output_size_per_partition = output_size_per_partition
-        layer.orig_dtype = params_dtype
-
-        # WEIGHT
-        # weight_dtype = (torch.float8_e4m3fn
-        #                 if self.quant_config.is_checkpoint_fp8_serialized else
-        #                 params_dtype)
-        weight_dtype = torch.float8_e4m3fn
-        weight = Parameter(
-            torch.empty(
-                output_size_per_partition, input_size_per_partition, dtype=weight_dtype
-            ),
-            requires_grad=False,
-        )
-        layer.register_parameter("weight", weight)
-        set_weight_attrs(
-            weight,
-            {
-                **extra_weight_attrs,
-                "input_dim": 1,
-                "output_dim": 0,
-            },
-        )
-
-        # If checkpoint is serialized fp8, load them.
-        # Otherwise, wait until process_weights_after_loading.
-        if self.quant_config.is_checkpoint_fp8_serialized:
-            # WEIGHT SCALE
-            self._create_scale_param(
-                scale_name="weight_scale",
-                layer=layer,
-                output_partition_sizes=output_partition_sizes,
-                **extra_weight_attrs,
-            )
-
-            # INPUT ACTIVATION SCALE
-            if self.quant_config.activation_scheme == "static":
-                self._create_scale_param(
-                    scale_name="input_scale",
-                    layer=layer,
-                    output_partition_sizes=output_partition_sizes,
-                    **extra_weight_attrs,
-                )
-
-        # For GPUs without FP8 hardware support, we use Marlin for fast
-        # fused dequantization
-        if self.use_marlin:
-            layer.marlin_state = GPTQMarlinState.REPACK
-
-    def prepare_layer_for_marlin(self, layer: Module) -> None:
-        print_warning_once(
-            "Your GPU does not have native support for FP8 computation but "
-            "FP8 quantization is being used. Weight-only FP8 compression will "
-            "be used leveraging the Marlin kernel. This may degrade "
-            "performance for compute-heavy workloads."
-        )
-
-        part_size_n = layer.output_size_per_partition
-        part_size_k = layer.input_size_per_partition
-
-        assert layer.marlin_state == GPTQMarlinState.REPACK
-        layer.marlin_state = GPTQMarlinState.READY
-
-        device = layer.weight.device
-
-        # WEIGHTS
-        # Repack weights to gptq format (packed int32 elements)
-        packed_gptq_qweight = pack_fp8_to_int32(layer.weight)
-
-        # Repack weights to marlin format
-        marlin_qweight = ops.gptq_marlin_repack(
-            b_q_weight=packed_gptq_qweight,
-            perm=torch.empty(0, dtype=torch.int, device=device),
-            size_k=part_size_k,
-            size_n=part_size_n,
-            num_bits=8,
-        )
-        layer.weight = Parameter(marlin_qweight, requires_grad=False)
-
-        # WEIGHT SCALES
-        # Currently Marlin doesn't support per-tensor scales, so we
-        # expand it to channelwise
-        scales = (
-            layer.weight_scale.repeat(1, part_size_n).to(layer.orig_dtype).to(device)
-        )
-        # Permute scales
-        marlin_scales = marlin_permute_scales(
-            s=scales,
-            size_k=part_size_k,
-            size_n=part_size_n,
-            group_size=-1,
-            num_bits=8,
-        )
-        layer.weight_scale = Parameter(marlin_scales, requires_grad=False)
-
-        # Allocate marlin workspace
-        max_workspace_size = (
-            part_size_n // GPTQ_MARLIN_MIN_THREAD_N
-        ) * GPTQ_MARLIN_MAX_PARALLEL
-        workspace = torch.zeros(
-            max_workspace_size, dtype=torch.int, device=device, requires_grad=False
-        )
-
-        layer.workspace = workspace
-
-    def process_weights_after_loading(self, layer: Module) -> None:
-        if not hasattr(layer, "process_after_load") or not layer.process_after_load:
-            return
-
-        # If checkpoint is fp/bf16 (not serialized fp8), quantize the weights.
-        if not self.quant_config.is_checkpoint_fp8_serialized:
-            qweight, weight_scale = ops.scaled_fp8_quant(layer.weight, scale=None)
-            layer.weight = Parameter(qweight.t(), requires_grad=False)
-            layer.weight_scale = Parameter(weight_scale, requires_grad=False)
-            layer.logical_widths = None
-            layer.input_scale = None
-            if self.use_marlin:
-                self.prepare_layer_for_marlin(layer)
-            return
-
-        # If checkpoint is fp8, requantize the separately quantized logical
-        # weights into a single fp8 weight with a single weight scale.
-        else:
-            # WEIGHT_SCALE / WEIGHT
-            # Loop over logical weights, requantizing with single scale.
-            max_w_scale = layer.weight_scale.max()
-
-            # QKV / MLP is fused in the on disk checkpoint if any of the
-            # weight scales are still set to the default since we initialize
-            # N weight scales for N shards but we only load 1 weight scale
-            # from disk in this case. As a result, we skip dequant -> requant
-            # since we already have quantized QKV together.
-            # Sample Model with fused checkpoint:
-            # * nm-testing/Phi-3-mini-128k-instruct-FP8
-            unfused_module_in_checkpoint = (
-                layer.weight_scale[-1] > torch.finfo(torch.float8_e4m3fn).min
-            )
-
-            if unfused_module_in_checkpoint:
-                start = 0
-                for idx, logical_width in enumerate(layer.logical_widths):
-                    end = start + logical_width
-                    weight_dq = per_tensor_dequantize(
-                        layer.weight[start:end, :], layer.weight_scale[idx]
-                    )
-
-                    layer.weight[start:end, :] = per_tensor_quantize(
-                        weight_dq, layer.weight_scale.max()
-                    )
-                    start = end
-            layer.weight_scale = Parameter(max_w_scale, requires_grad=False)
-
-            # WEIGHT
-            # Transpose weight for passing to torch._scaled_mm
-            weight = layer.weight
-            layer.weight = Parameter(weight.t(), requires_grad=False)
-
-            # INPUT ACTIVATION SCALE
-            # Dynamic: set to None (required input to ops.scaled_fp8_quant).
-            # Static: set to max of the input_scales (since they are equal).
-            if self.quant_config.activation_scheme == "dynamic":
-                layer.input_scale = None
-            elif self.quant_config.activation_scheme == "static":
-                layer.input_scale = Parameter(
-                    layer.input_scale.max(), requires_grad=False
-                )
-            else:
-                raise ValueError(
-                    f"Unknown scheme {self.quant_config.activation_scheme}"
-                )
-
-            if self.use_marlin:
-                self.prepare_layer_for_marlin(layer)
-
-    def apply(
-        self,
-        layer: torch.nn.Module,
-        x: torch.Tensor,
-        bias: Optional[torch.Tensor] = None,
-    ) -> torch.Tensor:
-
-        if self.use_marlin:
-            # For GPUs that lack FP8 hardware support, we can leverage the
-            # Marlin kernel for fast weight-only FP8 quantization
-
-            reshaped_x = x.reshape(-1, x.shape[-1])
-            out_shape = x.shape[:-1] + (layer.output_size_per_partition,)
-
-            output = ops.fp8_marlin_gemm(
-                a=reshaped_x,
-                b_q_weight=layer.weight,
-                b_scales=layer.weight_scale,
-                workspace=layer.workspace,
-                num_bits=8,
-                size_m=reshaped_x.shape[0],
-                size_n=layer.output_size_per_partition,
-                size_k=layer.input_size_per_partition,
-            )
-
-            if bias is not None:
-                output.add_(bias)  # In-place add
-
-            return output.reshape(out_shape)
-
-        else:
-
-            # ops.scaled_fp8_quant supports both dynamic and static quant.
-            # If dynamic, layer.input_scale is None and x_scale computed from x
-            # If static, layer.input_scale is scalar and x_scale is input_scale
-
-            if bias is None and self.cutlass_fp8_supported:
-                qinput, x_scale = ops.scaled_fp8_quant(x, layer.input_scale)
-
-                # Fused GEMM_DQ
-                output = ops.cutlass_scaled_mm(
-                    qinput,
-                    layer.weight,
-                    out_dtype=x.dtype,
-                    scale_a=x_scale,
-                    scale_b=layer.weight_scale,
-                )
-
-            else:
-                qinput, x_scale = ops.scaled_fp8_quant(
-                    x, layer.input_scale, batch_dim_padding=17
-                )
-
-                # Fused GEMM_DQ -- note we padded the input above because
-                # torch._scaled_mm is more performant for matrices with
-                # batch dimension > 16. Note that this could change
-                # in the future.
-                output, _ = torch._scaled_mm(
-                    qinput,
-                    layer.weight,
-                    out_dtype=x.dtype,
-                    scale_a=x_scale,
-                    scale_b=layer.weight_scale,
-                    bias=bias,
-                )
-
-            return torch.narrow(output, 0, 0, x.shape[0])
-
-
-class Fp8MoEMethod(FusedMoEMethodBase):
-    """MoE method for FP8.
-    Supports loading FP8 checkpoints with static weight scale and
-    dynamic/static activation scale.
-
-    Also supports loading quantized FP16/BF16 model checkpoints with dynamic
-    activation scaling. The weight scaling factor will be initialized after
-    the model weights are loaded.
-
-    Args:
-        quant_config: The quantization config.
-    """
-
-    def __init__(self, quant_config: Fp8Config):
-        self.quant_config = quant_config
-
-    def create_weights(
-        self,
-        layer: Module,
-        num_experts: int,
-        hidden_size: int,
-        intermediate_size: int,
-        params_dtype: torch.dtype,
-        **extra_weight_attrs,
-    ):
-
-        layer.process_after_load = True
-
-        if self.quant_config.is_checkpoint_fp8_serialized:
-            params_dtype = torch.float8_e4m3fn
-
-        # WEIGHTS
-        w13_weight = torch.nn.Parameter(
-            torch.empty(
-                num_experts, 2 * intermediate_size, hidden_size, dtype=params_dtype
-            ),
-            requires_grad=False,
-        )
-        layer.register_parameter("w13_weight", w13_weight)
-        set_weight_attrs(w13_weight, extra_weight_attrs)
-
-        w2_weight = torch.nn.Parameter(
-            torch.empty(
-                num_experts, hidden_size, intermediate_size, dtype=params_dtype
-            ),
-            requires_grad=False,
-        )
-        layer.register_parameter("w2_weight", w2_weight)
-        set_weight_attrs(w2_weight, extra_weight_attrs)
-
-        # WEIGHT_SCALES
-        # Allocate 2 scales for w1 and w3 respectively.
-        # They will be combined to a single scale after weight loading.
-        w13_scale = torch.nn.Parameter(
-            torch.ones(num_experts, 2, dtype=torch.float32), requires_grad=False
-        )
-        layer.register_parameter("w13_scale", w13_scale)
-
-        w2_scale = torch.nn.Parameter(
-            torch.ones(num_experts, dtype=torch.float32), requires_grad=False
-        )
-        layer.register_parameter("w2_scale", w2_scale)
-
-        # If loading fp8 checkpoint, pass the weight loaders.
-        # If loading an fp16 checkpoint, do not (we will quantize in
-        #   process_weights_after_loading()
-        if self.quant_config.is_checkpoint_fp8_serialized:
-            set_weight_attrs(w13_scale, extra_weight_attrs)
-            set_weight_attrs(w2_scale, extra_weight_attrs)
-
-        # INPUT_SCALES
-        if self.quant_config.activation_scheme == "static":
-            if not self.quant_config.is_checkpoint_fp8_serialized:
-                raise ValueError(
-                    "Found static activation scheme for checkpoint that "
-                    "was not serialized fp8."
-                )
-
-            a13_scale = torch.nn.Parameter(
-                torch.ones(num_experts, dtype=torch.float32), requires_grad=False
-            )
-            layer.register_parameter("a13_scale", a13_scale)
-            set_weight_attrs(a13_scale, extra_weight_attrs)
-
-            a2_scale = torch.nn.Parameter(
-                torch.ones(num_experts, dtype=torch.float32), requires_grad=False
-            )
-            layer.register_parameter("a2_scale", a2_scale)
-            set_weight_attrs(a2_scale, extra_weight_attrs)
-        else:
-            layer.a13_scale = None
-            layer.a2_scale = None
-
-    def process_weights_after_loading(self, layer: Module) -> None:
-        if not hasattr(layer, "process_after_load") or not layer.process_after_load:
-            return
-
-        # If checkpoint is fp16, quantize in place.
-        if not self.quant_config.is_checkpoint_fp8_serialized:
-            w13_weight = torch.empty_like(
-                layer.w13_weight.data, dtype=torch.float8_e4m3fn
-            )
-            w2_weight = torch.empty_like(
-                layer.w2_weight.data, dtype=torch.float8_e4m3fn
-            )
-
-            # Re-initialize w13_scale because we directly quantize
-            # merged w13 weights and generate a single scaling factor.
-            layer.w13_scale = torch.nn.Parameter(
-                torch.ones(
-                    layer.num_experts, dtype=torch.float32, device=w13_weight.device
-                ),
-                requires_grad=False,
-            )
-            for expert in range(layer.num_experts):
-                w13_weight[expert, :, :], layer.w13_scale[expert] = (
-                    ops.scaled_fp8_quant(layer.w13_weight.data[expert, :, :])
-                )
-                w2_weight[expert, :, :], layer.w2_scale[expert] = ops.scaled_fp8_quant(
-                    layer.w2_weight.data[expert, :, :]
-                )
-            layer.w13_weight = torch.nn.Parameter(w13_weight, requires_grad=False)
-            layer.w2_weight = torch.nn.Parameter(w2_weight, requires_grad=False)
-            return
-
-        # If checkpoint is fp8, we need to handle that the
-        # MoE kernels require single activation scale and single weight
-        # scale for w13 per expert.
-        else:
-            # Fp8 moe kernels require a single activation scale.
-            # We take the max of all the scales in case they differ.
-            if self.quant_config.activation_scheme == "static":
-                if layer.a13_scale is None or layer.a2_scale is None:
-                    raise ValueError(
-                        "QuantConfig has static quantization, but found "
-                        "activation scales are None."
-                    )
-                if not all_close_1d(layer.a13_scale) or not all_close_1d(
-                    layer.a2_scale
-                ):
-                    print_warning_once(
-                        "Found input_scales that are not equal for "
-                        "fp8 MoE layer. Using the maximum across experts "
-                        "for each layer. "
-                    )
-                layer.a13_scale = torch.nn.Parameter(
-                    layer.a13_scale.max(), requires_grad=False
-                )
-                layer.a2_scale = torch.nn.Parameter(
-                    layer.a2_scale.max(), requires_grad=False
-                )
-
-            # Fp8 moe kernel needs single weight scale for w13 per expert.
-            # We take the max then dequant and requant each expert.
-            assert layer.w13_scale is not None
-            shard_size = layer.intermediate_size_per_partition
-            max_w13_scales = layer.w13_scale.max(dim=1).values
-            for expert_id in range(layer.num_experts):
-                start = 0
-                for shard_id in range(2):
-                    dq_weight = per_tensor_dequantize(
-                        layer.w13_weight[expert_id][start : start + shard_size, :],
-                        layer.w13_scale[expert_id][shard_id],
-                    )
-                    layer.w13_weight[expert_id][start : start + shard_size, :] = (
-                        per_tensor_quantize(dq_weight, max_w13_scales[expert_id])
-                    )
-                    start += shard_size
-
-            layer.w13_scale = torch.nn.Parameter(max_w13_scales, requires_grad=False)
-            return
-
-    def apply(
-        self,
-        layer: torch.nn.Module,
-        x: torch.Tensor,
-        router_logits: torch.Tensor,
-        top_k: int,
-        renormalize: bool = True,
-    ) -> torch.Tensor:
-
-        return fused_moe(
-            x,
-            layer.w13_weight,
-            layer.w2_weight,
-            router_logits,
-            top_k,
-            renormalize=renormalize,
-            inplace=True,
-            use_fp8=True,
-            w1_scale=layer.w13_scale,
-            w2_scale=layer.w2_scale,
-            a1_scale=layer.a13_scale,
-            a2_scale=layer.a2_scale,
-        )
-
-
-# FIXME: not used
-class Fp8KVCacheMethod(QuantizeMethodBase):
-    """Supports loading kv-cache scaling factors from FP8 checkpoints."""
-
-    def __init__(self, quant_config: Fp8Config):
-        self.quant_config = quant_config
-
-    def create_weights(self, layer: torch.nn.Module):
-        """Create "weight" (aka kv_scale) for an attention layer.
-
-        Args:
-            layer: The layer that is using the QuantizeMethodBase factory.
-        """
-        # Initialize the KV cache scale to 1.0 as the default value.
-        # If the kv_scale appears in the checkpoint, it will be
-        # overwritten when loading weights.
-        layer.kv_scale = Parameter(torch.tensor(1.0), requires_grad=False)
-
-    def apply(self, layer: torch.nn.Module) -> torch.Tensor:
-        raise RuntimeError("Fp8KVCacheMethod.apply should not be called.")
-
-    def process_weights_after_loading(self, layer: Module) -> None:
-        # If the kv-cache dtype is auto, we enforce the kv-scale to be 1.0
-        # regardless whether the kv-scale is available in the checkpoint.
-        if layer.kv_cache_dtype != "auto":
-            kv_scale = layer.kv_scale.to("cpu").tolist()
-            if not isinstance(kv_scale, float):
-                raise ValueError(
-                    "Only support per-tensor scaling factor " "for fp8 KV cache"
-                )
-            layer._kv_scale = kv_scale
-            if layer._kv_scale == 1.0 and "e5m2" not in layer.kv_cache_dtype:
-                print_warning_once(
-                    "Using KV cache scaling factor 1.0 for fp8_e4m3. This may "
-                    "cause accuracy issues. Please make sure kv-cache scaling "
-                    "factor is available in the fp8 checkpoint."
-                )
-        del layer.kv_scale
-
-
-def per_tensor_quantize(
-    tensor: torch.Tensor, inv_scale: Union[float, torch.Tensor]
-) -> torch.Tensor:
-    finfo = torch.finfo(torch.float8_e4m3fn)
-    qweight = (tensor / inv_scale).clamp(min=finfo.min, max=finfo.max)
-    return qweight.to(torch.float8_e4m3fn)
-
-
-def per_tensor_dequantize(
-    tensor: torch.Tensor, inv_scale: Union[float, torch.Tensor]
-) -> torch.Tensor:
-    fake_qweight = tensor.to(torch.float16)
-    dq_weight = fake_qweight * inv_scale
-    return dq_weight
-
-
-def all_close_1d(x: torch.Tensor) -> bool:
-    assert len(x.shape) == 1
-    return all(torch.allclose(x[0], x[i]) for i in range(x.shape[0]))