sglang 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. sglang/api.py +7 -1
  2. sglang/bench_latency.py +9 -6
  3. sglang/bench_serving.py +46 -22
  4. sglang/global_config.py +1 -1
  5. sglang/lang/backend/runtime_endpoint.py +60 -49
  6. sglang/lang/compiler.py +2 -2
  7. sglang/lang/interpreter.py +4 -2
  8. sglang/lang/ir.py +16 -7
  9. sglang/srt/constrained/base_tool_cache.py +1 -1
  10. sglang/srt/constrained/fsm_cache.py +12 -2
  11. sglang/srt/constrained/jump_forward.py +13 -2
  12. sglang/srt/layers/activation.py +32 -0
  13. sglang/srt/layers/{token_attention.py → decode_attention.py} +9 -5
  14. sglang/srt/layers/extend_attention.py +9 -2
  15. sglang/srt/layers/fused_moe/__init__.py +1 -0
  16. sglang/srt/layers/{fused_moe.py → fused_moe/fused_moe.py} +165 -108
  17. sglang/srt/layers/fused_moe/layer.py +587 -0
  18. sglang/srt/layers/layernorm.py +65 -0
  19. sglang/srt/layers/logits_processor.py +7 -2
  20. sglang/srt/layers/pooler.py +50 -0
  21. sglang/srt/layers/{context_flashattention_nopad.py → prefill_attention.py} +5 -0
  22. sglang/srt/layers/radix_attention.py +40 -16
  23. sglang/srt/managers/detokenizer_manager.py +31 -9
  24. sglang/srt/managers/io_struct.py +63 -0
  25. sglang/srt/managers/policy_scheduler.py +173 -25
  26. sglang/srt/managers/schedule_batch.py +115 -97
  27. sglang/srt/managers/tokenizer_manager.py +194 -112
  28. sglang/srt/managers/tp_worker.py +290 -359
  29. sglang/srt/mem_cache/{base_cache.py → base_prefix_cache.py} +9 -4
  30. sglang/srt/mem_cache/chunk_cache.py +43 -20
  31. sglang/srt/mem_cache/memory_pool.py +2 -2
  32. sglang/srt/mem_cache/radix_cache.py +74 -40
  33. sglang/srt/model_executor/cuda_graph_runner.py +71 -25
  34. sglang/srt/model_executor/forward_batch_info.py +293 -156
  35. sglang/srt/model_executor/model_runner.py +77 -57
  36. sglang/srt/models/chatglm.py +2 -2
  37. sglang/srt/models/commandr.py +1 -1
  38. sglang/srt/models/deepseek.py +2 -2
  39. sglang/srt/models/deepseek_v2.py +7 -6
  40. sglang/srt/models/gemma.py +1 -1
  41. sglang/srt/models/gemma2.py +11 -6
  42. sglang/srt/models/grok.py +50 -396
  43. sglang/srt/models/internlm2.py +2 -7
  44. sglang/srt/models/llama2.py +4 -4
  45. sglang/srt/models/llama_embedding.py +88 -0
  46. sglang/srt/models/minicpm.py +2 -2
  47. sglang/srt/models/mixtral.py +56 -254
  48. sglang/srt/models/mixtral_quant.py +1 -4
  49. sglang/srt/models/qwen.py +2 -2
  50. sglang/srt/models/qwen2.py +2 -2
  51. sglang/srt/models/qwen2_moe.py +2 -13
  52. sglang/srt/models/stablelm.py +1 -1
  53. sglang/srt/openai_api/adapter.py +187 -48
  54. sglang/srt/openai_api/protocol.py +37 -1
  55. sglang/srt/sampling/penaltylib/__init__.py +13 -0
  56. sglang/srt/sampling/penaltylib/orchestrator.py +357 -0
  57. sglang/srt/sampling/penaltylib/penalizers/frequency_penalty.py +80 -0
  58. sglang/srt/sampling/penaltylib/penalizers/min_new_tokens.py +105 -0
  59. sglang/srt/sampling/penaltylib/penalizers/presence_penalty.py +79 -0
  60. sglang/srt/sampling/penaltylib/penalizers/repetition_penalty.py +83 -0
  61. sglang/srt/sampling_params.py +31 -8
  62. sglang/srt/server.py +91 -29
  63. sglang/srt/server_args.py +32 -19
  64. sglang/srt/utils.py +32 -15
  65. sglang/test/run_eval.py +10 -1
  66. sglang/test/runners.py +81 -73
  67. sglang/test/simple_eval_humaneval.py +2 -8
  68. sglang/test/simple_eval_mgsm.py +203 -0
  69. sglang/test/srt/sampling/penaltylib/utils.py +337 -0
  70. sglang/test/test_layernorm.py +60 -0
  71. sglang/test/test_programs.py +36 -7
  72. sglang/test/test_utils.py +24 -2
  73. sglang/utils.py +0 -1
  74. sglang/version.py +1 -1
  75. {sglang-0.2.11.dist-info → sglang-0.2.13.dist-info}/METADATA +33 -16
  76. sglang-0.2.13.dist-info/RECORD +112 -0
  77. {sglang-0.2.11.dist-info → sglang-0.2.13.dist-info}/WHEEL +1 -1
  78. sglang/srt/layers/linear.py +0 -884
  79. sglang/srt/layers/quantization/__init__.py +0 -64
  80. sglang/srt/layers/quantization/fp8.py +0 -677
  81. sglang/srt/model_loader/model_loader.py +0 -292
  82. sglang/srt/model_loader/utils.py +0 -275
  83. sglang-0.2.11.dist-info/RECORD +0 -102
  84. {sglang-0.2.11.dist-info → sglang-0.2.13.dist-info}/LICENSE +0 -0
  85. {sglang-0.2.11.dist-info → sglang-0.2.13.dist-info}/top_level.txt +0 -0
sglang/srt/layers/linear.py (deleted)
@@ -1,884 +0,0 @@
- """
- Copyright 2023-2024 SGLang Team
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- """
-
- # temporarily adapted from https://github.com/vllm-project/vllm/blob/e76466dde2bc9525d55165ceaa600d298c7bf773/vllm/model_executor/layers/linear.py
- # FIXME: refactor the linear abstraction
- from abc import abstractmethod
- from typing import Dict, List, Optional, Tuple
-
- import torch
- import torch.nn.functional as F
- from torch.nn.parameter import Parameter
- from vllm.distributed import (
-     divide,
-     get_tensor_model_parallel_rank,
-     get_tensor_model_parallel_world_size,
-     split_tensor_along_last_dim,
-     tensor_model_parallel_all_gather,
-     tensor_model_parallel_all_reduce,
- )
- from vllm.logger import init_logger
- from vllm.model_executor.layers.quantization.base_config import (
-     QuantizationConfig,
-     QuantizeMethodBase,
- )
- from vllm.model_executor.utils import set_weight_attrs
-
- logger = init_logger(__name__)
-
-
- def adjust_marlin_shard(param, shard_size, shard_offset):
-     marlin_tile_size = getattr(param, "marlin_tile_size", None)
-     if marlin_tile_size is None:
-         return shard_size, shard_offset
-
-     return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
-
-
- def adjust_bitsandbytes_shard(
-     param: Parameter, qkv_offsets: Dict[str, Tuple[int, int]], loaded_shard_id: str
- ) -> Tuple[int, int]:
-     """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""
-
-     total, _ = qkv_offsets["total"]
-     orig_offset, orig_size = qkv_offsets[loaded_shard_id]
-
-     quantized_total = param.data.shape[0]
-     quantized_offset = orig_offset * quantized_total // total
-     quantized_size = orig_size * quantized_total // total
-
-     return quantized_size, quantized_offset
-
-
- def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
-     """For fused modules (QKV and MLP) we have an array of length
-     N that holds 1 scale for each "logical" matrix. So the param
-     is an array of length N. The loaded_weight corresponds to
-     one of the shards on disk. Here, we slice the param based on
-     the shard_id for loading.
-     """
-     qkv_idxs = {"q": 0, "k": 1, "v": 2}
-
-     if isinstance(shard_id, str):
-         shard_id = qkv_idxs[shard_id]
-     elif not isinstance(shard_id, int):
-         raise ValueError(f"Unknown Shard Id {shard_id}")
-
-     # AutoFP8 scales do not have a shape
-     # compressed-tensors scales do have a shape
-     if len(loaded_weight.shape) != 0:
-         assert loaded_weight.shape[0] == 1
-         loaded_weight = loaded_weight[0]
-
-     return param[shard_id], loaded_weight
-
-
- class LinearMethodBase(QuantizeMethodBase):
-     """Base class for different (maybe quantized) linear methods."""
-
-     @abstractmethod
-     def create_weights(
-         self,
-         layer: torch.nn.Module,
-         input_size_per_partition: int,
-         output_partition_sizes: List[int],
-         input_size: int,
-         output_size: int,
-         params_dtype: torch.dtype,
-         **extra_weight_attrs,
-     ):
-         """Create weights for a linear layer.
-         The weights will be set as attributes of the layer.
-
-         Args:
-             layer: The layer that is using the LinearMethodBase factory.
-             input_size_per_partition: Size of the weight input dim on rank X.
-             output_partition_sizes: Sizes of the output dim of each logical
-                 weight on rank X. E.g., output_partition_sizes for QKVLinear
-                 is a list contains the width of Wq, Wk, Wv on rank X.
-             input_size: Size of the input dim of the weight across all ranks.
-             output_size: Size of the output dim of the weight across all ranks.
-             params_dtype: Datatype of the parameters.
-         """
-         raise NotImplementedError
-
-     @abstractmethod
-     def apply(
-         self,
-         layer: torch.nn.Module,
-         x: torch.Tensor,
-         bias: Optional[torch.Tensor] = None,
-     ) -> torch.Tensor:
-         """Apply the weights in layer to the input tensor.
-         Expects create_weights to have been called before on the layer."""
-         raise NotImplementedError
-
-
- class UnquantizedLinearMethod(LinearMethodBase):
-     """Linear method without quantization.
-
-     Args:
-         separate_bias_add: If true, add bias separately after matrix
-                            multiplication.
-     """
-
-     def __init__(self, separate_bias_add: bool = False):
-         self.separate_bias_add = separate_bias_add
-
-     def create_weights(
-         self,
-         layer: torch.nn.Module,
-         input_size_per_partition: int,
-         output_partition_sizes: List[int],
-         input_size: int,
-         output_size: int,
-         params_dtype: torch.dtype,
-         **extra_weight_attrs,
-     ):
-         weight = Parameter(
-             torch.empty(
-                 sum(output_partition_sizes),
-                 input_size_per_partition,
-                 dtype=params_dtype,
-             ),
-             requires_grad=False,
-         )
-         set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
-         layer.register_parameter("weight", weight)
-         set_weight_attrs(weight, extra_weight_attrs)
-
-     def apply(
-         self,
-         layer: torch.nn.Module,
-         x: torch.Tensor,
-         bias: Optional[torch.Tensor] = None,
-     ) -> torch.Tensor:
-         weight = layer.weight
-         if self.separate_bias_add:
-             if bias is not None:
-                 return F.linear(x, weight) + bias
-             return F.linear(x, weight)
-         return F.linear(x, weight, bias)
-
-
- class LinearBase(torch.nn.Module):
-     """Base linear layer.
-
-     Args:
-         input_size: input dimension of the linear layer.
-         output_size: output dimension of the linear layer.
-         bias: If true, add bias.
-         skip_bias_add: If true, skip adding bias but instead return it.
-         params_dtype: Data type for the parameters.
-         quant_config: Quantization configure.
-     """
-
-     def __init__(
-         self,
-         input_size: int,
-         output_size: int,
-         skip_bias_add: bool = False,
-         params_dtype: Optional[torch.dtype] = None,
-         quant_config: Optional[QuantizationConfig] = None,
-     ):
-         super().__init__()
-
-         # Keep input parameters
-         self.input_size = input_size
-         self.output_size = output_size
-         self.skip_bias_add = skip_bias_add
-         if params_dtype is None:
-             params_dtype = torch.get_default_dtype()
-         self.params_dtype = params_dtype
-         if quant_config is None:
-             self.quant_method: Optional[QuantizeMethodBase] = UnquantizedLinearMethod()
-         else:
-             self.quant_method = quant_config.get_quant_method(self)
-
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         raise NotImplementedError
-
-
- class ReplicatedLinear(LinearBase):
-     """Replicated linear layer.
-
-     Args:
-         input_size: input dimension of the linear layer.
-         output_size: output dimension of the linear layer.
-         bias: If true, add bias.
-         skip_bias_add: If true, skip adding bias but instead return it.
-         params_dtype: Data type for the parameters.
-         quant_config: Quantization configure.
-     """
-
-     def __init__(
-         self,
-         input_size: int,
-         output_size: int,
-         bias: bool = True,
-         skip_bias_add: bool = False,
-         params_dtype: Optional[torch.dtype] = None,
-         quant_config: Optional[QuantizationConfig] = None,
-     ):
-         super().__init__(
-             input_size, output_size, skip_bias_add, params_dtype, quant_config
-         )
-
-         # All the linear layer supports quant method.
-         assert self.quant_method is not None
-         self.quant_method.create_weights(
-             self,
-             self.input_size,
-             [self.output_size],
-             self.input_size,
-             self.output_size,
-             self.params_dtype,
-         )
-
-         if bias:
-             self.bias = Parameter(
-                 torch.empty(self.output_size, dtype=self.params_dtype)
-             )
-             set_weight_attrs(self.bias, {"output_dim": 0})
-         else:
-             self.register_parameter("bias", None)
-
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         bias = self.bias if not self.skip_bias_add else None
-         assert self.quant_method is not None
-         output = self.quant_method.apply(self, x, bias)
-         output_bias = self.bias if self.skip_bias_add else None
-         return output, output_bias
-
-     def extra_repr(self) -> str:
-         s = f"in_features={self.input_size}"
-         s += f", output_features={self.output_size}"
-         s += f", bias={self.bias is not None}"
-         return s
-
-
- class ColumnParallelLinear(LinearBase):
-     """Linear layer with column parallelism.
-
-     The linear layer is defined as Y = XA + b. A is parallelized along
-     its second dimension as A = [A_1, ..., A_p].
-
-     Args:
-         input_size: first dimension of matrix A.
-         output_size: second dimension of matrix A.
-         bias: If true, add bias.
-         gather_output: If true, call all-gather on output and make Y available
-                        to all GPUs, otherwise, every GPU will have its output
-                        which is Y_i = XA_i
-         skip_bias_add: This was added to enable performance optimizations where
-                        bias can be fused with other element-wise operations. we
-                        skip adding bias but instead return it.
-         params_dtype: Data type for the parameters.
-         quant_config: Quantization configure.
-         output_sizes: list of output sizes packed into one output, like for QKV
-                       the list would be size 3.
-     """
-
-     def __init__(
-         self,
-         input_size: int,
-         output_size: int,
-         bias: bool = True,
-         gather_output: bool = False,
-         skip_bias_add: bool = False,
-         params_dtype: Optional[torch.dtype] = None,
-         quant_config: Optional[QuantizationConfig] = None,
-         output_sizes: Optional[List[int]] = None,
-     ):
-         super().__init__(
-             input_size, output_size, skip_bias_add, params_dtype, quant_config
-         )
-
-         self.gather_output = gather_output
-
-         # Divide the weight matrix along the last dimension.
-         tp_size = get_tensor_model_parallel_world_size()
-         assert self.quant_method is not None
-         self.output_size_per_partition = divide(self.output_size, tp_size)
-         self.output_partition_sizes = [self.output_size_per_partition]
-         # If QKV or MergedColumn, use output size of each partition.
-         if hasattr(self, "output_sizes"):
-             self.output_partition_sizes = [
-                 divide(output_size, tp_size) for output_size in self.output_sizes
-             ]
-
-         if output_sizes is None:
-             output_sizes = [output_size]
-         self.quant_method.create_weights(
-             layer=self,
-             input_size_per_partition=self.input_size,
-             output_partition_sizes=self.output_partition_sizes,
-             input_size=self.input_size,
-             output_size=self.output_size,
-             params_dtype=self.params_dtype,
-             weight_loader=self.weight_loader,
-         )
-         if bias:
-             self.bias = Parameter(
-                 torch.empty(self.output_size_per_partition, dtype=params_dtype)
-             )
-             set_weight_attrs(
-                 self.bias,
-                 {
-                     "output_dim": 0,
-                     "weight_loader": self.weight_loader,
-                 },
-             )
-         else:
-             self.register_parameter("bias", None)
-
-     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
-         if param.data.dtype != loaded_weight.dtype:
-             param.data = torch.empty_like(
-                 param.data, dtype=loaded_weight.dtype, device="cuda"
-             )
-
-         tp_rank = get_tensor_model_parallel_rank()
-         output_dim = getattr(param, "output_dim", None)
-         param_data = param.data
-         if output_dim is not None:
-             shard_size = param_data.shape[output_dim]
-             start_idx = tp_rank * shard_size
-             loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
-
-         # Special case for loading scales off disk, which often do not
-         # have a shape (such as in the case of AutoFP8).
-         if len(loaded_weight.shape) == 0:
-             loaded_weight = loaded_weight.reshape(1)
-
-         assert param_data.shape == loaded_weight.shape
-         param_data.copy_(loaded_weight)
-
-     def forward(self, input_):
-         bias = self.bias if not self.skip_bias_add else None
-
-         # Matrix multiply.
-         assert self.quant_method is not None
-         output_parallel = self.quant_method.apply(self, input_, bias)
-         if self.gather_output:
-             # All-gather across the partitions.
-             output = tensor_model_parallel_all_gather(output_parallel)
-         else:
-             output = output_parallel
-         output_bias = self.bias if self.skip_bias_add else None
-         return output, output_bias
-
-     def extra_repr(self) -> str:
-         s = f"in_features={self.input_size}"
-         s += f", output_features={self.output_size_per_partition}"
-         s += f", bias={self.bias is not None}"
-         s += f", tp_size={get_tensor_model_parallel_world_size()}"
-         s += f", gather_output={self.gather_output}"
-         return s
-
-
- class MergedColumnParallelLinear(ColumnParallelLinear):
-     """Packed linear layers with column parallelism.
-
-     Similar to ColumnParallelLinear, but the weight matrix is concatenated
-     along the output dimension. When the weight matrix is loaded, the
-     different partitions are sharded separately.
-
-     Args:
-         input_size: input dimension of the linear layer.
-         output_sizes: list of output dimensions of the linear layer.
-         bias: If true, add bias.
-         gather_output: If true, call all-gather on output and make the output
-                        available to all GPUs, otherwise, every GPU will have
-                        its own output.
-         skip_bias_add: This was added to enable performance optimizations where
-                        bias can be fused with other element-wise operations. we
-                        skip adding bias but instead return it.
-         params_dtype: Data type for the parameters.
-         quant_config: Quantization configure.
-     """
-
-     def __init__(
-         self,
-         input_size: int,
-         output_sizes: List[int],
-         bias: bool = True,
-         gather_output: bool = False,
-         skip_bias_add: bool = False,
-         params_dtype: Optional[torch.dtype] = None,
-         quant_config: Optional[QuantizationConfig] = None,
-     ):
-         self.output_sizes = output_sizes
-         tp_size = get_tensor_model_parallel_world_size()
-         assert all(output_size % tp_size == 0 for output_size in output_sizes)
-         super().__init__(
-             input_size=input_size,
-             output_size=sum(output_sizes),
-             bias=bias,
-             gather_output=gather_output,
-             skip_bias_add=skip_bias_add,
-             params_dtype=params_dtype,
-             quant_config=quant_config,
-         )
-
-     def weight_loader(
-         self,
-         param: Parameter,
-         loaded_weight: torch.Tensor,
-         loaded_shard_id: Optional[int] = None,
-     ):
-         if param.data.dtype != loaded_weight.dtype:
-             param.data = torch.empty_like(
-                 param.data, dtype=loaded_weight.dtype, device="cuda"
-             )
-
-         param_data = param.data
-         output_dim = getattr(param, "output_dim", None)
-         # Special case for AQLM codebooks.
-         is_metadata = getattr(param, "is_metadata", False)
-         # Special case for per-tensor scale to load scalar into fused array.
-         needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
-
-         if loaded_shard_id is None:
-             # Loaded weight is already fused on disk (qkv/mlp).
-             if output_dim is None:
-                 if needs_scalar_to_array is not None:
-                     param_data, loaded_weight = adjust_scalar_to_fused_array(
-                         param_data, loaded_weight, 0
-                     )
-
-                 assert param_data.shape == loaded_weight.shape
-                 param_data.copy_(loaded_weight)
-                 return
-             current_shard_offset = 0
-             shard_offsets: List[Tuple[int, int, int]] = []
-             for i, output_size in enumerate(self.output_sizes):
-                 shard_offsets.append((i, current_shard_offset, output_size))
-                 current_shard_offset += output_size
-             packed_dim = getattr(param, "packed_dim", None)
-             for shard_id, shard_offset, shard_size in shard_offsets:
-                 # Special case for Quantization.
-                 # If quantized, we need to adjust the offset and size to account
-                 # for the packing.
-                 if packed_dim == output_dim:
-                     shard_size = shard_size // param.pack_factor
-                     shard_offset = shard_offset // param.pack_factor
-                     # Special case for Marlin.
-                     shard_size, shard_offset = adjust_marlin_shard(
-                         param, shard_size, shard_offset
-                     )
-
-                 loaded_weight_shard = loaded_weight.narrow(
-                     output_dim, shard_offset, shard_size
-                 )
-                 self.weight_loader(param, loaded_weight_shard, shard_id)
-             return
-
-         assert loaded_shard_id < len(self.output_sizes)
-         tp_rank = get_tensor_model_parallel_rank()
-         tp_size = get_tensor_model_parallel_world_size()
-         if output_dim is not None:
-             shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
-             shard_size = self.output_sizes[loaded_shard_id] // tp_size
-             # Special case for quantization.
-             # If quantized, we need to adjust the offset and size to account
-             # for the packing.
-             packed_dim = getattr(param, "packed_dim", None)
-             if packed_dim == output_dim:
-                 shard_size = shard_size // param.pack_factor
-                 shard_offset = shard_offset // param.pack_factor
-                 # Special case for Marlin.
-                 shard_size, shard_offset = adjust_marlin_shard(
-                     param, shard_size, shard_offset
-                 )
-
-             use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
-             if use_bitsandbytes:
-                 shard_size = loaded_weight.shape[output_dim]
-                 shard_offset = loaded_weight.shape[output_dim] * loaded_shard_id
-
-             param_data = param_data.narrow(output_dim, shard_offset, shard_size)
-             start_idx = tp_rank * shard_size
-             loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
-         # Special case for AQLM codebooks.
-         elif is_metadata:
-             # metadata indicates fixed size concatenated along dim 0
-             shard_size = loaded_weight.shape[0]
-             shard_offset = loaded_shard_id * shard_size
-             param_data = param_data.narrow(0, shard_offset, shard_size)
-
-         # Special case for per-tensor scales in fused case.
-         elif needs_scalar_to_array:
-             param_data, loaded_weight = adjust_scalar_to_fused_array(
-                 param_data, loaded_weight, loaded_shard_id
-             )
-
-         else:
-             ignore_warning = getattr(param, "ignore_warning", False)
-             if not ignore_warning:
-                 logger.warning(
-                     "Loading a weight without `output_dim` attribute in "
-                     "MergedColumnParallelLinear, assume the weight is "
-                     "the same for all partitions."
-                 )
-
-         assert param_data.shape == loaded_weight.shape
-         param_data.copy_(loaded_weight)
-
-
- class QKVParallelLinear(ColumnParallelLinear):
-     """Linear layers for the attention's QKV transformation.
-
-     Linear layers for the linear transformation of the query, key, and value
-     vectors in the attention layer. The weight matrix is concatenated along
-     the output dimension. The layer is parallelized along the head dimension.
-     When the number of key/value heads is smaller than the number of query
-     heads (e.g., multi-query/grouped-query attention), the key/value head may
-     be replicated while the query heads are partitioned.
-
-     Args:
-         hidden_size: input hidden state size of the transformer.
-         head_size: size of each attention head.
-         total_num_heads: total number of attention query heads.
-         total_num_kv_heads: total number of attention key/value heads. If
-                             None, assume total_num_kv_heads = total_num_heads.
-         bias: If true, add bias.
-         skip_bias_add: This was added to enable performance optimizations where
-                        bias can be fused with other element-wise operations. we
-                        skip adding bias but instead return it.
-         params_dtype: Data type for the parameters.
-         quant_config: Quantization configure.
-     """
-
-     def __init__(
-         self,
-         hidden_size: int,
-         head_size: int,
-         total_num_heads: int,
-         total_num_kv_heads: Optional[int] = None,
-         bias: bool = True,
-         skip_bias_add: bool = False,
-         params_dtype: Optional[torch.dtype] = None,
-         quant_config: Optional[QuantizationConfig] = None,
-     ):
-         self.hidden_size = hidden_size
-         self.head_size = head_size
-         self.total_num_heads = total_num_heads
-         if total_num_kv_heads is None:
-             total_num_kv_heads = total_num_heads
-         self.total_num_kv_heads = total_num_kv_heads
-         # Divide the weight matrix along the last dimension.
-         tp_size = get_tensor_model_parallel_world_size()
-         self.num_heads = divide(self.total_num_heads, tp_size)
-         if tp_size >= self.total_num_kv_heads:
-             self.num_kv_heads = 1
-             self.num_kv_head_replicas = divide(tp_size, self.total_num_kv_heads)
-         else:
-             self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
-             self.num_kv_head_replicas = 1
-         input_size = self.hidden_size
-         output_size = (
-             (self.num_heads + 2 * self.num_kv_heads) * tp_size * self.head_size
-         )
-         self.output_sizes = [
-             self.num_heads * self.head_size * tp_size,  # q_proj
-             self.num_kv_heads * self.head_size * tp_size,  # k_proj
-             self.num_kv_heads * self.head_size * tp_size,  # v_proj
-         ]
-
-         super().__init__(
-             input_size=input_size,
-             output_size=output_size,
-             bias=bias,
-             gather_output=False,
-             skip_bias_add=skip_bias_add,
-             params_dtype=params_dtype,
-             quant_config=quant_config,
-         )
-
-     def weight_loader(
-         self,
-         param: Parameter,
-         loaded_weight: torch.Tensor,
-         loaded_shard_id: Optional[str] = None,
-     ):
-         if param.data.dtype != loaded_weight.dtype:
-             param.data = torch.empty_like(
-                 param.data, dtype=loaded_weight.dtype, device="cuda"
-             )
-
-         param_data = param.data
-         output_dim = getattr(param, "output_dim", None)
-         # Special case for AQLM codebooks.
-         is_metadata = getattr(param, "is_metadata", False)
-
-         # Special case for per-tensor scales in fused case.
-         needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
-
-         if loaded_shard_id is None:
-             # Loaded weight is already fused on disk (qkv/mlp).
-             if output_dim is None:
-                 if needs_scalar_to_array is not None:
-                     param_data, loaded_weight = adjust_scalar_to_fused_array(
-                         param_data, loaded_weight, 0
-                     )
-
-                 assert param_data.shape == loaded_weight.shape
-                 param_data.copy_(loaded_weight)
-                 return
-             shard_offsets = [
-                 # (shard_id, shard_offset, shard_size)
-                 ("q", 0, self.total_num_heads * self.head_size),
-                 (
-                     "k",
-                     self.total_num_heads * self.head_size,
-                     self.total_num_kv_heads * self.head_size,
-                 ),
-                 (
-                     "v",
-                     (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
-                     self.total_num_kv_heads * self.head_size,
-                 ),
-             ]
-             packed_dim = getattr(param, "packed_dim", None)
-             for shard_id, shard_offset, shard_size in shard_offsets:
-                 # Special case for Quantized Weights.
-                 # If quantized, we need to adjust the offset and size to account
-                 # for the packing.
-                 if packed_dim == output_dim:
-                     shard_size = shard_size // param.pack_factor
-                     shard_offset = shard_offset // param.pack_factor
-
-                     # Special case for Marlin.
-                     shard_size, shard_offset = adjust_marlin_shard(
-                         param, shard_size, shard_offset
-                     )
-
-                 loaded_weight_shard = loaded_weight.narrow(
-                     output_dim, shard_offset, shard_size
-                 )
-                 self.weight_loader(param, loaded_weight_shard, shard_id)
-             return
-
-         tp_rank = get_tensor_model_parallel_rank()
-         assert loaded_shard_id in ["q", "k", "v"]
-
-         # If output dim is defined, use the default loading process.
-         if output_dim is not None:
-             if loaded_shard_id == "q":
-                 shard_offset = 0
-                 shard_size = self.num_heads * self.head_size
-             elif loaded_shard_id == "k":
-                 shard_offset = self.num_heads * self.head_size
-                 shard_size = self.num_kv_heads * self.head_size
-             elif loaded_shard_id == "v":
-                 shard_offset = (self.num_heads + self.num_kv_heads) * self.head_size
-                 shard_size = self.num_kv_heads * self.head_size
-             # Special case for Quantized Weights.
-             # If quantized, we need to adjust the offset and size to account
-             # for the packing.
-             packed_dim = getattr(param, "packed_dim", None)
-             if packed_dim == output_dim:
-                 shard_size = shard_size // param.pack_factor
-                 shard_offset = shard_offset // param.pack_factor
-
-                 # Special case for Marlin.
-                 shard_size, shard_offset = adjust_marlin_shard(
-                     param, shard_size, shard_offset
-                 )
-
-             use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
-             if use_bitsandbytes:
-                 orig_qkv_offsets = {
-                     "q": (0, self.num_heads * self.head_size),
-                     "k": (
-                         self.num_heads * self.head_size,
-                         self.num_kv_heads * self.head_size,
-                     ),
-                     "v": (
-                         (self.num_heads + self.num_kv_heads) * self.head_size,
-                         self.num_kv_heads * self.head_size,
-                     ),
-                     "total": (
-                         (self.num_heads + 2 * self.num_kv_heads) * self.head_size,
-                         0,
-                     ),
-                 }
-                 shard_size, shard_offset = adjust_bitsandbytes_shard(
-                     param, orig_qkv_offsets, loaded_shard_id
-                 )
-
-             param_data = param_data.narrow(output_dim, shard_offset, shard_size)
-             if loaded_shard_id == "q":
-                 shard_id = tp_rank
-             else:
-                 shard_id = tp_rank // self.num_kv_head_replicas
-             start_idx = shard_id * shard_size
-             loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
-         # Special case for for AQLM codebooks.
-         elif is_metadata:
-             # metadata indicates fixed size concatenated along dim 0
-             shard_size = loaded_weight.shape[0]
-             shard_index = ["q", "k", "v"].index(loaded_shard_id)
-             param_data = param_data.narrow(0, shard_index * shard_size, shard_size)
-         # Special case for per-tensor scales in fused case.
-         elif needs_scalar_to_array:
-             param_data, loaded_weight = adjust_scalar_to_fused_array(
-                 param_data, loaded_weight, loaded_shard_id
-             )
-         else:
-             ignore_warning = getattr(param, "ignore_warning", False)
-             if not ignore_warning:
-                 logger.warning(
-                     "Loading a weight without `output_dim` attribute in "
-                     "QKVParallelLinear, assume the weight is the same "
-                     "for all partitions."
-                 )
-
-         assert param_data.shape == loaded_weight.shape
-         param_data.copy_(loaded_weight)
-
-
- class RowParallelLinear(LinearBase):
-     """Linear layer with row parallelism.
-
-     The linear layer is defined as Y = XA + b. A is parallelized along
-     its first dimension and X along its second dimension as:
-                -   -
-               | A_1 |
-               | .   |
-           A = | .   |        X = [X_1, ..., X_p]
-               | .   |
-               | A_p |
-                -   -
-     Arguments:
-         input_size: first dimension of matrix A.
-         output_size: second dimension of matrix A.
-         bias: If true, add bias. Note that bias is not parallelized.
-         input_is_parallel: If true, we assume that the input is already
-                            split across the GPUs and we do not split
-                            again.
-         skip_bias_add: This was added to enable performance optimization where
-                        bias can be fused with other element-wise operations.
-                        We skip adding bias but instead return it.
-         params_dtype: Data type for the parameters.
-         quant_config: Quantization configure.
-     """
-
-     def __init__(
-         self,
-         input_size: int,
-         output_size: int,
-         bias: bool = True,
-         input_is_parallel: bool = True,
-         skip_bias_add: bool = False,
-         params_dtype: Optional[torch.dtype] = None,
-         reduce_results: bool = True,
-         quant_config: Optional[QuantizationConfig] = None,
-     ):
-         super().__init__(
-             input_size, output_size, skip_bias_add, params_dtype, quant_config
-         )
-
-         self.input_is_parallel = input_is_parallel
-         self.reduce_results = reduce_results
-
-         # Divide the weight matrix along the last dimension.
-         self.tp_size = get_tensor_model_parallel_world_size()
-         self.input_size_per_partition = divide(input_size, self.tp_size)
-         assert self.quant_method is not None
-         self.quant_method.create_weights(
-             layer=self,
-             input_size_per_partition=self.input_size_per_partition,
-             output_partition_sizes=[self.output_size],
-             input_size=self.input_size,
-             output_size=self.output_size,
-             params_dtype=self.params_dtype,
-             weight_loader=self.weight_loader,
-         )
-         if not reduce_results and (bias and not skip_bias_add):
-             raise ValueError(
-                 "When not reduce the results, adding bias to the "
-                 "results can lead to incorrect results"
-             )
-
-         if bias:
-             self.bias = Parameter(torch.empty(self.output_size, dtype=params_dtype))
-             set_weight_attrs(
-                 self.bias,
-                 {
-                     "output_dim": 0,
-                     "weight_loader": self.weight_loader,
-                 },
-             )
-         else:
-             self.register_parameter("bias", None)
-
-     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
-         if param.data.dtype != loaded_weight.dtype:
-             param.data = torch.empty_like(
-                 param.data, dtype=loaded_weight.dtype, device="cuda"
-             )
-
-         param_data = param.data
-         tp_rank = get_tensor_model_parallel_rank()
-         input_dim = getattr(param, "input_dim", None)
-         if input_dim is not None:
-             shard_size = param.data.shape[input_dim]
-             start_idx = tp_rank * shard_size
-             loaded_weight = loaded_weight.narrow(input_dim, start_idx, shard_size)
-
-         # Special case for loading scales off disk, which often do not
-         # have a shape (such as in the case of AutoFP8).
-         if len(loaded_weight.shape) == 0:
-             loaded_weight = loaded_weight.reshape(1)
-
-         assert param_data.shape == loaded_weight.shape
-         param_data.copy_(loaded_weight)
-
-     def forward(self, input_):
-         # Set up backprop all-reduce.
-         if self.input_is_parallel:
-             input_parallel = input_
-         else:
-             tp_rank = get_tensor_model_parallel_rank()
-             splitted_input = split_tensor_along_last_dim(
-                 input_, num_partitions=self.tp_size
-             )
-             input_parallel = splitted_input[tp_rank].contiguous()
-
-         # Matrix multiply.
-         assert self.quant_method is not None
-         output_parallel = self.quant_method.apply(self, input_parallel)
-         if self.reduce_results and self.tp_size > 1:
-             output_ = tensor_model_parallel_all_reduce(output_parallel)
-         else:
-             output_ = output_parallel
-
-         if not self.skip_bias_add:
-             output = output_ + self.bias if self.bias is not None else output_
-             output_bias = None
-         else:
-             output = output_
-             output_bias = self.bias
-         return output, output_bias
-
-     def extra_repr(self) -> str:
-         s = f"input_features={self.input_size_per_partition}"
-         s += f", output_features={self.output_size}"
-         s += f", bias={self.bias is not None}"
-         s += f", tp_size={self.tp_size}"
-         s += f", reduce_results={self.reduce_results}"
-         return s